# File: buildbot_buildbot/master/buildbot/test/util/db.py

# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
import os
import sqlalchemy as sa
from sqlalchemy.schema import MetaData
from twisted.internet import defer
from twisted.internet import reactor
from twisted.python import log
from twisted.trial import unittest
from buildbot.db import enginestrategy
from buildbot.db import model
from buildbot.db import pool
from buildbot.db.connector import DBConnector
from buildbot.util.sautils import withoutSqliteForeignKeys
def skip_for_dialect(dialect):
"""Decorator to skip a test for a particular SQLAlchemy dialect."""
def dec(fn):
def wrap(self, *args, **kwargs):
if self.db_engine.dialect.name == dialect:
raise unittest.SkipTest(f"Not supported on dialect '{dialect}'")
return fn(self, *args, **kwargs)
return wrap
return dec
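# A minimal usage sketch (hypothetical test case; `MyDBTest` and the test
# method are illustrative, not part of this module):
#
#     class MyDBTest(unittest.TestCase, RealDatabaseMixin):
#         @skip_for_dialect('sqlite')
#         def test_postgres_only_behaviour(self):
#             ...  # skipped whenever self.db_engine uses the sqlite dialect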
def get_trial_parallel_from_cwd(cwd):
cwd = cwd.rstrip("/")
last = os.path.basename(cwd)
prev = os.path.basename(os.path.dirname(cwd))
if last == "_trial_temp":
return False
if prev == "_trial_temp":
try:
return int(last)
except ValueError:
return None
return None
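# Illustrative results for the function above (hypothetical paths):
#
#     get_trial_parallel_from_cwd("/tmp/_trial_temp")    -> False  (serial run)
#     get_trial_parallel_from_cwd("/tmp/_trial_temp/3")  -> 3      (parallel worker 3)
#     get_trial_parallel_from_cwd("/tmp/elsewhere")      -> None   (unrecognized layout)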
def resolve_test_index_in_db_url(db_url):
test_id = get_trial_parallel_from_cwd(os.getcwd())
if "{TEST_ID}" in db_url:
return db_url.replace("{TEST_ID}", str(test_id or 0))
if db_url == 'sqlite://':
return db_url
if test_id is not None and test_id is not False:
if db_url.startswith('sqlite:///'):
# Relative DB URLs in the test directory are fine.
path = db_url[len('sqlite:///') :]
if not os.path.relpath(path).startswith(".."):
return db_url
        raise RuntimeError("Database tests cannot run in parallel")
return db_url
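# For example, with BUILDBOT_TEST_DB_URL set to a templated URL such as the
# hypothetical "postgresql://localhost/bbtest_{TEST_ID}", trial worker 3
# resolves it to "postgresql://localhost/bbtest_3", while a serial run
# resolves it to "postgresql://localhost/bbtest_0".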
class RealDatabaseMixin:
"""
A class that sets up a real database for testing. This sets self.db_url to
the URL for the database. By default, it specifies an in-memory SQLite
database, but if the BUILDBOT_TEST_DB_URL environment variable is set, it
will use the specified database, being careful to clean out *all* tables in
the database before and after the tests are run - so each test starts with
a clean database.
@ivar db_pool: a (real) DBThreadPool instance that can be used as desired
@ivar db_url: the DB URL used to run these tests
@ivar db_engine: the engine created for the test database
    Note that this class uses the production database model. A test-specific
    re-implementation would be virtually identical and would just require
    extra work to keep the two schemas synchronized.
    Similarly, this class uses the production DB thread pool. This achieves
    a few things:
    - affords more thorough tests for the pool
    - avoids repetitive implementation
    - cooperates better at runtime with thread-sensitive DBAPIs
    Finally, it duplicates initialization performed in db.connector.DBConnector.setup().
    Never call that method in tests that use RealDatabaseMixin; use
    RealDatabaseWithConnectorMixin instead. (A usage sketch follows this
    class definition.)
    """
def __thd_clean_database(self, conn):
        # In general it's nearly impossible to do "bullet proof" database
        # cleanup with SQLAlchemy that will work on a range of databases
        # and their configurations.
        #
        # The following approaches were considered.
#
# 1. Drop Buildbot Model schema:
#
# model.Model.metadata.drop_all(bind=conn, checkfirst=True)
#
        # Dropping the schema from the model is a correct and working
        # operation only if the database schema corresponds exactly to the
        # model schema.
        #
        # If it does not (e.g. a migration script failed, or migration
        # produced an old version of the model), then some tables outside the
        # model schema may be present, and they may reference tables in the
        # model schema. In this case either dropping the model schema will
        # fail (if the database enforces referential integrity,
        # e.g. PostgreSQL), or dropping the leftover tables in the code below
        # will fail (if the database allows removing tables that other tables
        # reference, e.g. SQLite).
#
# 2. Introspect database contents and drop found tables.
#
# meta = MetaData(bind=conn)
# meta.reflect()
# meta.drop_all()
#
        # May fail if the schema contains reference cycles (and the Buildbot
        # schema has them). Reflection loses the metadata that describes how
        # reference cycles can be broken up (e.g. use_alter=True).
        # Introspection may also fail if the schema has invalid references
        # (which is possible in e.g. SQLite).
#
        # 3. What is actually needed here is accurate code, for each engine
        #    and each engine configuration, that will drop all tables,
        #    indexes, constraints, etc. in the proper order or in a proper way
        #    (using table alteration, or DROP TABLE ... CASCADE, etc.).
        #
        # Conclusion: use approach 2, with known reference cycles broken up
        # manually.
# pylint: disable=too-many-nested-blocks
try:
meta = MetaData()
# Reflect database contents. May fail, e.g. if table references
# non-existent table in SQLite.
meta.reflect(bind=conn)
# Restore `use_alter` settings to break known reference cycles.
# Main goal of this part is to remove SQLAlchemy warning
# about reference cycle.
# List of reference links (table_name, ref_table_name) that
# should be broken by adding use_alter=True.
table_referenced_table_links = [('buildsets', 'builds'), ('builds', 'buildrequests')]
for table_name, ref_table_name in table_referenced_table_links:
if table_name in meta.tables:
table = meta.tables[table_name]
for fkc in table.foreign_key_constraints:
if fkc.referred_table.name == ref_table_name:
fkc.use_alter = True
            # Drop all reflected tables and indices. May fail, e.g. if
            # SQLAlchemy is unable to break circular references.
            # SQLAlchemy's foreign key support with SQLite is not yet perfect,
            # so we must deactivate foreign keys during that operation, even
            # though we did our best to use use_alter above.
with withoutSqliteForeignKeys(conn):
meta.drop_all(bind=conn)
conn.commit()
except Exception:
# sometimes this goes badly wrong; being able to see the schema
# can be a big help
if conn.engine.dialect.name == 'sqlite':
r = conn.execute(sa.text("select sql from sqlite_master where type='table'"))
log.msg("Current schema:")
for row in r.fetchall():
log.msg(row.sql)
raise
def __thd_create_tables(self, conn, table_names):
table_names_set = set(table_names)
tables = [t for t in model.Model.metadata.tables.values() if t.name in table_names_set]
        # Create tables using the create_all() method. This way not only
        # tables and direct indices are created, but also deferred references
        # (those that use use_alter=True in their definition).
model.Model.metadata.create_all(bind=conn, tables=tables, checkfirst=True)
conn.commit()
@defer.inlineCallbacks
def setUpRealDatabase(
self, table_names=None, basedir='basedir', want_pool=True, sqlite_memory=True
):
"""
Set up a database. Ordinarily sets up an engine and a pool and takes
care of cleaning out any existing tables in the database. If
C{want_pool} is false, then no pool will be created, and the database
will not be cleaned.
@param table_names: list of names of tables to instantiate
@param basedir: (optional) basedir for the engine
@param want_pool: (optional) false to not create C{self.db_pool}
@param sqlite_memory: (optional) False to avoid using an in-memory db
@returns: Deferred
"""
if table_names is None:
table_names = []
self.__want_pool = want_pool
default_sqlite = 'sqlite://'
self.db_url = os.environ.get('BUILDBOT_TEST_DB_URL', default_sqlite)
if not sqlite_memory and self.db_url == default_sqlite:
self.db_url = "sqlite:///tmp.sqlite"
self.db_url = resolve_test_index_in_db_url(self.db_url)
if not os.path.exists(basedir):
os.makedirs(basedir)
self.basedir = basedir
self.db_engine = enginestrategy.create_engine(self.db_url, basedir=basedir)
# if the caller does not want a pool, we're done.
if not want_pool:
return None
self.db_pool = pool.DBThreadPool(self.db_engine, reactor=reactor)
log.msg(f"cleaning database {self.db_url}")
yield self.db_pool.do(self.__thd_clean_database)
yield self.db_pool.do(self.__thd_create_tables, table_names)
return None
@defer.inlineCallbacks
def tearDownRealDatabase(self):
if self.__want_pool:
yield self.db_pool.do(self.__thd_clean_database)
yield self.db_pool.shutdown()
else:
self.db_engine.engine.dispose()
@defer.inlineCallbacks
def insert_test_data(self, rows):
"""Insert test data into the database for use during the test.
        @param rows: a sequence of L{fakedb.Row} instances. These will be
sorted by table dependencies, so order does not matter.
@returns: Deferred
"""
# sort the tables by dependency
all_table_names = {row.table for row in rows}
ordered_tables = [
t for t in model.Model.metadata.sorted_tables if t.name in all_table_names
]
        def thd(conn):
            # insert into tables -- in dependency order
            for tbl in ordered_tables:
                for row in [r for r in rows if r.table == tbl.name]:
try:
conn.execute(tbl.insert().values(row.values))
conn.commit()
except Exception:
log.msg(f"while inserting {row} - {row.values}")
raise
yield self.db_pool.do(thd)
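# A minimal usage sketch for RealDatabaseMixin (hypothetical test case; the
# table name and fakedb row are illustrative):
#
#     class MyTablesTest(unittest.TestCase, RealDatabaseMixin):
#         @defer.inlineCallbacks
#         def setUp(self):
#             yield self.setUpRealDatabase(table_names=['masters'])
#             yield self.insert_test_data([fakedb.Master(id=7)])
#
#         def tearDown(self):
#             return self.tearDownRealDatabase()
#
#         @defer.inlineCallbacks
#         def test_query(self):
#             def thd(conn):
#                 ...  # run SQLAlchemy queries against conn
#             yield self.db_pool.do(thd)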
class RealDatabaseWithConnectorMixin(RealDatabaseMixin):
# Same as RealDatabaseMixin, except that a real DBConnector is also setup in a correct way.
@defer.inlineCallbacks
def setUpRealDatabaseWithConnector(
self, master, table_names=None, basedir='basedir', want_pool=True, sqlite_memory=True
):
yield self.setUpRealDatabase(table_names, basedir, want_pool, sqlite_memory)
master.config.db['db_url'] = self.db_url
master.db = DBConnector(self.basedir)
yield master.db.setServiceParent(master)
master.db.pool = self.db_pool
def tearDownRealDatabaseWithConnector(self):
return self.tearDownRealDatabase()
class TestCase(unittest.TestCase):
@defer.inlineCallbacks
def assertFailure(self, d, excp):
exception = None
try:
yield d
except Exception as e:
exception = e
self.assertIsInstance(exception, excp)
self.flushLoggedErrors(excp)
# File: buildbot_buildbot/master/buildbot/test/util/dirs.py

# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
import os
import shutil
from twisted.internet import defer
class DirsMixin:
_dirs = None
def setUpDirs(self, *dirs):
"""Make sure C{dirs} exist and are empty, and set them up to be deleted
in tearDown."""
        # materialize the map(): on Python 3 it is a one-shot iterator, and
        # tearDownDirs must be able to iterate over these paths again
        self._dirs = list(map(os.path.abspath, dirs))
for dir in self._dirs:
if os.path.exists(dir):
shutil.rmtree(dir)
os.makedirs(dir)
# return a deferred to make chaining easier
return defer.succeed(None)
def tearDownDirs(self):
for dir in self._dirs:
if os.path.exists(dir):
shutil.rmtree(dir)
# return a deferred to make chaining easier
return defer.succeed(None)
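# Typical usage (hypothetical test case): create scratch directories in setUp
# and chain the cleanup in tearDown.
#
#     class MyDirTest(unittest.TestCase, DirsMixin):
#         def setUp(self):
#             return self.setUpDirs('basedir', 'workdir')
#
#         def tearDown(self):
#             return self.tearDownDirs()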
# File: buildbot_buildbot/master/buildbot/test/util/warnings.py

# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
# Utility functions for catching Python warnings.
# Twisted's TestCase already gathers produced warnings
# (see t.t.u.T.flushWarnings()); however, Twisted's implementation doesn't
# allow fine-grained control over caught warnings.
# This implementation uses context-manager style to mark the interesting
# block of code in which to catch warnings, which makes it easy to specify
# exactly which statements should generate warnings and which shouldn't.
# This implementation also allows nested checks.
import contextlib
import re
import warnings
@contextlib.contextmanager
def _recordWarnings(category, output):
assert isinstance(output, list)
unrelated_warns = []
with warnings.catch_warnings(record=True) as all_warns:
# Cause all warnings of the provided category to always be
# triggered.
warnings.simplefilter("always", category)
yield
# Filter warnings.
for w in all_warns:
if isinstance(w.message, category):
output.append(w)
else:
unrelated_warns.append(w)
# Re-raise unrelated warnings.
for w in unrelated_warns:
warnings.warn_explicit(w.message, w.category, w.filename, w.lineno)
@contextlib.contextmanager
def assertProducesWarnings(
filter_category, num_warnings=None, messages_patterns=None, message_pattern=None
):
if messages_patterns is not None:
assert message_pattern is None
assert num_warnings is None
num_warnings = len(messages_patterns)
else:
assert num_warnings is not None or message_pattern is not None
warns = []
with _recordWarnings(filter_category, warns):
yield
if num_warnings is None:
num_warnings = 1
warns_str = '\n'.join(map(str, warns))
assert len(warns) == num_warnings, (
"Number of occurred warnings is not correct. "
f"Expected {num_warnings} warnings, received {len(warns)}:\n"
f"{warns_str}"
)
if messages_patterns is None and message_pattern is not None:
messages_patterns = [message_pattern] * num_warnings
if messages_patterns is not None:
for w, pattern in zip(warns, messages_patterns):
# TODO: Maybe don't use regexp, but use simple substring check?
warns_str = '\n'.join(map(str, warns))
assert re.search(pattern, str(w.message)), (
"Warning pattern doesn't match. Expected pattern:\n"
f"{pattern}\n"
"Received message:\n"
f"{w.message}\n"
"All gathered warnings:\n"
f"{warns_str}"
)
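# Example (hypothetical deprecated calls): assert that a block emits exactly
# two DeprecationWarnings whose messages match the given patterns, in order.
#
#     with assertProducesWarnings(DeprecationWarning,
#                                 messages_patterns=[r"old_api", r"older_api"]):
#         call_old_api()
#         call_older_api()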
@contextlib.contextmanager
def assertProducesWarning(filter_category, message_pattern=None):
with assertProducesWarnings(filter_category, num_warnings=1, message_pattern=message_pattern):
yield
@contextlib.contextmanager
def assertNotProducesWarnings(filter_category):
with assertProducesWarnings(filter_category, 0):
yield
@contextlib.contextmanager
def ignoreWarning(category):
with _recordWarnings(category, []):
yield
# File: buildbot_buildbot/master/buildbot/test/util/migration.py

# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
from __future__ import annotations
import os
from typing import TYPE_CHECKING
import sqlalchemy as sa
from alembic.operations import Operations
from alembic.runtime.migration import MigrationContext
from twisted.internet import defer
from twisted.python import log
from buildbot.db import connector
from buildbot.test.fake import fakemaster
from buildbot.test.reactor import TestReactorMixin
from buildbot.test.util import db
from buildbot.test.util import dirs
from buildbot.test.util import querylog
from buildbot.util import sautils
if TYPE_CHECKING:
from sqlalchemy.future.engine import Connection
# test_upgrade vs. migration tests
#
# test_upgrade is an integration test -- it tests the whole upgrade process,
# including the code in model.py. Migrate tests are unit tests, and test a
# single db upgrade script.
class MigrateTestMixin(TestReactorMixin, db.RealDatabaseMixin, dirs.DirsMixin):
@defer.inlineCallbacks
def setUpMigrateTest(self):
self.setup_test_reactor(auto_tear_down=False)
self.basedir = os.path.abspath("basedir")
self.setUpDirs('basedir')
yield self.setUpRealDatabase()
master = yield fakemaster.make_master(self)
self.db = connector.DBConnector(self.basedir)
yield self.db.setServiceParent(master)
self.db.pool = self.db_pool
@defer.inlineCallbacks
def tearDownMigrateTest(self):
self.tearDownDirs()
yield self.tearDownRealDatabase()
yield self.tear_down_test_reactor()
@defer.inlineCallbacks
def do_test_migration(self, base_revision, target_revision, setup_thd_cb, verify_thd_cb):
def setup_thd(conn):
metadata = sa.MetaData()
table = sautils.Table(
'alembic_version',
metadata,
sa.Column("version_num", sa.String(32), nullable=False),
)
table.create(bind=conn)
conn.execute(table.insert().values(version_num=base_revision))
conn.commit()
setup_thd_cb(conn)
yield self.db.pool.do(setup_thd)
alembic_scripts = self.db.model.alembic_get_scripts()
def upgrade_thd(engine):
with querylog.log_queries():
with engine.connect() as conn:
with sautils.withoutSqliteForeignKeys(conn):
def upgrade(rev, context):
log.msg(f'Upgrading from {rev} to {target_revision}')
return alembic_scripts._upgrade_revs(target_revision, rev)
context = MigrationContext.configure(conn, opts={'fn': upgrade})
with Operations.context(context):
with context.begin_transaction():
context.run_migrations()
conn.commit()
yield self.db.pool.do_with_engine(upgrade_thd)
def check_table_charsets_thd(conn: Connection):
# charsets are only a problem for MySQL
if conn.dialect.name != 'mysql':
return
dbs = [r[0] for r in conn.exec_driver_sql("show tables")]
for tbl in dbs:
r = conn.exec_driver_sql(f"show create table {tbl}")
assert r is not None
res = r.fetchone()
assert res is not None
create_table = res[1]
self.assertIn( # type: ignore[attr-defined]
'DEFAULT CHARSET=utf8',
create_table,
f"table {tbl} does not have the utf8 charset",
)
yield self.db.pool.do(check_table_charsets_thd)
def verify_thd(conn):
with sautils.withoutSqliteForeignKeys(conn):
verify_thd_cb(conn)
yield self.db.pool.do(verify_thd)
# File: buildbot_buildbot/master/buildbot/test/util/config.py

# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
from buildbot import config
class _AssertRaisesConfigErrorContext:
def __init__(self, substr_or_re, case):
self.substr_or_re = substr_or_re
self.case = case
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, tb):
if exc_type is None:
self.case.fail("ConfigErrors not raised")
if not issubclass(exc_type, config.ConfigErrors):
self.case.fail(f"ConfigErrors not raised, instead got {exc_type.__name__}")
self.case.assertConfigError(exc_value, self.substr_or_re)
return True
class ConfigErrorsMixin:
def assertConfigError(self, errors, substr_or_re):
if len(errors.errors) > 1:
self.fail(f"too many errors: {errors.errors}")
elif not errors.errors:
self.fail("expected error did not occur")
else:
curr_error = errors.errors[0]
if isinstance(substr_or_re, str):
if substr_or_re not in curr_error:
self.fail(f"non-matching error: {curr_error}, expected: {substr_or_re}")
else:
if not substr_or_re.search(curr_error):
self.fail(f"non-matching error: {curr_error}")
def assertRaisesConfigError(self, substr_or_re, fn=None):
context = _AssertRaisesConfigErrorContext(substr_or_re, self)
if fn is None:
return context
with context:
fn()
return None
def assertNoConfigErrors(self, errors):
self.assertEqual(errors.errors, [])
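# Example usage (hypothetical config and message): both forms check that a
# single ConfigErrors containing the given substring (or matching regex) is
# raised.
#
#     with self.assertRaisesConfigError("no workers are configured"):
#         MasterConfig.loadFromDict(bad_config_dict, 'test.cfg')
#
#     self.assertRaisesConfigError(
#         "no workers are configured",
#         lambda: MasterConfig.loadFromDict(bad_config_dict, 'test.cfg'))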
# File: buildbot_buildbot/master/buildbot/test/util/pbmanager.py

# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
from unittest import mock
from twisted.internet import defer
class PBManagerMixin:
def setUpPBChangeSource(self):
"Set up a fake self.pbmanager."
self.registrations = []
self.unregistrations = []
pbm = self.pbmanager = mock.Mock()
pbm.register = self._fake_register
def _fake_register(self, portstr, username, password, factory):
reg = mock.Mock()
def unregister():
self.unregistrations.append((portstr, username, password))
return defer.succeed(None)
reg.unregister = unregister
self.registrations.append((portstr, username, password))
return reg
def assertNotRegistered(self):
self.assertEqual(self.registrations, [])
def assertNotUnregistered(self):
self.assertEqual(self.unregistrations, [])
def assertRegistered(self, portstr, username, password):
for ps, un, pw in self.registrations:
if ps == portstr and username == un and pw == password:
return
self.fail(f"not registered: {(portstr, username, password)!r} not in {self.registrations}")
def assertUnregistered(self, portstr, username, password):
for ps, un, pw in self.unregistrations:
if ps == portstr and username == un and pw == password:
return
self.fail("still registered")
# File: buildbot_buildbot/master/buildbot/test/util/configurators.py

# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
from buildbot.config.master import MasterConfig
class ConfiguratorMixin:
"""
Support for testing configurators.
@ivar configurator: the configurator under test
@ivar config_dict: the config dict that the configurator is modifying
"""
def setUp(self):
self.config_dict = {}
def setupConfigurator(self, *args, **kwargs):
self.configurator = self.ConfiguratorClass(*args, **kwargs)
return self.configurator.configure(self.config_dict)
def expectWorker(self, name, klass):
if 'workers' in self.config_dict and 'slaves' in self.config_dict:
self.fail("both 'workers' and 'slaves' are in the config dict!")
for worker in self.config_dict.get('workers', []) + self.config_dict.get('slaves', []):
if isinstance(worker, klass) and worker.name == name:
return worker
self.fail(f"expected a worker named {name} of class {klass}")
return None
def expectScheduler(self, name, klass):
for scheduler in self.config_dict['schedulers']:
if scheduler.name == name and isinstance(scheduler, klass):
return scheduler
self.fail(f"expected a scheduler named {name} of class {klass}")
return None
def expectBuilder(self, name):
for builder in self.config_dict['builders']:
if builder.name == name:
return builder
self.fail(f"expected a builder named {name}")
return None
def expectBuilderHasSteps(self, name, step_classes):
builder = self.expectBuilder(name)
for step_class in step_classes:
found = [step for step in builder.factory.steps if step.step_class == step_class]
if not found:
self.fail(f"expected a buildstep of {step_class!r} in {name}")
def expectNoConfigError(self):
config = MasterConfig()
config.loadFromDict(self.config_dict, "test")
# File: buildbot_buildbot/master/buildbot/test/util/integration.py

# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
from __future__ import annotations
import os
import re
import sys
from io import StringIO
from unittest import mock
from twisted.internet import defer
from twisted.internet import reactor
from twisted.python.filepath import FilePath
from twisted.trial import unittest
from zope.interface import implementer
from buildbot.config.master import MasterConfig
from buildbot.data import resultspec
from buildbot.interfaces import IConfigLoader
from buildbot.master import BuildMaster
from buildbot.plugins import worker
from buildbot.process.properties import Interpolate
from buildbot.process.results import SUCCESS
from buildbot.process.results import statusToString
from buildbot.test.reactor import TestReactorMixin
from buildbot.test.util.misc import DebugIntegrationLogsMixin
from buildbot.test.util.sandboxed_worker import SandboxedWorker
from buildbot.worker.local import LocalWorker
Worker: type | None = None
try:
from buildbot_worker.bot import Worker
except ImportError:
pass
@implementer(IConfigLoader)
class DictLoader:
def __init__(self, config_dict):
self.config_dict = config_dict
def loadConfig(self):
return MasterConfig.loadFromDict(self.config_dict, '<dict>')
@defer.inlineCallbacks
def getMaster(case, reactor, config_dict):
"""
Create a started ``BuildMaster`` with the given configuration.
"""
basedir = FilePath(case.mktemp())
basedir.createDirectory()
config_dict['buildbotNetUsageData'] = None
master = BuildMaster(basedir.path, reactor=reactor, config_loader=DictLoader(config_dict))
if 'db_url' not in config_dict:
config_dict['db_url'] = 'sqlite://'
# TODO: Allow BuildMaster to transparently upgrade the database, at least
# for tests.
master.config.db['db_url'] = config_dict['db_url']
yield master.db.setup(check_version=False)
yield master.db.model.upgrade()
master.db.setup = lambda: None
yield master.startService()
case.addCleanup(master.db.pool.shutdown)
case.addCleanup(master.stopService)
return master
class RunFakeMasterTestCase(unittest.TestCase, TestReactorMixin, DebugIntegrationLogsMixin):
def setUp(self):
self.setup_test_reactor(auto_tear_down=False)
self.setupDebugIntegrationLogs()
@defer.inlineCallbacks
def tearDown(self):
self.assertFalse(self.master.running, "master is still running!")
yield self.tear_down_test_reactor()
@defer.inlineCallbacks
def setup_master(self, config_dict):
self.master = yield getMaster(self, self.reactor, config_dict)
@defer.inlineCallbacks
def reconfig_master(self, config_dict=None):
if config_dict is not None:
self.master.config_loader.config_dict = config_dict
yield self.master.doReconfig()
@defer.inlineCallbacks
def clean_master_shutdown(self, quick=False):
yield self.master.botmaster.cleanShutdown(quickMode=quick, stopReactor=False)
def createLocalWorker(self, name, **kwargs):
workdir = FilePath(self.mktemp())
workdir.createDirectory()
return LocalWorker(name, workdir.path, **kwargs)
@defer.inlineCallbacks
def assertBuildResults(self, build_id, result):
dbdict = yield self.master.db.builds.getBuild(build_id)
self.assertEqual(result, dbdict.results)
@defer.inlineCallbacks
def assertStepStateString(self, step_id, state_string):
datadict = yield self.master.data.get(('steps', step_id))
self.assertEqual(datadict['state_string'], state_string)
@defer.inlineCallbacks
def assertLogs(self, build_id, exp_logs):
got_logs = {}
data_logs = yield self.master.data.get(('builds', build_id, 'steps', 1, 'logs'))
for log in data_logs:
self.assertTrue(log['complete'])
log_contents = yield self.master.data.get((
'builds',
build_id,
'steps',
1,
'logs',
log['slug'],
'contents',
))
got_logs[log['name']] = log_contents['content']
self.assertEqual(got_logs, exp_logs)
@defer.inlineCallbacks
def create_build_request(self, builder_ids, properties=None):
properties = properties.asDict() if properties is not None else None
ret = yield self.master.data.updates.addBuildset(
waited_for=False,
builderids=builder_ids,
sourcestamps=[
{'codebase': '', 'repository': '', 'branch': None, 'revision': None, 'project': ''},
],
properties=properties,
)
return ret
@defer.inlineCallbacks
def do_test_build_by_name(self, builder_name):
builder_id = yield self.master.data.updates.findBuilderId(builder_name)
yield self.do_test_build(builder_id)
@defer.inlineCallbacks
def do_test_build(self, builder_id):
# setup waiting for build to finish
d_finished = defer.Deferred()
def on_finished(_, __):
if not d_finished.called:
d_finished.callback(None)
consumer = yield self.master.mq.startConsuming(on_finished, ('builds', None, 'finished'))
# start the builder
yield self.create_build_request([builder_id])
# and wait for build completion
yield d_finished
yield consumer.stopConsuming()
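# A minimal BuildmasterConfig dictionary for the setup_master helpers in this
# module (hypothetical builder and scheduler names; assumes the usual
# `from buildbot.plugins import schedulers, steps, util` imports; RunMasterBase
# adds the 'local1' worker and the protocols entry itself):
#
#     config_dict = {
#         'schedulers': [
#             schedulers.ForceScheduler(name='force', builderNames=['testy']),
#         ],
#         'builders': [
#             util.BuilderConfig(
#                 name='testy',
#                 workernames=['local1'],
#                 factory=util.BuildFactory([steps.ShellCommand(command='true')]),
#             ),
#         ],
#     }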
class RunMasterBase(unittest.TestCase):
proto = "null"
    # All tests that start a master need a higher timeout due to test runtime
    # variability on oversubscribed hosts.
timeout = 60
if Worker is None:
skip = "buildbot-worker package is not installed"
@defer.inlineCallbacks
def setup_master(self, config_dict, startWorker=True, **worker_kwargs):
"""
        Set up and start a master configured by the given dictionary.
        @type config_dict: dict
        @param config_dict: The BuildmasterConfig dictionary.
"""
# mock reactor.stop (which trial *really* doesn't
# like test code to call!)
stop = mock.create_autospec(reactor.stop)
self.patch(reactor, 'stop', stop)
if startWorker:
if self.proto == 'pb':
proto = {"pb": {"port": "tcp:0:interface=127.0.0.1"}}
workerclass = worker.Worker
elif self.proto == 'msgpack':
proto = {"msgpack_experimental_v7": {"port": 0}}
workerclass = worker.Worker
elif self.proto == 'null':
proto = {"null": {}}
workerclass = worker.LocalWorker
else:
raise RuntimeError(f"{self.proto} protocol is not supported.")
config_dict['workers'] = [
workerclass("local1", password=Interpolate("localpw"), missing_timeout=0)
]
config_dict['protocols'] = proto
m = yield getMaster(self, reactor, config_dict)
self.master_config_dict = config_dict
self.master = m
self.assertFalse(stop.called, "startService tried to stop the reactor; check logs")
if not startWorker:
return
if self.proto in ('pb', 'msgpack'):
sandboxed_worker_path = os.environ.get("SANDBOXED_WORKER_PATH", None)
if self.proto == 'pb':
protocol = 'pb'
dispatcher = next(iter(m.pbmanager.dispatchers.values()))
else:
protocol = 'msgpack_experimental_v7'
dispatcher = next(iter(m.msgmanager.dispatchers.values()))
# We currently don't handle connection closing cleanly.
dispatcher.serverFactory.setProtocolOptions(closeHandshakeTimeout=0)
workerPort = dispatcher.port.getHost().port
            # create a worker and attach it to the master; it will be started
            # and stopped along with the master
worker_dir = FilePath(self.mktemp())
worker_dir.createDirectory()
if sandboxed_worker_path is None:
self.w = Worker(
"127.0.0.1",
workerPort,
"local1",
"localpw",
worker_dir.path,
False,
protocol=protocol,
**worker_kwargs,
)
else:
self.w = SandboxedWorker(
"127.0.0.1",
workerPort,
"local1",
"localpw",
worker_dir.path,
sandboxed_worker_path,
protocol=protocol,
**worker_kwargs,
)
self.addCleanup(self.w.shutdownWorker)
elif self.proto == 'null':
self.w = None
if self.w is not None:
yield self.w.setServiceParent(m)
@defer.inlineCallbacks
def dump():
if not self._passed:
dump = StringIO()
print("FAILED! dumping build db for debug", file=dump)
builds = yield self.master.data.get(("builds",))
for build in builds:
yield self.printBuild(build, dump, withLogs=True)
raise self.failureException(dump.getvalue())
self.addCleanup(dump)
@defer.inlineCallbacks
def doForceBuild(
self,
wantSteps=False,
wantProperties=False,
wantLogs=False,
useChange=False,
forceParams=None,
triggerCallback=None,
):
if forceParams is None:
forceParams = {}
# force a build, and wait until it is finished
d = defer.Deferred()
        # in order to allow trigger-based integration tests,
        # we wait until the first build that started is finished
self.firstbsid = None
def newCallback(_, data):
if self.firstbsid is None:
self.firstbsid = data['bsid']
newConsumer.stopConsuming()
def finishedCallback(_, data):
if self.firstbsid == data['bsid']:
d.callback(data)
newConsumer = yield self.master.mq.startConsuming(newCallback, ('buildsets', None, 'new'))
finishedConsumer = yield self.master.mq.startConsuming(
finishedCallback, ('buildsets', None, 'complete')
)
if triggerCallback is not None:
yield triggerCallback()
elif useChange is False:
# use data api to force a build
yield self.master.data.control("force", forceParams, ("forceschedulers", "force"))
else:
# use data api to force a build, via a new change
yield self.master.data.updates.addChange(**useChange)
# wait until we receive the build finished event
buildset = yield d
buildrequests = yield self.master.data.get(
('buildrequests',), filters=[resultspec.Filter('buildsetid', 'eq', [buildset['bsid']])]
)
buildrequest = buildrequests[-1]
builds = yield self.master.data.get(
('builds',),
filters=[resultspec.Filter('buildrequestid', 'eq', [buildrequest['buildrequestid']])],
)
# if the build has been retried, there will be several matching builds.
# We return the last build
build = builds[-1]
finishedConsumer.stopConsuming()
yield self.enrichBuild(build, wantSteps, wantProperties, wantLogs)
return build
@defer.inlineCallbacks
def enrichBuild(self, build, wantSteps=False, wantProperties=False, wantLogs=False):
# enrich the build result, with the step results
if wantSteps:
build["steps"] = yield self.master.data.get(("builds", build['buildid'], "steps"))
# enrich the step result, with the logs results
if wantLogs:
build["steps"] = list(build["steps"])
for step in build["steps"]:
step['logs'] = yield self.master.data.get(("steps", step['stepid'], "logs"))
step["logs"] = list(step['logs'])
for log in step["logs"]:
log['contents'] = yield self.master.data.get((
"logs",
log['logid'],
"contents",
))
if wantProperties:
build["properties"] = yield self.master.data.get((
"builds",
build['buildid'],
"properties",
))
@defer.inlineCallbacks
def printBuild(self, build, out=sys.stdout, withLogs=False):
# helper for debugging: print a build
yield self.enrichBuild(build, wantSteps=True, wantProperties=True, wantLogs=True)
print(
f"*** BUILD {build['buildid']} *** ==> {build['state_string']} "
f"({statusToString(build['results'])})",
file=out,
)
for step in build['steps']:
print(
f" *** STEP {step['name']} *** ==> {step['state_string']} "
f"({statusToString(step['results'])})",
file=out,
)
for url in step['urls']:
print(f" url:{url['name']} ({url['url']})", file=out)
for log in step['logs']:
print(f" log:{log['name']} ({log['num_lines']})", file=out)
if step['results'] != SUCCESS or withLogs:
self.printLog(log, out)
def _match_patterns_consume(self, text, patterns, is_regex):
for pattern in patterns[:]:
if is_regex:
if re.search(pattern, text):
patterns.remove(pattern)
else:
if pattern in text:
patterns.remove(pattern)
return patterns
@defer.inlineCallbacks
def checkBuildStepLogExist(self, build, expectedLog, onlyStdout=False, regex=False):
if isinstance(expectedLog, str):
expectedLog = [expectedLog]
if not isinstance(expectedLog, list):
raise RuntimeError(
'The expectedLog argument must be either string or a list of strings'
)
yield self.enrichBuild(build, wantSteps=True, wantProperties=True, wantLogs=True)
for step in build['steps']:
for log in step['logs']:
for line in log['contents']['content'].splitlines():
if onlyStdout and line[0] != 'o':
continue
expectedLog = self._match_patterns_consume(line, expectedLog, is_regex=regex)
if expectedLog:
print(f"{expectedLog} not found in logs")
return len(expectedLog) == 0
def printLog(self, log, out):
print(" " * 8 + f"*********** LOG: {log['name']} *********", file=out)
if log['type'] == 's':
for line in log['contents']['content'].splitlines():
linetype = line[0]
line = line[1:]
if linetype == 'h':
# cyan
line = "\x1b[36m" + line + "\x1b[0m"
if linetype == 'e':
# red
line = "\x1b[31m" + line + "\x1b[0m"
                print(" " * 8 + line, file=out)
        else:
            print(log['contents']['content'], file=out)
print(" " * 8 + "********************************", file=out)
# File: buildbot_buildbot/master/buildbot/test/util/tuplematching.py

# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
class TupleMatchingMixin:
# a bunch of tuple-matching tests that all call do_test_match
# this is used to test this behavior in a few places
def do_test_match(self, routingKey, shouldMatch, *tuples):
raise NotImplementedError
def test_simple_tuple_match(self):
return self.do_test_match(('abc',), True, ('abc',))
def test_simple_tuple_no_match(self):
return self.do_test_match(('abc',), False, ('def',))
def test_multiple_tuple_match(self):
return self.do_test_match(('a', 'b', 'c'), True, ('a', 'b', 'c'))
def test_multiple_tuple_match_tuple_prefix(self):
return self.do_test_match(('a', 'b', 'c'), False, ('a', 'b'))
def test_multiple_tuple_match_tuple_suffix(self):
return self.do_test_match(('a', 'b', 'c'), False, ('b', 'c'))
def test_multiple_tuple_match_rk_prefix(self):
return self.do_test_match(('a', 'b'), False, ('a', 'b', 'c'))
def test_multiple_tuple_match_rk_suffix(self):
return self.do_test_match(('b', 'c'), False, ('a', 'b', 'c'))
def test_None_match(self):
return self.do_test_match(('a', 'b', 'c'), True, ('a', None, 'c'))
def test_None_match_empty(self):
return self.do_test_match(('a', '', 'c'), True, ('a', None, 'c'))
def test_None_no_match(self):
return self.do_test_match(('a', 'b', 'c'), False, ('a', None, 'x'))
# File: buildbot_buildbot/master/buildbot/test/util/properties.py

# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
from zope.interface import implementer
from buildbot.interfaces import IRenderable
@implementer(IRenderable)
class ConstantRenderable:
def __init__(self, value):
self.value = value
def getRenderingFor(self, props):
return self.value
# File: buildbot_buildbot/master/buildbot/test/util/logging.py

# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
import re
from twisted.python import log
class LoggingMixin:
def setUpLogging(self):
self._logEvents = []
log.addObserver(self._logEvents.append)
self.addCleanup(log.removeObserver, self._logEvents.append)
def logContainsMessage(self, regexp):
r = re.compile(regexp)
for event in self._logEvents:
msg = log.textFromEventDict(event)
if msg is not None:
assert not msg.startswith("Unable to format event"), msg
if msg is not None and r.search(msg):
return True
return False
def assertLogged(self, regexp):
if not self.logContainsMessage(regexp):
lines = [log.textFromEventDict(e) for e in self._logEvents]
self.fail(f"{regexp!r} not matched in log output.\n{lines} ")
def assertNotLogged(self, regexp):
if self.logContainsMessage(regexp):
lines = [log.textFromEventDict(e) for e in self._logEvents]
self.fail(f"{regexp!r} matched in log output.\n{lines} ")
def assertWasQuiet(self):
self.assertEqual([log.textFromEventDict(event) for event in self._logEvents], [])
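# Example usage (hypothetical): capture log events and assert on their content.
#
#     class MyLogTest(unittest.TestCase, LoggingMixin):
#         def test_logs_connection_loss(self):
#             self.setUpLogging()
#             log.msg("connection to worker lost")
#             self.assertLogged(r"connection .* lost")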
# File: buildbot_buildbot/master/buildbot/test/util/sandboxed_worker.py

# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
import subprocess
from twisted.internet import defer
from twisted.internet import protocol
from twisted.internet import reactor
from buildbot.util.service import AsyncService
class WorkerProcessProtocol(protocol.ProcessProtocol):
def __init__(self):
self.finished_deferred = defer.Deferred()
def outReceived(self, data):
print(data)
def errReceived(self, data):
print(data)
def processEnded(self, _):
self.finished_deferred.callback(None)
def waitForFinish(self):
return self.finished_deferred
class SandboxedWorker(AsyncService):
def __init__(
self, masterhost, port, name, passwd, workerdir, sandboxed_worker_path, protocol='pb'
):
self.masterhost = masterhost
self.port = port
self.workername = name
self.workerpasswd = passwd
self.workerdir = workerdir
self.sandboxed_worker_path = sandboxed_worker_path
self.protocol = protocol
self.worker = None
def startService(self):
# Note that we create the worker with sync API
# We don't really care as we are in tests
res = subprocess.run(
[
self.sandboxed_worker_path,
"create-worker",
f'--protocol={self.protocol}',
'-q',
self.workerdir,
self.masterhost + ":" + str(self.port),
self.workername,
self.workerpasswd,
],
capture_output=True,
check=False,
)
if res.returncode != 0:
# we do care about finding out why it failed though
raise RuntimeError(
"\n".join(["Unable to create worker!", res.stdout.decode(), res.stderr.decode()])
)
self.processprotocol = processProtocol = WorkerProcessProtocol()
# we need to spawn the worker asynchronously though
args = [self.sandboxed_worker_path, 'start', '--nodaemon', self.workerdir]
self.process = reactor.spawnProcess(processProtocol, self.sandboxed_worker_path, args=args)
self.worker = self.master.workers.getWorkerByName(self.workername)
return super().startService()
@defer.inlineCallbacks
def shutdownWorker(self):
if self.worker is None:
return
        # On Windows, killing a process does not work well, so we use the
        # graceful shutdown feature of buildbot-worker instead to stop the
        # worker; we must do that before the master stops.
yield self.worker.shutdown()
# wait for process to disappear
yield self.processprotocol.waitForFinish()
# File: buildbot_buildbot/master/buildbot/test/util/interfaces.py

# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
from __future__ import annotations
import inspect
from collections import OrderedDict
from typing import Callable
import zope.interface.interface
from zope.interface.interface import Attribute
class InterfaceTests:
# assertions
assertEqual: Callable[..., None]
def assertArgSpecMatches(self, actualMethod, *fakeMethods):
"""Usage::
@self.assertArgSpecMatches(obj.methodUnderTest)
def methodTemplate(self, arg1, arg2):
pass
or, more useful when you will be faking out C{methodUnderTest}:
self.assertArgSpecMatches(obj.methodUnderTest, self.fakeMethod)
"""
def filter(signature: inspect.Signature):
parameters = OrderedDict(signature.parameters)
for name in parameters:
if name == 'self':
parameters.pop('self')
break
delete_names = []
for name in parameters:
if name.startswith('_'):
delete_names.append(name)
for name in delete_names:
parameters.pop(name)
            # Remove all type annotations: since they can be stored as str
            # when quoted or when `from __future__ import annotations` is
            # used, we can't check whether the types are compatible.
            # Type checking should be left to a static type checker.
signature = signature.replace(return_annotation=inspect.Signature.empty)
for name, param in parameters.items():
parameters[name] = param.replace(annotation=inspect.Parameter.empty)
signature = signature.replace(parameters=list(parameters.values()))
return signature
def remove_decorators(func):
try:
return func.__wrapped__
except AttributeError:
return func
def filter_argspec(func):
return filter(inspect.signature(remove_decorators(func)))
def assert_same_argspec(expected, actual):
if expected != actual:
msg = f"Expected: {expected}; got: {actual}"
self.fail(msg)
actual_argspec = filter_argspec(actualMethod)
for fakeMethod in fakeMethods:
fake_argspec = filter_argspec(fakeMethod)
assert_same_argspec(actual_argspec, fake_argspec)
def assert_same_argspec_decorator(decorated):
expected_argspec = filter_argspec(decorated)
assert_same_argspec(expected_argspec, actual_argspec)
# The decorated function works as usual.
return decorated
return assert_same_argspec_decorator
def assertInterfacesImplemented(self, cls):
"Given a class, assert that the zope.interface.Interfaces are implemented to specification."
for interface in zope.interface.implementedBy(cls):
for attr, template_argspec in interface.namesAndDescriptions():
if not hasattr(cls, attr):
                    msg = f"Expected {cls!r} to implement {attr} as specified in {interface!r}"
self.fail(msg)
actual_argspec = getattr(cls, attr)
if isinstance(template_argspec, Attribute):
continue
# else check method signatures
while hasattr(actual_argspec, '__wrapped__'):
actual_argspec = actual_argspec.__wrapped__
actual_argspec = zope.interface.interface.fromMethod(actual_argspec)
if actual_argspec.getSignatureInfo() != template_argspec.getSignatureInfo():
msg = (
f"{attr}: expected: {template_argspec.getSignatureString()}; got: "
f"{actual_argspec.getSignatureString()}"
)
self.fail(msg)
# File: buildbot_buildbot/master/buildbot/test/util/endpoint.py

# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
from __future__ import annotations
from twisted import trial
from twisted.internet import defer
from buildbot.data import base
from buildbot.data import resultspec
from buildbot.test.fake import fakemaster
from buildbot.test.reactor import TestReactorMixin
from buildbot.test.util import interfaces
from buildbot.test.util import validation
from buildbot.util import pathmatch
class EndpointMixin(TestReactorMixin, interfaces.InterfaceTests):
# test mixin for testing Endpoint subclasses
# class being tested
endpointClass: type[base.Endpoint] | None = None
# the corresponding resource type - this will be instantiated at
# self.data.rtypes[rtype.type] and self.rtype
resourceTypeClass: type[base.ResourceType] | None = None
@defer.inlineCallbacks
def setUpEndpoint(self):
self.setup_test_reactor(auto_tear_down=False)
self.master = yield fakemaster.make_master(self, wantMq=True, wantDb=True, wantData=True)
self.db = self.master.db
self.mq = self.master.mq
self.data = self.master.data
self.matcher = pathmatch.Matcher()
rtype = self.rtype = self.resourceTypeClass(self.master)
setattr(self.data.rtypes, rtype.name, rtype)
self.ep = self.endpointClass(rtype, self.master)
# this usually fails when a single-element pathPattern does not have a
# trailing comma
pathPatterns = self.ep.pathPatterns.split()
for pp in pathPatterns:
if pp == '/':
continue
if not pp.startswith('/') or pp.endswith('/'):
raise AssertionError(f"invalid pattern {pp!r}")
pathPatterns = [tuple(pp.split('/')[1:]) for pp in pathPatterns]
for pp in pathPatterns:
self.matcher[pp] = self.ep
self.pathArgs = [
{arg.split(':', 1)[1] for arg in pp if ':' in arg}
for pp in pathPatterns
if pp is not None
]
@defer.inlineCallbacks
def tearDownEndpoint(self):
yield self.tear_down_test_reactor()
def validateData(self, object):
validation.verifyData(self, self.rtype.entityType, {}, object)
# call methods, with extra checks
@defer.inlineCallbacks
def callGet(self, path, resultSpec=None):
self.assertIsInstance(path, tuple)
if resultSpec is None:
resultSpec = resultspec.ResultSpec()
endpoint, kwargs = self.matcher[path]
self.assertIdentical(endpoint, self.ep)
rv = yield endpoint.get(resultSpec, kwargs)
if self.ep.kind == base.EndpointKind.COLLECTION:
self.assertIsInstance(rv, (list, base.ListResult))
else:
self.assertIsInstance(rv, (dict, type(None)))
return rv
def callControl(self, action, args, path):
self.assertIsInstance(path, tuple)
endpoint, kwargs = self.matcher[path]
self.assertIdentical(endpoint, self.ep)
d = self.ep.control(action, args, kwargs)
self.assertIsInstance(d, defer.Deferred)
return d
# interface tests
def test_get_spec(self):
try:
@self.assertArgSpecMatches(self.ep.get)
def get(self, resultSpec, kwargs):
pass
except trial.unittest.FailTest:
@self.assertArgSpecMatches(self.ep.get)
def get(self, result_spec, kwargs):
pass
def test_control_spec(self):
@self.assertArgSpecMatches(self.ep.control)
def control(self, action, args, kwargs):
pass
def test_rootLinkName(self):
rootLinkName = self.ep.rootLinkName
if not rootLinkName:
return
try:
self.assertEqual(self.matcher[(rootLinkName,)][0], self.ep)
except KeyError:
self.fail('No match for rootlink: ' + rootLinkName)
# File: buildbot_buildbot/master/buildbot/test/util/validation.py

# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
# See "Type Validation" in master/docs/developer/tests.rst
from __future__ import annotations
import datetime
import json
import re
from buildbot.util import UTC
from buildbot.util import bytes2unicode
# Base class
validatorsByName = {}
class Validator:
name: str | None = None
hasArgs = False
def validate(self, name, object):
raise NotImplementedError
    def __init_subclass__(cls, **kwargs):
        # Register each named validator. The Python 2 style ``__metaclass__``
        # this replaces is silently ignored on Python 3, so the registry was
        # never actually populated.
        super().__init_subclass__(**kwargs)
        name = cls.__dict__.get('name')
        if name:
            assert name not in validatorsByName
            validatorsByName[name] = cls
# Basic types
class InstanceValidator(Validator):
types: tuple[type] | tuple[()] = ()
def validate(self, name, object):
if not isinstance(object, self.types):
yield f"{name} ({object!r}) is not a {self.name or repr(self.types)}"
class IntValidator(InstanceValidator):
types = (int,)
name = 'integer'
class BooleanValidator(InstanceValidator):
types = (bool,)
name = 'boolean'
class StringValidator(InstanceValidator):
# strings must be unicode
types = (str,)
name = 'string'
class BinaryValidator(InstanceValidator):
types = (bytes,)
name = 'bytestring'
class StrValidator(InstanceValidator):
types = (str,)
name = 'str'
class DateTimeValidator(Validator):
types = (datetime.datetime,)
name = 'datetime'
def validate(self, name, object):
if not isinstance(object, datetime.datetime):
yield f"{name} - {object!r} - is not a datetime"
elif object.tzinfo != UTC:
yield f"{name} is not a UTC datetime"
class IdentifierValidator(Validator):
types = (str,)
name = 'identifier'
hasArgs = True
ident_re = re.compile(
'^[a-zA-Z\u00a0-\U0010ffff_-][a-zA-Z0-9\u00a0-\U0010ffff_-]*$', flags=re.UNICODE
)
def __init__(self, len):
self.len = len
    def validate(self, name, object):
        if not isinstance(object, str):
            yield f"{name} - {object!r} - is not a unicode string"
        elif not object:
            # check emptiness before the regex, which also rejects ''
            yield f"{name} - identifiers cannot be an empty string"
        elif not self.ident_re.match(object):
            yield f"{name} - {object!r} - is not an identifier"
        elif len(object) > self.len:
            yield f"{name} - {object!r} - is longer than {self.len} characters"
# Miscellaneous
class NoneOk:
def __init__(self, original):
self.original = original
def validate(self, name, object):
if object is None:
return
else:
yield from self.original.validate(name, object)
class Any:
    def validate(self, name, object):
        # accept anything; yield from an empty iterable so this is a
        # generator like every other validate() and composes with yield from
        yield from ()
# Compound Types
class DictValidator(Validator):
name = 'dict'
def __init__(self, optionalNames=None, **keys):
if optionalNames is None:
optionalNames = []
self.optionalNames = set(optionalNames)
self.keys = keys
self.expectedNames = set(keys.keys())
def validate(self, name, object):
# this uses isinstance, allowing dict subclasses as used by the DB API
if not isinstance(object, dict):
yield f"{name} ({object!r}) is not a dictionary (got type {type(object)})"
return
gotNames = set(object.keys())
unexpected = gotNames - self.expectedNames
if unexpected:
yield f'{name} has unexpected keys {", ".join([repr(n) for n in unexpected])}'
missing = self.expectedNames - self.optionalNames - gotNames
if missing:
yield f'{name} is missing keys {", ".join([repr(n) for n in missing])}'
for k in gotNames & self.expectedNames:
yield from self.keys[k].validate(f"{name}[{k!r}]", object[k])
class SequenceValidator(Validator):
type: type | None = None
def __init__(self, elementValidator):
self.elementValidator = elementValidator
def validate(self, name, object):
if not isinstance(object, self.type):
yield f"{name} ({object!r}) is not a {self.name}"
return
for idx, elt in enumerate(object):
yield from self.elementValidator.validate(f"{name}[{idx}]", elt)
class ListValidator(SequenceValidator):
type = list
name = 'list'
class TupleValidator(SequenceValidator):
type = tuple
name = 'tuple'
class StringListValidator(ListValidator):
name = 'string-list'
def __init__(self):
super().__init__(StringValidator())
class SourcedPropertiesValidator(Validator):
name = 'sourced-properties'
def validate(self, name, object):
if not isinstance(object, dict):
yield f"{name} is not sourced properties (not a dict)"
return
        for k, v in object.items():
            if not isinstance(k, str):
                yield f"{name} property name {k!r} is not unicode"
            if not isinstance(v, tuple) or len(v) != 2:
                yield f"{name} property value for {k!r} is not a 2-tuple"
                # this value cannot be unpacked; keep checking the others
                continue
            propval, propsrc = v
if not isinstance(propsrc, str):
yield f"{name}[{k}] source {propsrc!r} is not unicode"
try:
json.dumps(propval)
except (TypeError, ValueError):
yield f"{name}[{k!r}] value is not JSON-able"
class JsonValidator(Validator):
name = 'json'
def validate(self, name, object):
try:
json.dumps(object)
except (TypeError, ValueError):
yield f"{name}[{object!r}] value is not JSON-able"
class PatchValidator(Validator):
name: str | None = 'patch' # type: ignore[assignment]
validator = DictValidator(
body=NoneOk(BinaryValidator()),
level=NoneOk(IntValidator()),
subdir=NoneOk(StringValidator()),
author=NoneOk(StringValidator()),
comment=NoneOk(StringValidator()),
)
def validate(self, name, object):
yield from self.validator.validate(name, object)
class MessageValidator(Validator):
routingKeyValidator = TupleValidator(StrValidator())
def __init__(self, events, messageValidator):
self.events = [bytes2unicode(e) for e in set(events)]
self.messageValidator = messageValidator
    def validate(self, name, routingKey_message):
        try:
            routingKey, message = routingKey_message
        except (TypeError, ValueError) as e:
            yield f"{routingKey_message!r}: not a routing key and message: {e}"
            return  # nothing further can be validated without the pair
routingKeyBad = False
for msg in self.routingKeyValidator.validate("routingKey", routingKey):
yield msg
routingKeyBad = True
if not routingKeyBad:
event = routingKey[-1]
if event not in self.events:
yield f"routing key event {event!r} is not valid"
yield from self.messageValidator.validate(f"{routingKey[0]} message", message)
class Selector(Validator):
def __init__(self):
self.selectors = []
def add(self, selector, validator):
self.selectors.append((selector, validator))
    def validate(self, name, arg_object):
        try:
            arg, object = arg_object
        except (TypeError, ValueError) as e:
            yield f"{arg_object!r}: not a (data options, data dict) pair: {e}"
            return  # no validator can be selected without the options argument
for selector, validator in self.selectors:
if selector is None or selector(arg):
yield from validator.validate(name, object)
return
yield f"no match for selector argument {arg!r}"
# Type definitions
message = {}
dbdict = {}
# parse a ResourceType class's dataFields and use them to build a validator
# masters
message['masters'] = Selector()
message['masters'].add(
None,
MessageValidator(
events=[b'started', b'stopped'],
messageValidator=DictValidator(
masterid=IntValidator(),
name=StringValidator(),
active=BooleanValidator(),
# last_active is not included
),
),
)
dbdict['masterdict'] = DictValidator(
id=IntValidator(),
name=StringValidator(),
active=BooleanValidator(),
last_active=DateTimeValidator(),
)
# sourcestamp
_sourcestamp = {
"ssid": IntValidator(),
"branch": NoneOk(StringValidator()),
"revision": NoneOk(StringValidator()),
"repository": StringValidator(),
"project": StringValidator(),
"codebase": StringValidator(),
"created_at": DateTimeValidator(),
"patch": NoneOk(
DictValidator(
body=NoneOk(BinaryValidator()),
level=NoneOk(IntValidator()),
subdir=NoneOk(StringValidator()),
author=NoneOk(StringValidator()),
comment=NoneOk(StringValidator()),
)
),
}
message['sourcestamps'] = Selector()
message['sourcestamps'].add(None, DictValidator(**_sourcestamp))
# builder
message['builders'] = Selector()
message['builders'].add(
None,
MessageValidator(
events=[b'started', b'stopped'],
messageValidator=DictValidator(
builderid=IntValidator(),
masterid=IntValidator(),
name=StringValidator(),
),
),
)
# buildset
_buildset = {
"bsid": IntValidator(),
"external_idstring": NoneOk(StringValidator()),
"reason": StringValidator(),
"submitted_at": IntValidator(),
"complete": BooleanValidator(),
"complete_at": NoneOk(IntValidator()),
"results": NoneOk(IntValidator()),
"parent_buildid": NoneOk(IntValidator()),
"parent_relationship": NoneOk(StringValidator()),
}
_buildsetEvents = [b'new', b'complete']
message['buildsets'] = Selector()
message['buildsets'].add(
lambda k: k[-1] == 'new',
MessageValidator(
events=_buildsetEvents,
messageValidator=DictValidator(
scheduler=StringValidator(), # only for 'new'
sourcestamps=ListValidator(DictValidator(**_sourcestamp)),
**_buildset,
),
),
)
message['buildsets'].add(
None,
MessageValidator(
events=_buildsetEvents,
messageValidator=DictValidator(
sourcestamps=ListValidator(DictValidator(**_sourcestamp)), **_buildset
),
),
)
# buildrequest
message['buildrequests'] = Selector()
message['buildrequests'].add(
None,
MessageValidator(
events=[b'new', b'claimed', b'unclaimed'],
messageValidator=DictValidator(
# TODO: probably wrong!
brid=IntValidator(),
builderid=IntValidator(),
bsid=IntValidator(),
buildername=StringValidator(),
),
),
)
# change
message['changes'] = Selector()
message['changes'].add(
None,
MessageValidator(
events=[b'new'],
messageValidator=DictValidator(
changeid=IntValidator(),
parent_changeids=ListValidator(IntValidator()),
author=StringValidator(),
committer=StringValidator(),
files=ListValidator(StringValidator()),
comments=StringValidator(),
revision=NoneOk(StringValidator()),
when_timestamp=IntValidator(),
branch=NoneOk(StringValidator()),
category=NoneOk(StringValidator()),
revlink=NoneOk(StringValidator()),
properties=SourcedPropertiesValidator(),
repository=StringValidator(),
project=StringValidator(),
codebase=StringValidator(),
sourcestamp=DictValidator(**_sourcestamp),
),
),
)
# builds
_build = {
"buildid": IntValidator(),
"number": IntValidator(),
"builderid": IntValidator(),
"buildrequestid": IntValidator(),
"workerid": IntValidator(),
"masterid": IntValidator(),
"started_at": IntValidator(),
"complete": BooleanValidator(),
"complete_at": NoneOk(IntValidator()),
"state_string": StringValidator(),
"results": NoneOk(IntValidator()),
}
_buildEvents = [b'new', b'complete']
message['builds'] = Selector()
message['builds'].add(
None, MessageValidator(events=_buildEvents, messageValidator=DictValidator(**_build))
)
# Validates DATA API layer
dbdict['builddict'] = DictValidator(
id=IntValidator(),
number=IntValidator(),
builderid=IntValidator(),
buildrequestid=IntValidator(),
workerid=IntValidator(),
masterid=IntValidator(),
started_at=DateTimeValidator(),
complete_at=NoneOk(DateTimeValidator()),
locks_duration_s=IntValidator(),
state_string=StringValidator(),
results=NoneOk(IntValidator()),
properties=NoneOk(SourcedPropertiesValidator()),
)
# build data
_build_data_msgdict = DictValidator(
buildid=IntValidator(),
name=StringValidator(),
value=NoneOk(BinaryValidator()),
length=IntValidator(),
source=StringValidator(),
)
message['build_data'] = Selector()
message['build_data'].add(None, MessageValidator(events=[], messageValidator=_build_data_msgdict))
# steps
_step = {
"stepid": IntValidator(),
"number": IntValidator(),
"name": IdentifierValidator(50),
"buildid": IntValidator(),
"started_at": IntValidator(),
"complete": BooleanValidator(),
"complete_at": NoneOk(IntValidator()),
"state_string": StringValidator(),
"results": NoneOk(IntValidator()),
"urls": ListValidator(StringValidator()),
"hidden": BooleanValidator(),
}
_stepEvents = [b'new', b'complete']
message['steps'] = Selector()
message['steps'].add(
None, MessageValidator(events=_stepEvents, messageValidator=DictValidator(**_step))
)
# logs
_log = {
"logid": IntValidator(),
"name": IdentifierValidator(50),
"stepid": IntValidator(),
"complete": BooleanValidator(),
"num_lines": IntValidator(),
"type": IdentifierValidator(1),
}
_logEvents = ['new', 'complete', 'appended']
# test results sets
_test_result_set_msgdict = DictValidator(
builderid=IntValidator(),
buildid=IntValidator(),
stepid=IntValidator(),
description=NoneOk(StringValidator()),
category=StringValidator(),
value_unit=StringValidator(),
tests_passed=NoneOk(IntValidator()),
tests_failed=NoneOk(IntValidator()),
complete=BooleanValidator(),
)
message['test_result_sets'] = Selector()
message['test_result_sets'].add(
None, MessageValidator(events=[b'new', b'completed'], messageValidator=_test_result_set_msgdict)
)
# test results
_test_results_msgdict = DictValidator(
builderid=IntValidator(),
test_result_setid=IntValidator(),
test_name=NoneOk(StringValidator()),
test_code_path=NoneOk(StringValidator()),
line=NoneOk(IntValidator()),
duration_ns=NoneOk(IntValidator()),
value=StringValidator(),
)
message['test_results'] = Selector()
message['test_results'].add(
None, MessageValidator(events=[b'new'], messageValidator=_test_results_msgdict)
)
# external functions
def _verify(testcase, validator, name, object):
msgs = list(validator.validate(name, object))
if msgs:
msg = "; ".join(msgs)
if testcase:
testcase.fail(msg)
else:
raise AssertionError(msg)
def verifyMessage(testcase, routingKey, message_):
# the validator is a Selector wrapping a MessageValidator, so we need to
# pass (arg, (routingKey, message)), where the routing key is the arg
# the "type" of the message is identified by last path name
# -1 being the event, and -2 the id.
validator = message[bytes2unicode(routingKey[-3])]
_verify(testcase, validator, '', (routingKey, (routingKey, message_)))
def verifyDbDict(testcase, type, value):
_verify(testcase, dbdict[type], type, value)
def verifyData(testcase, entityType, options, value):
_verify(testcase, entityType, entityType.name, value)
def verifyType(testcase, name, value, validator):
_verify(testcase, validator, name, value)
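# The helpers above are normally called from test cases. The function below is
# an illustrative sketch only - the values are hypothetical fixtures - showing
# how an ad-hoc validator and a registered dbdict validator are exercised.
def _example_verify_usage(testcase):
    # fails the test (or raises AssertionError) if a message is yielded
    verifyType(testcase, 'workername', 'worker-1', IdentifierValidator(50))
    verifyDbDict(
        testcase,
        'masterdict',
        {
            'id': 1,
            'name': 'master-a',
            'active': True,
            'last_active': datetime.datetime(2024, 1, 1, tzinfo=UTC),
        },
    )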
# file: buildbot_buildbot/master/buildbot/test/util/changesource.py
# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
from twisted.internet import defer
from buildbot.test.fake import fakemaster
class ChangeSourceMixin:
"""
This class is used for testing change sources, and handles a few things:
- starting and stopping a ChangeSource service
- a fake master with a data API implementation
"""
changesource = None
started = False
DUMMY_CHANGESOURCE_ID = 20
OTHER_MASTER_ID = 93
DEFAULT_NAME = "ChangeSource"
@defer.inlineCallbacks
def setUpChangeSource(self, want_real_reactor: bool = False):
"Set up the mixin - returns a deferred."
self.master = yield fakemaster.make_master(
self, wantDb=True, wantData=True, wantRealReactor=want_real_reactor
)
assert not hasattr(self.master, 'addChange') # just checking..
@defer.inlineCallbacks
def tearDownChangeSource(self):
"Tear down the mixin - returns a deferred."
if not self.started:
return
if self.changesource.running:
yield self.changesource.stopService()
yield self.changesource.disownServiceParent()
return
@defer.inlineCallbacks
def attachChangeSource(self, cs):
self.changesource = cs
yield self.changesource.setServiceParent(self.master)
yield self.changesource.configureService()
return cs
def startChangeSource(self):
"start the change source as a service"
self.started = True
return self.changesource.startService()
@defer.inlineCallbacks
def stopChangeSource(self):
"stop the change source again; returns a deferred"
yield self.changesource.stopService()
self.started = False
def setChangeSourceToMaster(self, otherMaster):
# some tests build the CS late, so for those tests we will require that
# they use the default name in order to run tests that require master
# assignments
if self.changesource is not None:
name = self.changesource.name
else:
name = self.DEFAULT_NAME
self.master.data.updates.changesourceIds[name] = self.DUMMY_CHANGESOURCE_ID
if otherMaster:
self.master.data.updates.changesourceMasters[self.DUMMY_CHANGESOURCE_ID] = otherMaster
else:
del self.master.data.updates.changesourceMasters[self.DUMMY_CHANGESOURCE_ID]
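# Illustrative sketch of the intended call sequence. Both arguments are
# hypothetical stand-ins: `test` is a TestCase mixing in ChangeSourceMixin and
# `change_source` is whatever change source the real test exercises.
@defer.inlineCallbacks
def _example_lifecycle(test, change_source):
    yield test.setUpChangeSource()
    yield test.attachChangeSource(change_source)
    yield test.startChangeSource()
    yield test.stopChangeSource()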
# file: buildbot_buildbot/master/buildbot/test/util/scheduler.py
# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
from __future__ import annotations
from twisted.internet import defer
from buildbot.process.properties import Properties
from buildbot.schedulers import base
from buildbot.test import fakedb
from buildbot.test.fake import fakemaster
from buildbot.test.util import interfaces
class SchedulerMixin(interfaces.InterfaceTests):
"""
This class fakes out enough of a master and the various relevant database
connectors to test schedulers. All of the database methods have identical
signatures to the real database connectors, but for ease of testing always
return an already-fired Deferred, meaning that there is no need to wait for
events to complete.
This class is tightly coupled with the various L{buildbot.test.fake.fakedb}
module. All instance variables are only available after C{attachScheduler}
has been called.
@ivar sched: scheduler instance
@ivar master: the fake master
@ivar db: the fake db (same as C{self.master.db}, but shorter)
"""
OTHER_MASTER_ID = 93
@defer.inlineCallbacks
def setUpScheduler(self):
self.master = yield fakemaster.make_master(self, wantDb=True, wantMq=True, wantData=True)
def tearDownScheduler(self):
pass
@defer.inlineCallbacks
def attachScheduler(
self, scheduler, objectid, schedulerid, overrideBuildsetMethods=False, createBuilderDB=False
):
"""Set up a scheduler with a fake master and db; sets self.sched, and
sets the master's basedir to the absolute path of 'basedir' in the test
directory.
If C{overrideBuildsetMethods} is true, then all of the
addBuildsetForXxx methods are overridden to simply append the method
name and arguments to self.addBuildsetCalls. These overridden methods
return buildsets starting with 500 and buildrequest IDs starting with
100.
For C{addBuildsetForSourceStamp}, this also overrides DB API methods
C{addSourceStamp} and C{addSourceStampSet}, and uses that information
to generate C{addBuildsetForSourceStamp} results.
@returns: scheduler
"""
scheduler.objectid = objectid
# set up a fake master
db = self.db = self.master.db
self.mq = self.master.mq
scheduler.setServiceParent(self.master)
rows = [
fakedb.Scheduler(id=schedulerid, name=scheduler.name),
]
if createBuilderDB is True:
rows.extend([
fakedb.Builder(id=300 + i, name=bname)
for i, bname in enumerate(scheduler.builderNames)
])
yield db.insert_test_data(rows)
if overrideBuildsetMethods:
self.assertArgSpecMatches(
scheduler.addBuildsetForSourceStampsWithDefaults,
self.fake_addBuildsetForSourceStampsWithDefaults,
)
scheduler.addBuildsetForSourceStampsWithDefaults = (
self.fake_addBuildsetForSourceStampsWithDefaults
)
self.assertArgSpecMatches(
scheduler.addBuildsetForChanges, self.fake_addBuildsetForChanges
)
scheduler.addBuildsetForChanges = self.fake_addBuildsetForChanges
self.assertArgSpecMatches(
scheduler.addBuildsetForSourceStamps, self.fake_addBuildsetForSourceStamps
)
scheduler.addBuildsetForSourceStamps = self.fake_addBuildsetForSourceStamps
self.addBuildsetCalls = []
self._bsidGenerator = iter(range(500, 999))
self._bridGenerator = iter(range(100, 999))
# temporarily override the sourcestamp and sourcestampset methods
self.addedSourceStamps = []
self.addedSourceStampSets = []
def fake_addSourceStamp(**kwargs):
self.assertEqual(
kwargs['sourcestampsetid'], 400 + len(self.addedSourceStampSets) - 1
)
self.addedSourceStamps.append(kwargs)
return defer.succeed(300 + len(self.addedSourceStamps) - 1)
self.db.sourcestamps.addSourceStamp = fake_addSourceStamp
def fake_addSourceStampSet():
self.addedSourceStampSets.append([])
return defer.succeed(400 + len(self.addedSourceStampSets) - 1)
self.db.sourcestamps.addSourceStampSet = fake_addSourceStampSet
# patch methods to detect a failure to upcall the activate and
# deactivate methods .. unless we're testing BaseScheduler
def patch(meth):
oldMethod = getattr(scheduler, meth)
@defer.inlineCallbacks
def newMethod():
self._parentMethodCalled = False
rv = yield oldMethod()
self.assertTrue(self._parentMethodCalled, f"'{meth}' did not call its parent")
return rv
setattr(scheduler, meth, newMethod)
oldParent = getattr(base.BaseScheduler, meth)
def newParent(self_):
self._parentMethodCalled = True
return oldParent(self_)
self.patch(base.BaseScheduler, meth, newParent)
if scheduler.__class__.activate != base.BaseScheduler.activate:
patch('activate')
if scheduler.__class__.deactivate != base.BaseScheduler.deactivate:
patch('deactivate')
self.sched = scheduler
return scheduler
@defer.inlineCallbacks
def setSchedulerToMaster(self, otherMaster):
sched_id = yield self.master.data.updates.findSchedulerId(self.sched.name)
if otherMaster:
self.master.data.updates.schedulerMasters[sched_id] = otherMaster
else:
del self.master.data.updates.schedulerMasters[sched_id]
class FakeChange:
who = ''
files: list[str] = []
comments = ''
isdir = 0
links = None
revision = None
when = None
branch = None
category = None
number = None
revlink = ''
properties: dict[str, str] = {}
repository = ''
project = ''
codebase = ''
def makeFakeChange(self, **kwargs):
"""Utility method to make a fake Change object with the given
attributes"""
ch = self.FakeChange()
ch.__dict__.update(kwargs)
properties = ch.properties
ch.properties = Properties()
ch.properties.update(properties, "Change")
return ch
@defer.inlineCallbacks
def addFakeChange(self, change):
old_change_number = change.number
change.number = yield self.master.db.changes.addChange(
author=change.who,
files=change.files,
comments=change.comments,
revision=change.revision,
when_timestamp=change.when,
branch=change.branch,
category=change.category,
revlink=change.revlink,
properties=change.properties.asDict(),
repository=change.repository,
codebase=change.codebase,
project=change.project,
_test_changeid=change.number,
)
if old_change_number is not None:
self.assertEqual(change.number, old_change_number)
return change
@defer.inlineCallbacks
def _addBuildsetReturnValue(self, builderNames):
if builderNames is None:
builderNames = self.sched.builderNames
builderids = []
builders = yield self.db.builders.getBuilders()
for builderName in builderNames:
for bldrDict in builders:
if builderName == bldrDict.name:
builderids.append(bldrDict.id)
break
assert len(builderids) == len(builderNames)
bsid = next(self._bsidGenerator)
brids = dict(zip(builderids, self._bridGenerator))
return (bsid, brids)
@defer.inlineCallbacks
def assert_classifications(self, schedulerid, expected_classifications):
classifications = yield self.master.db.schedulers.getChangeClassifications(schedulerid)
self.assertEqual(classifications, expected_classifications)
def fake_addBuildsetForSourceStampsWithDefaults(
self,
reason,
sourcestamps=None,
waited_for=False,
properties=None,
builderNames=None,
priority=None,
**kw,
):
properties = properties.asDict() if properties is not None else None
self.assertIsInstance(sourcestamps, list)
def sourceStampKey(sourceStamp):
return sourceStamp.get("codebase")
sourcestamps = sorted(sourcestamps, key=sourceStampKey)
self.addBuildsetCalls.append((
'addBuildsetForSourceStampsWithDefaults',
{
"reason": reason,
"sourcestamps": sourcestamps,
"waited_for": waited_for,
"properties": properties,
"builderNames": builderNames,
"priority": priority,
},
))
return self._addBuildsetReturnValue(builderNames)
def fake_addBuildsetForChanges(
self,
waited_for=False,
reason='',
external_idstring=None,
changeids=None,
builderNames=None,
properties=None,
priority=None,
**kw,
):
if changeids is None:
changeids = []
properties = properties.asDict() if properties is not None else None
self.addBuildsetCalls.append((
'addBuildsetForChanges',
{
"waited_for": waited_for,
"reason": reason,
"external_idstring": external_idstring,
"changeids": changeids,
"properties": properties,
"builderNames": builderNames,
"priority": priority,
},
))
return self._addBuildsetReturnValue(builderNames)
def fake_addBuildsetForSourceStamps(
self,
waited_for=False,
sourcestamps=None,
reason='',
external_idstring=None,
properties=None,
builderNames=None,
priority=None,
**kw,
):
if sourcestamps is None:
sourcestamps = []
properties = properties.asDict() if properties is not None else None
self.assertIsInstance(sourcestamps, list)
sourcestamps.sort()
self.addBuildsetCalls.append((
'addBuildsetForSourceStamps',
{
"reason": reason,
"external_idstring": external_idstring,
"properties": properties,
"builderNames": builderNames,
"sourcestamps": sourcestamps,
},
))
return self._addBuildsetReturnValue(builderNames)
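# Illustrative sketch of how a test typically drives this mixin. `test` and
# `scheduler` are hypothetical stand-ins; the objectid/schedulerid values are
# arbitrary fixtures.
@defer.inlineCallbacks
def _example_attach(test, scheduler):
    yield test.setUpScheduler()
    sched = yield test.attachScheduler(
        scheduler,
        objectid=12,
        schedulerid=34,
        overrideBuildsetMethods=True,
        createBuilderDB=True,
    )
    # buildset calls made by `sched` now accumulate in test.addBuildsetCalls
    return sched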
# file: buildbot_buildbot/master/buildbot/test/util/site.py
# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
from twisted.python.failure import Failure
from twisted.web.server import Site
class SiteWithClose(Site):
    """A Site that remembers the protocols it builds so tests can force all
    outstanding connections closed during teardown."""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._protocols = []
def buildProtocol(self, addr):
p = super().buildProtocol(addr)
self._protocols.append(p)
return p
def close_connections(self):
for p in self._protocols:
p.connectionLost(Failure(RuntimeError("Closing down at the end of test")))
# There is currently no other way to force all pending server-side connections to
# close.
p._channel.transport.connectionLost(
Failure(RuntimeError("Closing down at the end of test"))
)
self._protocols = []
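# Illustrative sketch of the expected teardown, assuming `site` is a
# SiteWithClose and `port` was returned by reactor.listenTCP(0, site); it
# returns the Deferred from stopListening() for the test to wait on.
def _example_teardown(site, port):
    site.close_connections()
    return port.stopListening()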
# file: buildbot_buildbot/master/buildbot/test/util/patch_delay.py
# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
#
# Portions of this file include source code of Python 3.7 from
# cpython/Lib/unittest/mock.py file.
#
# It is licensed under PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2.
# Copyright (c) 2001-2019 Python Software Foundation. All rights reserved.
import contextlib
import functools
from unittest import mock
from twisted.internet import defer
def _dot_lookup(thing, comp, import_path):
try:
return getattr(thing, comp)
except AttributeError:
__import__(import_path)
return getattr(thing, comp)
def _importer(target):
components = target.split('.')
import_path = components.pop(0)
thing = __import__(import_path)
for comp in components:
import_path += f".{comp}"
thing = _dot_lookup(thing, comp, import_path)
return thing
def _get_target(target):
try:
target, attribute = target.rsplit('.', 1)
except (TypeError, ValueError) as e:
raise TypeError(f"Need a valid target to patch. You supplied: {target!r}") from e
return _importer(target), attribute
class DelayWrapper:
    """Collects one Deferred per intercepted call; fire() releases them all."""
def __init__(self):
self._deferreds = []
def add_new(self):
d = defer.Deferred()
self._deferreds.append(d)
return d
def __len__(self):
return len(self._deferreds)
def fire(self):
deferreds = self._deferreds
self._deferreds = []
for d in deferreds:
d.callback(None)
@contextlib.contextmanager
def patchForDelay(target_name):
    """Patch the callable at dotted path ``target_name`` so each call is
    parked on a Deferred until the yielded DelayWrapper fires it."""
class Default:
pass
default = Default()
target, attribute = _get_target(target_name)
original = getattr(target, attribute, default)
if original is default:
raise RuntimeError(f'Could not find name {target_name}')
if not callable(original):
raise RuntimeError(f'{target_name} is not callable')
delay = DelayWrapper()
@functools.wraps(original)
@defer.inlineCallbacks
def wrapper(*args, **kwargs):
yield delay.add_new()
return (yield original(*args, **kwargs))
with mock.patch(target_name, new=wrapper):
try:
yield delay
finally:
delay.fire()
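# Illustrative sketch of the intended use. `target_name` is the dotted path of
# the callable to park (hypothetical, supplied by the caller), and
# `call_under_test` is a zero-argument callable that triggers it and returns a
# Deferred.
@defer.inlineCallbacks
def _example_delayed(target_name, call_under_test):
    with patchForDelay(target_name) as delay:
        d = call_under_test()
        assert len(delay) == 1  # the call is parked on our Deferred
        delay.fire()  # release it
        result = yield d
    return result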
# file: buildbot_buildbot/master/buildbot/test/util/git_repository.py
# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
from __future__ import annotations
import datetime
import os
import shutil
import subprocess
from pathlib import Path
class TestGitRepository:
def __init__(self, repository_path: os.PathLike, git_bin: os.PathLike | None | str = None):
if git_bin is None:
git_bin = shutil.which('git')
if git_bin is None:
raise FileNotFoundError('Failed to find git')
self.git_bin = git_bin
self.repository_path = Path(repository_path)
self.repository_path.mkdir(parents=True, exist_ok=True)
self.curr_date = datetime.datetime(2024, 6, 8, 14, 0, 0, tzinfo=datetime.timezone.utc)
self.curr_author_name = 'test user'
self.curr_author_email = '[email protected]'
self.exec_git(['init', '--quiet', '--initial-branch=main'])
def advance_time(self, timedelta):
self.curr_date += timedelta
def create_file_text(self, relative_path: str, contents: str):
path = self.repository_path / relative_path
path.write_text(contents)
os.utime(path, (self.curr_date.timestamp(), self.curr_date.timestamp()))
def amend_file_text(self, relative_path: str, contents: str):
path = self.repository_path / relative_path
with path.open('a') as fp:
fp.write(contents)
os.utime(path, (self.curr_date.timestamp(), self.curr_date.timestamp()))
def exec_git(self, args: list[str], env: dict[str, str] | None = None):
final_env = self.git_author_env(
author_name=self.curr_author_name, author_mail=self.curr_author_email
)
final_env.update(self.git_date_env(self.curr_date))
if env is not None:
final_env.update(env)
subprocess.check_call(
[str(self.git_bin), *args],
cwd=self.repository_path,
env=final_env,
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL,
)
def commit(
self,
message: str,
files: list[os.PathLike] | None = None,
env: dict[str, str] | None = None,
) -> str:
args = ['commit', '--quiet', f'--message={message}']
if files is not None:
args.extend(str(f) for f in files)
self.exec_git(args, env=env)
return subprocess.check_output(
[str(self.git_bin), 'rev-parse', 'HEAD'],
cwd=self.repository_path,
text=True,
).strip()
@staticmethod
def git_author_env(author_name: str, author_mail: str):
return {
"GIT_AUTHOR_NAME": author_name,
"GIT_AUTHOR_EMAIL": author_mail,
"GIT_COMMITTER_NAME": author_name,
"GIT_COMMITTER_EMAIL": author_mail,
}
@staticmethod
def git_date_env(date: datetime.datetime):
def _format_date(_d: datetime.datetime) -> str:
# just in case, make sure we use UTC
return _d.astimezone(tz=datetime.timezone.utc).strftime("%Y-%m-%d %H:%M:%S +0000")
return {
"GIT_AUTHOR_DATE": _format_date(date),
"GIT_COMMITTER_DATE": _format_date(date),
}
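# Illustrative sketch: build a tiny two-commit history in `tmp_path` (any
# writable directory supplied by the caller) and return both revision hashes.
# Like the class itself, this needs a git binary on $PATH.
def _example_history(tmp_path):
    repo = TestGitRepository(tmp_path)
    repo.create_file_text('README', 'hello\n')
    repo.exec_git(['add', 'README'])
    rev1 = repo.commit('initial commit')
    # later commits get a distinct, deterministic timestamp
    repo.advance_time(datetime.timedelta(minutes=1))
    repo.amend_file_text('README', 'world\n')
    rev2 = repo.commit('second commit', files=[Path('README')])
    return rev1, rev2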
# file: buildbot_buildbot/master/buildbot/test/util/connector_component.py
# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
from __future__ import annotations
import types
from typing import TYPE_CHECKING
from twisted.internet import defer
from buildbot.db import model
from buildbot.test.fake import fakemaster
from buildbot.test.reactor import TestReactorMixin
from buildbot.test.util import db
from buildbot.util.sautils import get_upsert_method
if TYPE_CHECKING:
from buildbot.db import logs
from buildbot.db import pool
from buildbot.db import sourcestamps
class FakeDBConnector:
logs: logs.LogsConnectorComponent
pool: pool.DBThreadPool
sourcestamps: sourcestamps.SourceStampsConnectorComponent
class ConnectorComponentMixin(TestReactorMixin, db.RealDatabaseMixin):
"""
Implements a mock DBConnector object, replete with a thread pool and a DB
model. This includes a RealDatabaseMixin, so subclasses should not
instantiate that class directly. The connector appears at C{self.db}, and
the component should be attached to it as an attribute.
@ivar db: fake database connector
@ivar db.pool: DB thread pool
@ivar db.model: DB model
"""
@defer.inlineCallbacks
def setUpConnectorComponent(self, table_names=None, basedir='basedir', dialect_name='sqlite'):
"""Set up C{self.db}, using the given db_url and basedir."""
self.setup_test_reactor(auto_tear_down=False)
if table_names is None:
table_names = []
yield self.setUpRealDatabase(table_names=table_names, basedir=basedir)
self.db = FakeDBConnector()
self.db.pool = self.db_pool
self.db.upsert = get_upsert_method(self.db_engine)
self.db.has_native_upsert = self.db.upsert != get_upsert_method(None)
self.db.master = yield fakemaster.make_master(self)
self.db.model = model.Model(self.db)
self.db._engine = types.SimpleNamespace(dialect=types.SimpleNamespace(name=dialect_name))
@defer.inlineCallbacks
def tearDownConnectorComponent(self):
yield self.tearDownRealDatabase()
# break some reference loops, just for fun
del self.db.pool
del self.db.model
del self.db
yield self.tear_down_test_reactor()
class FakeConnectorComponentMixin(TestReactorMixin):
# Just like ConnectorComponentMixin, but for working with fake database
@defer.inlineCallbacks
def setUpConnectorComponent(self):
self.setup_test_reactor(auto_tear_down=False)
self.master = yield fakemaster.make_master(self, wantDb=True)
self.db = self.master.db
self.db.checkForeignKeys = True
self.insert_test_data = self.db.insert_test_data
@defer.inlineCallbacks
def tearDownConnectorComponent(self):
yield self.tear_down_test_reactor()
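# Illustrative sketch: `test` mixes in ConnectorComponentMixin and
# `component_factory` builds the connector component under test (both are
# hypothetical stand-ins supplied by the real test case).
@defer.inlineCallbacks
def _example_component_setup(test, component_factory):
    # create only the tables the component needs, then attach it to the
    # fake connector so its methods run against the real thread pool
    yield test.setUpConnectorComponent(table_names=['masters'])
    test.db.masters = component_factory(test.db)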
# file: buildbot_buildbot/master/buildbot/test/util/querylog.py
# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
import contextlib
import logging
from twisted.python import log
# These routines provides a way to dump SQLAlchemy SQL commands and their
# results into Twisted's log.
# Logging wrappers are not re-entrant.
class _QueryToTwistedHandler(logging.Handler):
def __init__(self, log_query_result=False, record_mode=False):
super().__init__()
self._log_query_result = log_query_result
self.recordMode = record_mode
self.records = []
def emit(self, record):
if self.recordMode:
self.records.append(record.getMessage())
return
if record.levelno == logging.DEBUG:
if self._log_query_result:
log.msg(f"{record.name}:{record.threadName}:result: {record.getMessage()}")
else:
log.msg(f"{record.name}:{record.threadName}:query: {record.getMessage()}")
def start_log_queries(log_query_result=False, record_mode=False):
handler = _QueryToTwistedHandler(log_query_result=log_query_result, record_mode=record_mode)
# In 'sqlalchemy.engine' logging namespace SQLAlchemy outputs SQL queries
# on INFO level, and SQL queries results on DEBUG level.
logger = logging.getLogger('sqlalchemy.engine')
# TODO: this is not documented field of logger, so it's probably private.
handler.prev_level = logger.level
logger.setLevel(logging.DEBUG)
logger.addHandler(handler)
# Do not propagate SQL echoing into ancestor handlers
handler.prev_propagate = logger.propagate
logger.propagate = False
# Return previous values of settings, so they can be carefully restored
# later.
return handler
def stop_log_queries(handler):
assert isinstance(handler, _QueryToTwistedHandler)
logger = logging.getLogger('sqlalchemy.engine')
logger.removeHandler(handler)
# Restore logger settings or set them to reasonable defaults.
logger.propagate = handler.prev_propagate
logger.setLevel(handler.prev_level)
@contextlib.contextmanager
def log_queries():
handler = start_log_queries()
try:
yield
finally:
stop_log_queries(handler)
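# Illustrative sketch: echo the SQL issued by `engine` (any SQLAlchemy engine,
# supplied by the caller) into the Twisted log for the duration of the block.
def _example_logged_run(engine):
    import sqlalchemy as sa  # local import, needed only for this example

    with log_queries():
        with engine.connect() as conn:
            conn.execute(sa.text("SELECT 1"))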
class SqliteMaxVariableMixin:
@contextlib.contextmanager
def assertNoMaxVariables(self):
handler = start_log_queries(record_mode=True)
try:
yield
finally:
stop_log_queries(handler)
for line in handler.records:
            self.assertFalse(line.count("?") > 999, "too many variables in " + line)
# file: buildbot_buildbot/master/buildbot/test/util/sourcesteps.py
# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
from unittest import mock
from buildbot.test.steps import TestBuildStepMixin
class SourceStepMixin(TestBuildStepMixin):
"""
Support for testing source steps. Aside from the capabilities of
L{TestBuildStepMixin}, this adds:
- fake sourcestamps
The following instance variables are available after C{setupSourceStep}, in
addition to those made available by L{TestBuildStepMixin}:
@ivar sourcestamp: fake SourceStamp for the build
"""
def setUpSourceStep(self):
return super().setup_test_build_step()
def tearDownSourceStep(self):
return super().tear_down_test_build_step()
# utilities
def setup_step(self, step, args=None, patch=None, **kwargs):
"""
Set up C{step} for testing. This calls L{TestBuildStepMixin}'s C{setup_step}
and then does setup specific to a Source step.
"""
step = super().setup_step(step, **kwargs)
if args is None:
args = {}
ss = self.sourcestamp = mock.Mock(name="sourcestamp")
ss.ssid = 9123
ss.branch = args.get('branch', None)
ss.revision = args.get('revision', None)
ss.project = ''
ss.repository = ''
ss.patch = patch
ss.patch_info = None
ss.changes = []
self.build.getSourceStamp = lambda x=None: ss
return step
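# Illustrative sketch: `test` mixes in SourceStepMixin and `step` is the
# source step under test (both hypothetical stand-ins); branch and revision
# are arbitrary fixture values wired into the fake sourcestamp.
def _example_setup(test, step):
    return test.setup_step(step, args={'branch': 'main', 'revision': 'abcd1234'})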
# file: buildbot_buildbot/master/buildbot/test/util/protocols.py
# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
from buildbot.test.util import interfaces
class ConnectionInterfaceTest(interfaces.InterfaceTests):
def setUp(self):
# subclasses must set self.conn in this method
raise NotImplementedError
def test_sig_notifyOnDisconnect(self):
@self.assertArgSpecMatches(self.conn.notifyOnDisconnect)
def notifyOnDisconnect(self, cb):
pass
def test_sig_loseConnection(self):
@self.assertArgSpecMatches(self.conn.loseConnection)
def loseConnection(self):
pass
def test_sig_remotePrint(self):
@self.assertArgSpecMatches(self.conn.remotePrint)
def remotePrint(self, message):
pass
def test_sig_remoteGetWorkerInfo(self):
@self.assertArgSpecMatches(self.conn.remoteGetWorkerInfo)
def remoteGetWorkerInfo(self):
pass
def test_sig_remoteSetBuilderList(self):
@self.assertArgSpecMatches(self.conn.remoteSetBuilderList)
def remoteSetBuilderList(self, builders):
pass
def test_sig_remoteStartCommand(self):
@self.assertArgSpecMatches(self.conn.remoteStartCommand)
def remoteStartCommand(self, remoteCommand, builderName, commandId, commandName, args):
pass
def test_sig_remoteShutdown(self):
@self.assertArgSpecMatches(self.conn.remoteShutdown)
def remoteShutdown(self):
pass
def test_sig_remoteStartBuild(self):
@self.assertArgSpecMatches(self.conn.remoteStartBuild)
def remoteStartBuild(self, builderName):
pass
def test_sig_remoteInterruptCommand(self):
@self.assertArgSpecMatches(self.conn.remoteInterruptCommand)
def remoteInterruptCommand(builderName, commandId, why):
pass
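# Illustrative sketch of a concrete suite (FakeConnection is a hypothetical
# implementation of the connection interface; real tests supply their own):
#
#     class TestFakeConnection(ConnectionInterfaceTest, unittest.TestCase):
#         def setUp(self):
#             self.conn = FakeConnection()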
# file: buildbot_buildbot/master/buildbot/test/util/www.py
# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
import json
import os
from importlib.metadata import entry_points
from io import BytesIO
from io import StringIO
from unittest import mock
from urllib.parse import parse_qs
from urllib.parse import unquote as urlunquote
from uuid import uuid1
from twisted.internet import defer
from twisted.web import server
from buildbot.test.fake import fakemaster
from buildbot.util import bytes2unicode
from buildbot.util import unicode2bytes
from buildbot.util.importlib_compat import entry_points_get
from buildbot.www import auth
from buildbot.www import authz
class FakeSession:
def __init__(self):
self.user_info = {"anonymous": True}
def updateSession(self, request):
pass
class FakeRequest:
written = b''
finished = False
redirected_to = None
rendered_resource = None
failure = None
method = b'GET'
path = b'/req.path'
responseCode = 200
def __init__(self, path=None):
self.headers = {}
self.input_headers = {}
self.prepath = []
x = path.split(b'?', 1)
if len(x) == 1:
self.path = path
self.args = {}
else:
path, argstring = x
self.path = path
self.args = parse_qs(argstring, 1)
self.uri = self.path
self.postpath = []
for p in path[1:].split(b'/'):
path = urlunquote(bytes2unicode(p))
self.postpath.append(unicode2bytes(path))
self.deferred = defer.Deferred()
def write(self, data):
self.written = self.written + data
def redirect(self, url):
self.redirected_to = url
    def render(self, rsrc):
        # record the rendered resource (matching the class attribute above)
        # and hand it to anyone waiting on the request's deferred
        self.rendered_resource = rsrc
        self.deferred.callback(self.rendered_resource)
def finish(self):
self.finished = True
if self.redirected_to is not None:
self.deferred.callback({"redirected": self.redirected_to})
else:
self.deferred.callback(self.written)
def setResponseCode(self, code, text=None):
# twisted > 16 started to assert this
assert isinstance(code, int)
self.responseCode = code
self.responseText = text
def setHeader(self, hdr, value):
assert isinstance(hdr, bytes)
assert isinstance(value, bytes)
self.headers.setdefault(hdr, []).append(value)
def getHeader(self, key):
assert isinstance(key, bytes)
return self.input_headers.get(key)
def processingFailed(self, f):
self.deferred.errback(f)
def notifyFinish(self):
d = defer.Deferred()
@self.deferred.addBoth
def finished(res):
d.callback(res)
return res
return d
def getSession(self):
return self.session
class RequiresWwwMixin:
# mix this into a TestCase to skip if buildbot-www is not installed
if not [ep for ep in entry_points_get(entry_points(), 'buildbot.www') if ep.name == 'base']:
if 'BUILDBOT_TEST_REQUIRE_WWW' in os.environ:
raise RuntimeError(
'$BUILDBOT_TEST_REQUIRE_WWW is set but buildbot-www is not installed'
)
skip = 'buildbot-www not installed'
class WwwTestMixin(RequiresWwwMixin):
UUID = str(uuid1())
@defer.inlineCallbacks
def make_master(self, wantGraphql=False, url=None, **kwargs):
master = yield fakemaster.make_master(self, wantData=True, wantGraphql=wantGraphql)
self.master = master
master.www = mock.Mock() # to handle the resourceNeedsReconfigs call
master.www.getUserInfos = lambda _: getattr(
self.master.session, "user_info", {"anonymous": True}
)
cfg = {"port": None, "auth": auth.NoAuth(), "authz": authz.Authz()}
cfg.update(kwargs)
master.config.www = cfg
if url is not None:
master.config.buildbotURL = url
self.master.session = FakeSession()
self.master.authz = cfg["authz"]
self.master.authz.setMaster(self.master)
return master
def make_request(self, path=None, method=b'GET'):
self.request = FakeRequest(path)
self.request.session = self.master.session
self.request.method = method
return self.request
def render_resource(
self,
rsrc,
path=b'/',
accept=None,
method=b'GET',
origin=None,
access_control_request_method=None,
extraHeaders=None,
request=None,
content=None,
content_type=None,
):
if not request:
request = self.make_request(path, method=method)
if accept:
request.input_headers[b'accept'] = accept
if origin:
request.input_headers[b'origin'] = origin
if access_control_request_method:
request.input_headers[b'access-control-request-method'] = (
access_control_request_method
)
if extraHeaders is not None:
request.input_headers.update(extraHeaders)
if content_type is not None:
request.input_headers.update({b'content-type': content_type})
request.content = BytesIO(content)
rv = rsrc.render(request)
if rv != server.NOT_DONE_YET:
if rv is not None:
request.write(rv)
request.finish()
return request.deferred
@defer.inlineCallbacks
def render_control_resource(
self,
rsrc,
path=b'/',
params=None,
requestJson=None,
action="notfound",
id=None,
content_type=b'application/json',
):
if params is None:
params = {}
id = id or self.UUID
request = self.make_request(path)
request.method = b"POST"
request.content = StringIO(
requestJson
or json.dumps({"jsonrpc": "2.0", "method": action, "params": params, "id": id})
)
request.input_headers = {b'content-type': content_type}
rv = rsrc.render(request)
if rv == server.NOT_DONE_YET:
rv = yield request.deferred
res = json.loads(bytes2unicode(rv))
self.assertIn("jsonrpc", res)
self.assertEqual(res["jsonrpc"], "2.0")
if not requestJson:
# requestJson is used for invalid requests, so don't expect ID
self.assertIn("id", res)
self.assertEqual(res["id"], id)
def assertRequest(
self,
content=None,
contentJson=None,
contentType=None,
responseCode=None,
contentDisposition=None,
headers=None,
):
if headers is None:
headers = {}
got = {}
exp = {}
if content is not None:
got['content'] = self.request.written
exp['content'] = content
if contentJson is not None:
got['contentJson'] = json.loads(bytes2unicode(self.request.written))
exp['contentJson'] = contentJson
if contentType is not None:
got['contentType'] = self.request.headers[b'content-type']
exp['contentType'] = [contentType]
if responseCode is not None:
got['responseCode'] = str(self.request.responseCode)
exp['responseCode'] = str(responseCode)
for header, value in headers.items():
got[header] = self.request.headers.get(header)
exp[header] = value
self.assertEqual(got, exp)
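# Illustrative sketch: render `rsrc` (any twisted.web resource, supplied by
# the caller) through the fake request machinery; the asserted content type
# and response code are hypothetical and depend on the resource under test.
@defer.inlineCallbacks
def _example_render(test, rsrc):
    yield test.make_master(url='http://server/path/')
    yield test.render_resource(rsrc, b'/')
    test.assertRequest(contentType=b'text/html', responseCode=200)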
# file: buildbot_buildbot/master/buildbot/test/util/fuzz.py
# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
import os
from twisted.internet import defer
from twisted.internet import reactor
from twisted.trial import unittest
class FuzzTestCase(unittest.TestCase):
# run each test case for 10s
FUZZ_TIME = 10
@defer.inlineCallbacks
def test_fuzz(self):
# note that this will loop if do_fuzz doesn't take long enough
endTime = reactor.seconds() + self.FUZZ_TIME
while reactor.seconds() < endTime:
yield self.do_fuzz(endTime)
# delete this test case entirely if fuzzing is not enabled
if 'BUILDBOT_FUZZ' not in os.environ:
del test_fuzz
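# Illustrative sketch of a concrete fuzz case (poke_component_randomly is a
# hypothetical operation; real subclasses drive their component of interest
# until endTime):
#
#     class MyFuzz(FuzzTestCase):
#         @defer.inlineCallbacks
#         def do_fuzz(self, endTime):
#             while reactor.seconds() < endTime:
#                 yield self.poke_component_randomly()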
# file: buildbot_buildbot/master/buildbot/test/util/misc.py
# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
import os
import sys
from io import StringIO
from twisted.python import log
from twisted.trial.unittest import TestCase
import buildbot
from buildbot.process.buildstep import BuildStep
class PatcherMixin:
"""
Mix this in to get a few special-cased patching methods
"""
def patch_os_uname(self, replacement):
# twisted's 'patch' doesn't handle the case where an attribute
# doesn't exist..
if hasattr(os, 'uname'):
self.patch(os, 'uname', replacement)
else:
def cleanup():
del os.uname
self.addCleanup(cleanup)
os.uname = replacement
class StdoutAssertionsMixin:
"""
Mix this in to be able to assert on stdout during the test
"""
def setUpStdoutAssertions(self):
self.stdout = StringIO()
self.patch(sys, 'stdout', self.stdout)
def assertWasQuiet(self):
self.assertEqual(self.stdout.getvalue(), '')
def assertInStdout(self, exp):
self.assertIn(exp, self.stdout.getvalue())
def getStdout(self):
return self.stdout.getvalue().strip()
class TimeoutableTestCase(TestCase):
# The addCleanup in current Twisted does not time out any functions
# registered via addCleanups. Until we can depend on fixed Twisted, use
# TimeoutableTestCase whenever test failure may cause it to block and not
# report anything.
def deferRunCleanups(self, ignored, result):
self._deferRunCleanupResult = result
d = self._run('deferRunCleanupsTimeoutable', result)
d.addErrback(self._ebGotMaybeTimeout, result)
return d
def _ebGotMaybeTimeout(self, failure, result):
result.addError(self, failure)
def deferRunCleanupsTimeoutable(self):
return super().deferRunCleanups(None, self._deferRunCleanupResult)
def encodeExecutableAndArgs(executable, args, encoding="utf-8"):
"""
Encode executable and arguments from unicode to bytes.
This avoids a deprecation warning when calling reactor.spawnProcess()
"""
if isinstance(executable, str):
executable = executable.encode(encoding)
argsBytes = []
for arg in args:
if isinstance(arg, str):
arg = arg.encode(encoding)
argsBytes.append(arg)
return (executable, argsBytes)
def enable_trace(case, trace_exclusions=None, f=sys.stdout):
"""This function can be called to enable tracing of the execution"""
if trace_exclusions is None:
trace_exclusions = [
"twisted",
"worker_transition.py",
"util/tu",
"util/path",
"log.py",
"/mq/",
"/db/",
"buildbot/data/",
"fake/reactor.py",
]
bbbase = os.path.dirname(buildbot.__file__)
state = {'indent': 0}
def tracefunc(frame, event, arg):
if frame.f_code.co_filename.startswith(bbbase):
if not any(te in frame.f_code.co_filename for te in trace_exclusions):
if event == "call":
state['indent'] += 2
print(
"-" * state['indent'],
frame.f_code.co_filename.replace(bbbase, ""),
frame.f_code.co_name,
frame.f_code.co_varnames,
file=f,
)
if event == "return":
state['indent'] -= 2
return tracefunc
sys.settrace(tracefunc)
case.addCleanup(sys.settrace, lambda _a, _b, _c: None)
class DebugIntegrationLogsMixin:
def setupDebugIntegrationLogs(self):
# to ease debugging we display the error logs in the test log
origAddCompleteLog = BuildStep.addCompleteLog
def addCompleteLog(self, name, _log):
if name.endswith("err.text"):
log.msg("got error log!", name, _log)
return origAddCompleteLog(self, name, _log)
self.patch(BuildStep, "addCompleteLog", addCompleteLog)
if 'BBTRACE' in os.environ:
enable_trace(self)
class BuildDictLookAlike:
"""a class whose instances compares to any build dict that this reporter is supposed to send
out"""
def __init__(self, extra_keys=None, expected_missing_keys=None, **assertions):
self.keys = [
"builder",
"builderid",
"buildid",
"buildrequest",
"buildrequestid",
"buildset",
"complete",
"complete_at",
"locks_duration_s",
"masterid",
"number",
"parentbuild",
"parentbuilder",
"properties",
"results",
"started_at",
"state_string",
"url",
"workerid",
]
if extra_keys:
self.keys.extend(extra_keys)
if expected_missing_keys is not None:
for key in expected_missing_keys:
self.keys.remove(key)
self.keys.sort()
self.assertions = assertions
def __eq__(self, b):
if sorted(b.keys()) != self.keys:
raise AssertionError(
'BuildDictLookAlike is not equal to build: '
f'Extra keys: {set(b.keys()) - set(self.keys)} '
f'Missing keys: {set(self.keys) - set(b.keys())}'
)
for k, v in self.assertions.items():
if b[k] != v:
return False
return True
def __ne__(self, b):
return not self == b
def __repr__(self):
return "{ any build }"
# --- File: buildbot_buildbot/master/buildbot/test/util/decorators.py ---
# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
"""
Various decorators for test cases
"""
import os
import sys
from twisted.python import runtime
_FLAKY_ENV_VAR = 'RUN_FLAKY_TESTS'
def todo(message):
"""
decorator to mark a todo test
"""
def wrap(func):
"""
just mark the test
"""
func.todo = message
return func
return wrap
def flaky(bugNumber=None, issueNumber=None, onPlatform=None):
def wrap(fn):
if onPlatform is not None and sys.platform != onPlatform:
return fn
if os.environ.get(_FLAKY_ENV_VAR):
return fn
if bugNumber is not None:
fn.skip = (
f"Flaky test (http://trac.buildbot.net/ticket/{bugNumber}) "
f"- set ${_FLAKY_ENV_VAR} to run anyway"
)
if issueNumber is not None:
fn.skip = (
f"Flaky test (https://github.com/buildbot/buildbot/issues/{issueNumber}) "
f"- set ${_FLAKY_ENV_VAR} to run anyway"
)
return fn
return wrap
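# Illustrative sketch (not part of the original module): unless RUN_FLAKY_TESTS
# is set in the environment, the decorated test gains a .skip attribute that
# points at the tracking issue.
def _example_flaky_usage():
    @flaky(issueNumber=1234)
    def test_sometimes_races():
        pass
    if not os.environ.get(_FLAKY_ENV_VAR):
        assert 'issues/1234' in test_sometimes_races.skip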
def skipUnlessPlatformIs(platform):
def closure(test):
if runtime.platformType != platform:
test.skip = f"not a {platform} platform"
return test
return closure
def skipIfPythonVersionIsLess(min_version_info):
assert isinstance(min_version_info, tuple)
def closure(test):
if sys.version_info < min_version_info:
test.skip = f"requires Python >= {min_version_info}"
return test
return closure
# --- File: buildbot_buildbot/master/buildbot/test/util/reporter.py ---
# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
from twisted.internet import defer
from buildbot.process.results import SUCCESS
from buildbot.test import fakedb
class ReporterTestMixin:
def setup_reporter_test(self):
self.reporter_test_project = 'testProject'
self.reporter_test_repo = 'https://example.org/repo'
self.reporter_test_revision = 'd34db33fd43db33f'
self.reporter_test_branch = "master"
self.reporter_test_codebase = 'cbgerrit'
self.reporter_test_change_id = 'I5bdc2e500d00607af53f0fa4df661aada17f81fc'
self.reporter_test_builder_name = 'Builder0'
self.reporter_test_props = {
'Stash_branch': 'refs/changes/34/1234/1',
'project': self.reporter_test_project,
'got_revision': self.reporter_test_revision,
'revision': self.reporter_test_revision,
'event.change.id': self.reporter_test_change_id,
'event.change.project': self.reporter_test_project,
'branch': 'refs/pull/34/merge',
}
self.reporter_test_thing_url = 'http://thing.example.com'
@defer.inlineCallbacks
def insert_build(self, results, insert_ss=True, parent_plan=False, insert_patch=False):
yield self.insert_test_data(
[results],
results,
insertSS=insert_ss,
parentPlan=parent_plan,
insert_patch=insert_patch,
)
build = yield self.master.data.get(("builds", 20))
return build
@defer.inlineCallbacks
def insert_buildset_no_builds(
self, results, insert_ss=True, parent_plan=False, insert_patch=False
):
yield self.insert_test_data(
[], results, insertSS=insert_ss, parentPlan=parent_plan, insert_patch=insert_patch
)
buildset = yield self.master.data.get(("buildsets", 98))
return buildset
@defer.inlineCallbacks
def insert_build_finished(self, results=SUCCESS, **kwargs):
return (yield self.insert_build(results=results, **kwargs))
@defer.inlineCallbacks
def insert_build_new(self, **kwargs):
return (yield self.insert_build(results=None, **kwargs))
@defer.inlineCallbacks
def insert_buildrequest_new(self, insert_patch=False, **kwargs):
self.db = self.master.db
yield self.db.insert_test_data([
fakedb.Master(id=92),
fakedb.Worker(id=13, name='wrk'),
fakedb.Builder(id=79, name='Builder0'),
fakedb.Builder(id=80, name='Builder1'),
fakedb.Buildset(id=98, results=None, reason="testReason1", parent_buildid=None),
fakedb.BuildRequest(id=11, buildsetid=98, builderid=79),
])
patchid = 99 if insert_patch else None
yield self.db.insert_test_data([
fakedb.BuildsetSourceStamp(buildsetid=98, sourcestampid=234),
fakedb.SourceStamp(
id=234,
branch=self.reporter_test_branch,
project=self.reporter_test_project,
revision=self.reporter_test_revision,
repository=self.reporter_test_repo,
codebase=self.reporter_test_codebase,
patchid=patchid,
),
fakedb.Patch(
id=99,
patch_base64='aGVsbG8sIHdvcmxk',
patch_author='him@foo',
patch_comment='foo',
subdir='/foo',
patchlevel=3,
),
])
request = yield self.master.data.get(("buildrequests", 11))
return request
@defer.inlineCallbacks
def insert_test_data(
self, buildResults, finalResult, insertSS=True, parentPlan=False, insert_patch=False
):
self.db = self.master.db
yield self.db.insert_test_data([
fakedb.Master(id=92),
fakedb.Worker(id=13, name='wrk'),
fakedb.Builder(id=79, name='Builder0'),
fakedb.Builder(id=80, name='Builder1'),
fakedb.Buildset(
id=98,
results=finalResult,
reason="testReason1",
parent_buildid=19 if parentPlan else None,
),
fakedb.Change(
changeid=13,
branch=self.reporter_test_branch,
revision='9283',
author='me@foo',
repository=self.reporter_test_repo,
codebase=self.reporter_test_codebase,
project='world-domination',
sourcestampid=234,
),
])
if parentPlan:
yield self.db.insert_test_data([
fakedb.Worker(id=12, name='wrk_parent'),
fakedb.Builder(id=78, name='Builder_parent'),
fakedb.Buildset(id=97, results=finalResult, reason="testReason0"),
fakedb.BuildRequest(id=10, buildsetid=98, builderid=78),
fakedb.Build(
id=19,
number=1,
builderid=78,
buildrequestid=10,
workerid=12,
masterid=92,
results=finalResult,
state_string="buildText",
),
])
if insertSS:
patchid = 99 if insert_patch else None
yield self.db.insert_test_data([
fakedb.BuildsetSourceStamp(buildsetid=98, sourcestampid=234),
fakedb.SourceStamp(
id=234,
branch=self.reporter_test_branch,
project=self.reporter_test_project,
revision=self.reporter_test_revision,
repository=self.reporter_test_repo,
codebase=self.reporter_test_codebase,
patchid=patchid,
),
fakedb.Patch(
id=99,
patch_base64='aGVsbG8sIHdvcmxk',
patch_author='him@foo',
patch_comment='foo',
subdir='/foo',
patchlevel=3,
),
])
for i, results in enumerate(buildResults):
started_at = 10000001
complete_at = None if results is None else 10000005
yield self.db.insert_test_data([
fakedb.BuildRequest(id=11 + i, buildsetid=98, builderid=79 + i),
fakedb.Build(
id=20 + i,
number=i,
builderid=79 + i,
buildrequestid=11 + i,
workerid=13,
masterid=92,
results=results,
state_string="buildText",
started_at=started_at,
complete_at=complete_at,
),
fakedb.Step(id=50 + i, buildid=20 + i, number=5, name='make'),
fakedb.Log(
id=60 + i, stepid=50 + i, name='stdio', slug='stdio', type='s', num_lines=7
),
fakedb.LogChunk(
logid=60 + i,
first_line=0,
last_line=1,
compressed=0,
content='Unicode log with non-ascii (\u00e5\u00e4\u00f6).',
),
fakedb.BuildProperty(buildid=20 + i, name="workername", value="wrk"),
fakedb.BuildProperty(buildid=20 + i, name="reason", value="because"),
fakedb.BuildProperty(buildid=20 + i, name="buildername", value="Builder0"),
fakedb.BuildProperty(buildid=20 + i, name="buildnumber", value=f"{i}"),
fakedb.BuildProperty(buildid=20 + i, name="scheduler", value="checkin"),
])
for k, v in self.reporter_test_props.items():
yield self.db.insert_test_data([
fakedb.BuildProperty(buildid=20 + i, name=k, value=v)
])
self.setup_fake_get_changes_for_build()
def get_inserted_buildset(self):
return self.master.data.get(("buildsets", 98))
def setup_fake_get_changes_for_build(self, has_change=True):
@defer.inlineCallbacks
def getChangesForBuild(buildid):
if not has_change:
return []
assert buildid == 20
ch = yield self.master.db.changes.getChange(13)
return [ch]
self.master.db.changes.getChangesForBuild = getChangesForBuild
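# Illustrative sketch (not part of the original module): a reporter test
# typically mixes this in, inserts a finished build, and hands it to the
# reporter under test ('my_reporter' is hypothetical):
#     self.setup_reporter_test()
#     build = yield self.insert_build_finished(SUCCESS)
#     yield my_reporter.sendMessage([{'builds': [build]}])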
# --- File: buildbot_buildbot/master/buildbot/changes/gitpoller.py ---
# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
from __future__ import annotations
import contextlib
import os
import re
from typing import TYPE_CHECKING
from typing import ClassVar
from typing import Sequence
from urllib.parse import quote as urlquote
from twisted.internet import defer
from twisted.python import log
from buildbot import config
from buildbot.changes import base
from buildbot.util import bytes2unicode
from buildbot.util import giturlparse
from buildbot.util import private_tempdir
from buildbot.util import runprocess
from buildbot.util import unicode2bytes
from buildbot.util.git import GitMixin
from buildbot.util.git import GitServiceAuth
from buildbot.util.git import check_ssh_config
from buildbot.util.git_credential import GitCredentialOptions
from buildbot.util.git_credential import add_user_password_to_credentials
from buildbot.util.state import StateMixin
from buildbot.util.twisted import async_to_deferred
if TYPE_CHECKING:
from typing import Callable
from typing import Literal
from buildbot.interfaces import IRenderable
class GitError(Exception):
"""Raised when git exits with code 128."""
class GitPoller(base.ReconfigurablePollingChangeSource, StateMixin, GitMixin):
"""This source will poll a remote git repo for changes and submit
them to the change master."""
compare_attrs: ClassVar[Sequence[str]] = (
"repourl",
"branches",
"workdir",
"pollInterval",
"gitbin",
"usetimestamps",
"category",
"project",
"pollAtLaunch",
"buildPushesWithNoCommits",
"pollRandomDelayMin",
"pollRandomDelayMax",
"_git_auth",
)
def __init__(self, repourl, **kwargs) -> None:
self._git_auth = GitServiceAuth(self)
self.lastRev: dict[str, str] | None = None
name = kwargs.get("name", None)
if name is None:
kwargs["name"] = repourl
super().__init__(repourl, **kwargs)
def checkConfig( # type: ignore[override]
self,
repourl,
branches: list[str] | Literal[True] | Callable[[str], bool] | None = None,
branch: str | None = None,
workdir=None,
pollInterval=10 * 60,
gitbin="git",
usetimestamps=True,
category=None,
project=None,
fetch_refspec=None,
encoding="utf-8",
name=None,
pollAtLaunch=False,
buildPushesWithNoCommits=False,
only_tags=False,
sshPrivateKey=None,
sshHostKey=None,
sshKnownHosts=None,
pollRandomDelayMin=0,
pollRandomDelayMax=0,
auth_credentials: tuple[IRenderable | str, IRenderable | str] | None = None,
git_credentials: GitCredentialOptions | None = None,
):
if only_tags and (branch or branches):
config.error("GitPoller: can't specify only_tags and branch/branches")
if branch and branches:
config.error("GitPoller: can't specify both branch and branches")
if branch and not isinstance(branch, str):
config.error("GitPoller: 'branch' argument must be a str")
if branches is not None and not (
(isinstance(branches, list) and all(isinstance(e, str) for e in branches))
or branches is True
or callable(branches)
):
config.error(
"GitPoller: 'branches' argument must be one of "
"list of str, True, or Callable[[str], bool]"
)
check_ssh_config('GitPoller', sshPrivateKey, sshHostKey, sshKnownHosts)
if fetch_refspec is not None:
config.error(
"GitPoller: fetch_refspec is no longer supported. "
"Instead, only the given branches are downloaded."
)
if name is None:
name = repourl
super().checkConfig(
name=name,
pollInterval=pollInterval,
pollAtLaunch=pollAtLaunch,
pollRandomDelayMin=pollRandomDelayMin,
pollRandomDelayMax=pollRandomDelayMax,
)
@defer.inlineCallbacks
def reconfigService( # type: ignore[override]
self,
repourl,
branches=None,
branch=None,
workdir=None,
pollInterval=10 * 60,
gitbin="git",
usetimestamps=True,
category=None,
project=None,
fetch_refspec=None,
encoding="utf-8",
name=None,
pollAtLaunch=False,
buildPushesWithNoCommits=False,
only_tags=False,
sshPrivateKey=None,
sshHostKey=None,
sshKnownHosts=None,
pollRandomDelayMin=0,
pollRandomDelayMax=0,
auth_credentials: tuple[IRenderable | str, IRenderable | str] | None = None,
git_credentials: GitCredentialOptions | None = None,
):
if name is None:
name = repourl
if project is None:
project = ''
if branch:
branches = [branch]
elif not branches:
if only_tags:
branches = lambda ref: ref.startswith('refs/tags/')
else:
branches = None
self.repourl = repourl
self.branches = branches
self.encoding = encoding
self.buildPushesWithNoCommits = buildPushesWithNoCommits
self.gitbin = gitbin
self.workdir = workdir
self.usetimestamps = usetimestamps
self.category = (
category if callable(category) else bytes2unicode(category, encoding=self.encoding)
)
self.project = bytes2unicode(project, encoding=self.encoding)
self.changeCount = 0
self.lastRev = None
self.setupGit()
if auth_credentials is not None:
git_credentials = add_user_password_to_credentials(
auth_credentials,
repourl,
git_credentials,
)
self._git_auth = GitServiceAuth(
self, sshPrivateKey, sshHostKey, sshKnownHosts, git_credentials
)
if self.workdir is None:
self.workdir = 'gitpoller-work'
# make our workdir absolute, relative to the master's basedir
if not os.path.isabs(self.workdir):
self.workdir = os.path.join(self.master.basedir, self.workdir)
log.msg(f"gitpoller: using workdir '{self.workdir}'")
yield super().reconfigService(
name=name,
pollInterval=pollInterval,
pollAtLaunch=pollAtLaunch,
pollRandomDelayMin=pollRandomDelayMin,
pollRandomDelayMax=pollRandomDelayMax,
)
@defer.inlineCallbacks
def _checkGitFeatures(self):
stdout = yield self._dovccmd('--version', [])
self.parseGitFeatures(stdout)
if not self.gitInstalled:
raise OSError('Git is not installed')
if not self.supportsSshPrivateKeyAsEnvOption:
has_ssh_private_key = (
yield self.renderSecrets(self._git_auth.ssh_private_key)
) is not None
if has_ssh_private_key:
raise OSError('SSH private keys require Git 2.3.0 or newer')
def activate(self):
try:
self.lastRev = None
super().activate()
except Exception as e:
log.err(e, 'while initializing GitPoller repository')
    def describe(self):
        status = 'GitPoller watching the remote git repository ' + bytes2unicode(
            self.repourl, self.encoding
        )
        if self.branches:
            if self.branches is True:
                status += ', branches: ALL'
            elif not callable(self.branches):
                status += ', branches: ' + ', '.join(self.branches)
        if not self.master:
            status += " [STOPPED - check log]"
        return status
@async_to_deferred
async def _resolve_head_ref(self, git_auth_files_path: str | None = None) -> str | None:
if self.supports_lsremote_symref:
rows: str = await self._dovccmd(
'ls-remote',
['--symref', self.repourl, 'HEAD'],
auth_files_path=git_auth_files_path,
)
# simple parse of output which should have format:
# ref: refs/heads/{branch} HEAD
# {hash} HEAD
parts = rows.split(maxsplit=3)
# sanity just in case
if len(parts) >= 3 and parts[0] == 'ref:' and parts[2] == 'HEAD':
return parts[1]
return None
# naive fallback if git version does not support --symref
rows = await self._dovccmd('ls-remote', [self.repourl, 'HEAD', 'refs/heads/*'])
refs = [row.split('\t') for row in rows.splitlines() if '\t' in row]
# retrieve hash that HEAD points to
head_hash = next((hash for hash, ref in refs if ref == 'HEAD'), None)
if head_hash is None:
return None
        # get refs that point to the same hash as HEAD
candidates = [ref for hash, ref in refs if ref != 'HEAD' and hash == head_hash]
# Found default branch
if len(candidates) == 1:
return candidates[0]
        # If multiple refs point to the same hash as HEAD,
        # we have no way to know which one is the default
return None
@async_to_deferred
async def _list_remote_refs(
self, refs: list[str] | None = None, git_auth_files_path: str | None = None
) -> list[str]:
rows: str = await self._dovccmd(
'ls-remote',
['--refs', self.repourl] + (refs if refs is not None else []),
auth_files_path=git_auth_files_path,
)
branches: list[str] = []
for row in rows.splitlines():
if '\t' not in row:
# Not a useful line
continue
_, ref = row.split("\t")
branches.append(ref)
return branches
@staticmethod
def _trim_prefix(value: str, prefix: str) -> str:
"""Remove prefix from value."""
if value.startswith(prefix):
return value[len(prefix) :]
return value
def _removeHeads(self, branch):
"""Remove 'refs/heads/' prefix from remote references."""
if branch.startswith("refs/heads/"):
branch = branch[11:]
return branch
@staticmethod
def _tracker_ref(repourl: str, ref: str) -> str:
def _sanitize(value: str) -> str:
return urlquote(value, '').replace('~', '%7E')
tracker_prefix = "refs/buildbot"
# if ref is not a Git ref, store under a different path to avoid collision
if not ref.startswith('refs/'):
tracker_prefix += "/raw"
git_url = giturlparse(repourl)
if git_url is None:
# fallback to using the whole repourl
url_identifier = _sanitize(repourl)
else:
url_identifier = f"{git_url.proto}/{_sanitize(git_url.domain)}"
if git_url.port is not None:
# replace `:` with url encode `%3A`
url_identifier += f"%3A{git_url.port}"
if git_url.owner is not None:
url_identifier += f"/{_sanitize(git_url.owner)}"
url_identifier += f"/{_sanitize(git_url.repo)}"
return f"{tracker_prefix}/{url_identifier}/{GitPoller._trim_prefix(ref, 'refs/')}"
def poll_should_exit(self):
# A single gitpoller loop may take a while on a loaded master, which would block
# reconfiguration, so we try to exit early.
return not self.doPoll.running
@defer.inlineCallbacks
def poll(self):
yield self._checkGitFeatures()
try:
yield self._dovccmd('init', ['--bare', self.workdir])
except GitError as e:
log.msg(e.args[0])
return
tmp_dir = (
private_tempdir.PrivateTemporaryDirectory(dir=self.workdir, prefix='.buildbot-ssh')
if self._git_auth.is_auth_needed
else contextlib.nullcontext()
)
# retrieve auth files
with tmp_dir as tmp_path:
yield self._git_auth.download_auth_files_if_needed(tmp_path)
refs, trim_ref_head = yield self._get_refs(tmp_path)
# Nothing to fetch and process.
if not refs:
return
if self.poll_should_exit():
return
refspecs = [f'+{ref}:{self._tracker_ref(self.repourl, ref)}' for ref in refs]
try:
yield self._dovccmd(
'fetch',
["--progress", self.repourl, *refspecs, "--"],
path=self.workdir,
auth_files_path=tmp_path,
)
except GitError as e:
log.msg(e.args[0])
return
if self.lastRev is None:
self.lastRev = yield self.getState('lastRev', {})
revs = {}
log.msg(f'gitpoller: processing changes from "{self.repourl}"')
for ref in refs:
branch = ref if not trim_ref_head else self._trim_prefix(ref, 'refs/heads/')
try:
if self.poll_should_exit(): # pragma: no cover
# Note that we still want to update the last known revisions for the branches
# we did process
break
rev = yield self._dovccmd(
'rev-parse', [self._tracker_ref(self.repourl, ref)], path=self.workdir
)
revs[branch] = rev
yield self._process_changes(rev, branch)
except Exception:
log.err(_why=f"trying to poll branch {branch} of {self.repourl}")
self.lastRev = revs
yield self.setState('lastRev', self.lastRev)
@async_to_deferred
async def _get_refs(self, git_auth_files_path: str) -> tuple[list[str], bool]:
if callable(self.branches):
# Get all refs and let callback filter them
remote_refs = await self._list_remote_refs(git_auth_files_path=git_auth_files_path)
refs = [b for b in remote_refs if self.branches(b)]
return (refs, False)
if self.branches is True:
# Get all branch refs
refs = await self._list_remote_refs(
refs=["refs/heads/*"],
git_auth_files_path=git_auth_files_path,
)
return (refs, False)
if self.branches:
refs = await self._list_remote_refs(
refs=[f"refs/heads/{b}" for b in self.branches],
git_auth_files_path=git_auth_files_path,
)
return (refs, True)
head_ref = await self._resolve_head_ref(git_auth_files_path=git_auth_files_path)
if head_ref is not None:
return ([head_ref], False)
        # Unlikely, but if we can't find HEAD here, something weird happened.
        # That's not a critical error, though: just fall back to 'HEAD' as the ref.
return (['HEAD'], False)
def _get_commit_comments(self, rev):
args = ['--no-walk', r'--format=%s%n%b', rev, '--']
d = self._dovccmd('log', args, path=self.workdir)
return d
def _get_commit_timestamp(self, rev):
# unix timestamp
args = ['--no-walk', r'--format=%ct', rev, '--']
d = self._dovccmd('log', args, path=self.workdir)
@d.addCallback
def process(git_output):
if self.usetimestamps:
try:
stamp = int(git_output)
                except Exception:
                    log.msg(
                        f'gitpoller: caught exception converting output \'{git_output}\' to '
                        'timestamp'
                    )
                    raise
return stamp
return None
return d
def _get_commit_files(self, rev):
args = ['--name-only', '--no-walk', r'--format=%n', '-m', '--first-parent', rev, '--']
d = self._dovccmd('log', args, path=self.workdir)
def decode_file(file):
            # git uses quoted octal escape sequences for non-ASCII characters
match = re.match('^"(.*)"$', file)
if match:
file = bytes2unicode(
match.groups()[0], encoding=self.encoding, errors='unicode_escape'
)
return bytes2unicode(file, encoding=self.encoding)
@d.addCallback
def process(git_output):
fileList = [
decode_file(file) for file in [s for s in git_output.splitlines() if len(s)]
]
return fileList
return d
def _get_commit_author(self, rev):
args = ['--no-walk', r'--format=%aN <%aE>', rev, '--']
d = self._dovccmd('log', args, path=self.workdir)
@d.addCallback
def process(git_output):
if not git_output:
raise OSError('could not get commit author for rev')
return git_output
return d
@defer.inlineCallbacks
def _get_commit_committer(self, rev):
args = ['--no-walk', r'--format=%cN <%cE>', rev, '--']
res = yield self._dovccmd('log', args, path=self.workdir)
if not res:
raise OSError('could not get commit committer for rev')
return res
@defer.inlineCallbacks
def _process_changes(self, newRev, branch):
"""
Read changes since last change.
- Read list of commit hashes.
- Extract details from each commit.
- Add changes to database.
"""
# initial run, don't parse all history
if not self.lastRev:
return
# get the change list
revListArgs = (
['--ignore-missing', '--first-parent']
+ ['--format=%H', f'{newRev}']
+ ['^' + rev for rev in sorted(self.lastRev.values())]
+ ['--']
)
self.changeCount = 0
results = yield self._dovccmd('log', revListArgs, path=self.workdir)
# process oldest change first
revList = results.split()
revList.reverse()
if self.buildPushesWithNoCommits and not revList:
existingRev = self.lastRev.get(branch)
if existingRev != newRev:
revList = [newRev]
if existingRev is None:
# This branch was completely unknown, rebuild
log.msg(f'gitpoller: rebuilding {newRev} for new branch "{branch}"')
else:
# This branch is known, but it now points to a different
# commit than last time we saw it, rebuild.
log.msg(f'gitpoller: rebuilding {newRev} for updated branch "{branch}"')
self.changeCount = len(revList)
self.lastRev[branch] = newRev
if self.changeCount:
log.msg(
f'gitpoller: processing {self.changeCount} changes: {revList} from '
f'"{self.repourl}" branch "{branch}"'
)
for rev in revList:
dl = defer.DeferredList(
[
self._get_commit_timestamp(rev),
self._get_commit_author(rev),
self._get_commit_committer(rev),
self._get_commit_files(rev),
self._get_commit_comments(rev),
],
consumeErrors=True,
)
results = yield dl
# check for failures
failures = [r[1] for r in results if not r[0]]
if failures:
for failure in failures:
log.err(failure, f"while processing changes for {rev} {branch}")
# just fail on the first error; they're probably all related!
failures[0].raiseException()
timestamp, author, committer, files, comments = [r[1] for r in results]
yield self.master.data.updates.addChange(
author=author,
committer=committer,
revision=bytes2unicode(rev, encoding=self.encoding),
files=files,
comments=comments,
when_timestamp=timestamp,
branch=bytes2unicode(self._removeHeads(branch)),
project=self.project,
repository=bytes2unicode(self.repourl, encoding=self.encoding),
category=self.category,
src='git',
)
@async_to_deferred
async def _dovccmd(
self,
command: str,
args: list[str],
path: str | None = None,
auth_files_path: str | None = None,
initial_stdin: str | None = None,
) -> str:
full_args: list[str] = []
full_env = os.environ.copy()
if self._git_auth.is_auth_needed_for_git_command(command):
if auth_files_path is None:
raise RuntimeError(
f"Git command {command} requires auth, but no auth information was provided"
)
self._git_auth.adjust_git_command_params_for_auth(
full_args,
full_env,
auth_files_path,
self,
)
full_args += [command, *args]
res = await runprocess.run_process(
self.master.reactor,
[self.gitbin, *full_args],
path,
env=full_env,
initial_stdin=unicode2bytes(initial_stdin) if initial_stdin is not None else None,
)
(code, stdout, stderr) = res
stdout = bytes2unicode(stdout, self.encoding)
stderr = bytes2unicode(stderr, self.encoding)
if code != 0:
if code == 128:
raise GitError(
f'command {full_args} in {path} on repourl {self.repourl} failed '
f'with exit code {code}: {stderr}'
)
raise OSError(
f'command {full_args} in {path} on repourl {self.repourl} '
f'failed with exit code {code}: {stderr}'
)
return stdout.strip()
# --- File: buildbot_buildbot/master/buildbot/changes/svnpoller.py ---
# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
# Based on the work of Dave Peticolas for the P4poll
# Changed to svn (using xml.dom.minidom) by Niklaus Giger
# Hacked beyond recognition by Brian Warner
import os
import xml.dom.minidom
from typing import ClassVar
from typing import Sequence
from urllib.parse import quote_plus as urlquote_plus
from twisted.internet import defer
from twisted.python import log
from buildbot import util
from buildbot.changes import base
from buildbot.util import bytes2unicode
from buildbot.util import runprocess
# these split_file_* functions are available for use as values to the
# split_file= argument.
def split_file_alwaystrunk(path):
return {"path": path}
def split_file_branches(path):
# turn "trunk/subdir/file.c" into (None, "subdir/file.c")
# and "trunk/subdir/" into (None, "subdir/")
# and "trunk/" into (None, "")
# and "branches/1.5.x/subdir/file.c" into ("branches/1.5.x", "subdir/file.c")
# and "branches/1.5.x/subdir/" into ("branches/1.5.x", "subdir/")
# and "branches/1.5.x/" into ("branches/1.5.x", "")
pieces = path.split('/')
if len(pieces) > 1 and pieces[0] == 'trunk':
return (None, '/'.join(pieces[1:]))
elif len(pieces) > 2 and pieces[0] == 'branches':
return ('/'.join(pieces[0:2]), '/'.join(pieces[2:]))
return None
def split_file_projects_branches(path):
# turn projectname/trunk/subdir/file.c into dict(project=projectname,
# branch=trunk, path=subdir/file.c)
if "/" not in path:
return None
project, path = path.split("/", 1)
f = split_file_branches(path)
if f:
info = {"project": project, "path": f[1]}
if f[0]:
info['branch'] = f[0]
return info
return f
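# Illustrative sketch (not part of the original module): how the helpers map
# repository-relative paths to branch/path information.
def _example_split_file_usage():
    assert split_file_alwaystrunk('subdir/file.c') == {'path': 'subdir/file.c'}
    assert split_file_branches('trunk/subdir/file.c') == (None, 'subdir/file.c')
    assert split_file_branches('branches/1.5.x/subdir/file.c') == (
        'branches/1.5.x',
        'subdir/file.c',
    )
    assert split_file_projects_branches('proj/trunk/main.c') == {
        'project': 'proj',
        'path': 'main.c',
    }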
class SVNPoller(base.ReconfigurablePollingChangeSource, util.ComparableMixin):
"""
Poll a Subversion repository for changes and submit them to the change
master.
"""
compare_attrs: ClassVar[Sequence[str]] = (
"repourl",
"split_file",
"svnuser",
"svnpasswd",
"project",
"pollInterval",
"histmax",
"svnbin",
"category",
"cachepath",
"pollAtLaunch",
"pollRandomDelayMin",
"pollRandomDelayMax",
)
secrets = ("svnuser", "svnpasswd")
parent = None # filled in when we're added
last_change = None
loop = None
def __init__(self, repourl, **kwargs):
name = kwargs.get('name', None)
if name is None:
kwargs['name'] = repourl
super().__init__(repourl, **kwargs)
def checkConfig(
self,
repourl,
split_file=None,
svnuser=None,
svnpasswd=None,
pollInterval=10 * 60,
histmax=100,
svnbin="svn",
revlinktmpl="",
category=None,
project="",
cachepath=None,
extra_args=None,
name=None,
pollAtLaunch=False,
pollRandomDelayMin=0,
pollRandomDelayMax=0,
):
if name is None:
name = repourl
super().checkConfig(
name=name,
pollInterval=pollInterval,
pollAtLaunch=pollAtLaunch,
pollRandomDelayMin=pollRandomDelayMin,
pollRandomDelayMax=pollRandomDelayMax,
)
@defer.inlineCallbacks
def reconfigService(
self,
repourl,
split_file=None,
svnuser=None,
svnpasswd=None,
pollInterval=10 * 60,
histmax=100,
svnbin="svn",
revlinktmpl="",
category=None,
project="",
cachepath=None,
extra_args=None,
name=None,
pollAtLaunch=False,
pollRandomDelayMin=0,
pollRandomDelayMax=0,
):
if name is None:
name = repourl
if repourl.endswith("/"):
repourl = repourl[:-1] # strip the trailing slash
self.repourl = repourl
self.extra_args = extra_args
self.split_file = split_file or split_file_alwaystrunk
self.svnuser = svnuser
self.svnpasswd = svnpasswd
self.revlinktmpl = revlinktmpl
# include environment variables required for ssh-agent auth
self.environ = os.environ.copy()
self.svnbin = svnbin
self.histmax = histmax
self._prefix = None
self.category = category if callable(category) else util.bytes2unicode(category)
self.project = util.bytes2unicode(project)
self.cachepath = cachepath
if self.cachepath and os.path.exists(self.cachepath):
try:
with open(self.cachepath, encoding='utf-8') as f:
self.last_change = int(f.read().strip())
log.msg(
f"SVNPoller: SVNPoller({self.repourl}) setting last_change "
f"to {self.last_change}"
)
# try writing it, too
with open(self.cachepath, "w", encoding='utf-8') as f:
f.write(str(self.last_change))
except Exception:
self.cachepath = None
                log.msg(
                    f"SVNPoller: SVNPoller({self.repourl}) cache file corrupt or "
                    "unwriteable; skipping and not using"
                )
log.err()
yield super().reconfigService(
name=name,
pollInterval=pollInterval,
pollAtLaunch=pollAtLaunch,
pollRandomDelayMin=pollRandomDelayMin,
pollRandomDelayMax=pollRandomDelayMax,
)
def describe(self):
return f"SVNPoller: watching {self.repourl}"
def poll(self):
# Our return value is only used for unit testing.
# we need to figure out the repository root, so we can figure out
# repository-relative pathnames later. Each REPOURL is in the form
# (ROOT)/(PROJECT)/(BRANCH)/(FILEPATH), where (ROOT) is something
# like svn://svn.twistedmatrix.com/svn/Twisted (i.e. there is a
# physical repository at /svn/Twisted on that host), (PROJECT) is
# something like Projects/Twisted (i.e. within the repository's
# internal namespace, everything under Projects/Twisted/ has
# something to do with Twisted, but these directory names do not
# actually appear on the repository host), (BRANCH) is something like
# "trunk" or "branches/2.0.x", and (FILEPATH) is a tree-relative
# filename like "twisted/internet/defer.py".
# our self.repourl attribute contains (ROOT)/(PROJECT) combined
# together in a way that we can't separate without svn's help. If the
# user is not using the split_file= argument, then self.repourl might
# be (ROOT)/(PROJECT)/(BRANCH) . In any case, the filenames we will
# get back from 'svn log' will be of the form
# (PROJECT)/(BRANCH)/(FILEPATH), but we want to be able to remove
# that (PROJECT) prefix from them. To do this without requiring the
# user to tell us how repourl is split into ROOT and PROJECT, we do an
# 'svn info --xml' command at startup. This command will include a
# <root> element that tells us ROOT. We then strip this prefix from
# self.repourl to determine PROJECT, and then later we strip the
# PROJECT prefix from the filenames reported by 'svn log --xml' to
# get a (BRANCH)/(FILEPATH) that can be passed to split_file() to
# turn into separate BRANCH and FILEPATH values.
# whew.
if self.project:
log.msg("SVNPoller: polling " + self.project)
else:
log.msg("SVNPoller: polling")
d = defer.succeed(None)
if not self._prefix:
d.addCallback(lambda _: self.get_prefix())
@d.addCallback
def set_prefix(prefix):
self._prefix = prefix
d.addCallback(self.get_logs)
d.addCallback(self.parse_logs)
d.addCallback(self.get_new_logentries)
d.addCallback(self.create_changes)
d.addCallback(self.submit_changes)
d.addCallback(self.finished_ok)
# eat errors
        d.addErrback(log.err, 'SVNPoller: Error while polling')
return d
@defer.inlineCallbacks
def get_prefix(self):
command = [self.svnbin, "info", "--xml", "--non-interactive", self.repourl]
if self.svnuser:
command.append(f"--username={self.svnuser}")
if self.svnpasswd is not None:
command.append(f"--password={self.svnpasswd}")
if self.extra_args:
command.extend(self.extra_args)
rc, output = yield runprocess.run_process(
self.master.reactor,
command,
env=self.environ,
collect_stderr=False,
stderr_is_error=True,
)
if rc != 0:
raise OSError(f'{self}: Got error when retrieving svn prefix')
try:
doc = xml.dom.minidom.parseString(output)
except xml.parsers.expat.ExpatError:
log.msg(f"SVNPoller: SVNPoller.get_prefix: ExpatError in '{output}'")
raise
rootnodes = doc.getElementsByTagName("root")
if not rootnodes:
# this happens if the URL we gave was already the root. In this
# case, our prefix is empty.
self._prefix = ""
return self._prefix
rootnode = rootnodes[0]
root = "".join([c.data for c in rootnode.childNodes])
# root will be a unicode string
if not self.repourl.startswith(root):
log.msg(
format="Got root %(root)r from `svn info`, but it is "
"not a prefix of the configured repourl",
repourl=self.repourl,
root=root,
)
raise RuntimeError("Configured repourl doesn't match svn root")
prefix = self.repourl[len(root) :]
if prefix.startswith("/"):
prefix = prefix[1:]
log.msg(f"SVNPoller: repourl={self.repourl}, root={root}, so prefix={prefix}")
return prefix
@defer.inlineCallbacks
def get_logs(self, _):
command = [self.svnbin, "log", "--xml", "--verbose", "--non-interactive"]
if self.svnuser:
command.extend([f"--username={self.svnuser}"])
if self.svnpasswd is not None:
command.extend([f"--password={self.svnpasswd}"])
if self.extra_args:
command.extend(self.extra_args)
command.extend([f"--limit={(self.histmax)}", self.repourl])
rc, output = yield runprocess.run_process(
self.master.reactor,
command,
env=self.environ,
collect_stderr=False,
stderr_is_error=True,
)
if rc != 0:
raise OSError(f'{self}: Got error when retrieving svn logs')
return output
def parse_logs(self, output):
# parse the XML output, return a list of <logentry> nodes
try:
doc = xml.dom.minidom.parseString(output)
except xml.parsers.expat.ExpatError:
log.msg(f"SVNPoller: SVNPoller.parse_logs: ExpatError in '{output}'")
raise
logentries = doc.getElementsByTagName("logentry")
return logentries
def get_new_logentries(self, logentries):
last_change = old_last_change = self.last_change
# given a list of logentries, calculate new_last_change, and
# new_logentries, where new_logentries contains only the ones after
# last_change
new_last_change = None
new_logentries = []
if logentries:
new_last_change = int(logentries[0].getAttribute("revision"))
if last_change is None:
# if this is the first time we've been run, ignore any changes
# that occurred before now. This prevents a build at every
# startup.
log.msg(f'SVNPoller: starting at change {new_last_change}')
elif last_change == new_last_change:
# an unmodified repository will hit this case
log.msg('SVNPoller: no changes')
else:
for el in logentries:
if last_change == int(el.getAttribute("revision")):
break
new_logentries.append(el)
new_logentries.reverse() # return oldest first
self.last_change = new_last_change
log.msg(f'SVNPoller: _process_changes {old_last_change} .. {new_last_change}')
return new_logentries
def _get_text(self, element, tag_name):
try:
child_nodes = element.getElementsByTagName(tag_name)[0].childNodes
text = "".join([t.data for t in child_nodes])
except IndexError:
text = "unknown"
return text
def _transform_path(self, path):
if not path.startswith(self._prefix):
log.msg(
format="SVNPoller: ignoring path '%(path)s' which doesn't"
"start with prefix '%(prefix)s'",
path=path,
prefix=self._prefix,
)
return None
relative_path = path[len(self._prefix) :]
if relative_path.startswith("/"):
relative_path = relative_path[1:]
where = self.split_file(relative_path)
# 'where' is either None, (branch, final_path) or a dict
if not where:
return None
if isinstance(where, tuple):
where = {"branch": where[0], "path": where[1]}
return where
def create_changes(self, new_logentries):
changes = []
for el in new_logentries:
revision = str(el.getAttribute("revision"))
revlink = ''
if self.revlinktmpl and revision:
revlink = self.revlinktmpl % urlquote_plus(revision)
revlink = str(revlink)
log.msg(f"Adding change revision {revision}")
author = self._get_text(el, "author")
comments = self._get_text(el, "msg")
# there is a "date" field, but it provides localtime in the
# repository's timezone, whereas we care about buildmaster's
# localtime (since this will get used to position the boxes on
# the Waterfall display, etc). So ignore the date field, and
# addChange will fill in with the current time
branches = {}
try:
pathlist = el.getElementsByTagName("paths")[0]
except IndexError: # weird, we got an empty revision
log.msg("ignoring commit with no paths")
continue
for p in pathlist.getElementsByTagName("path"):
kind = p.getAttribute("kind")
action = p.getAttribute("action")
path = "".join([t.data for t in p.childNodes])
if path.startswith("/"):
path = path[1:]
if kind == "dir" and not path.endswith("/"):
path += "/"
where = self._transform_path(path)
# if 'where' is None, the file was outside any project that
# we care about and we should ignore it
if where:
branch = where.get("branch", None)
filename = where["path"]
if branch not in branches:
branches[branch] = {'files': [], 'number_of_directories': 0}
if filename == "":
# root directory of branch
branches[branch]['files'].append(filename)
branches[branch]['number_of_directories'] += 1
elif filename.endswith("/"):
# subdirectory of branch
branches[branch]['files'].append(filename[:-1])
branches[branch]['number_of_directories'] += 1
else:
branches[branch]['files'].append(filename)
if "action" not in branches[branch]:
branches[branch]['action'] = action
for key in ("repository", "project", "codebase"):
if key in where:
branches[branch][key] = where[key]
for branch, info in branches.items():
action = info['action']
files = info['files']
number_of_directories_changed = info['number_of_directories']
number_of_files_changed = len(files)
if (
action == 'D'
and number_of_directories_changed == 1
and number_of_files_changed == 1
and files[0] == ''
):
log.msg(f"Ignoring deletion of branch '{branch}'")
else:
chdict = {
"author": author,
"committer": None,
# weakly assume filenames are utf-8
"files": [bytes2unicode(f, 'utf-8', 'replace') for f in files],
"comments": comments,
"revision": revision,
"branch": util.bytes2unicode(branch),
"revlink": revlink,
"category": self.category,
"repository": util.bytes2unicode(info.get('repository', self.repourl)),
"project": util.bytes2unicode(info.get('project', self.project)),
"codebase": util.bytes2unicode(info.get('codebase', None)),
}
changes.append(chdict)
return changes
@defer.inlineCallbacks
def submit_changes(self, changes):
for chdict in changes:
yield self.master.data.updates.addChange(src='svn', **chdict)
def finished_ok(self, res):
if self.cachepath:
with open(self.cachepath, "w", encoding='utf-8') as f:
f.write(str(self.last_change))
log.msg(f"SVNPoller: finished polling {res}")
return res
# --- File: buildbot_buildbot/master/buildbot/changes/github.py ---
# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
from datetime import datetime
from typing import ClassVar
from typing import Sequence
from twisted.internet import defer
from twisted.logger import Logger
from buildbot import config
from buildbot.changes import base
from buildbot.util import bytes2unicode
from buildbot.util import datetime2epoch
from buildbot.util import httpclientservice
from buildbot.util.pullrequest import PullRequestMixin
from buildbot.util.state import StateMixin
log = Logger()
HOSTED_BASE_URL = "https://api.github.com"
link_urls = {"https": "clone_url", "svn": "svn_url", "git": "git_url", "ssh": "ssh_url"}
class GitHubPullrequestPoller(base.ReconfigurablePollingChangeSource, StateMixin, PullRequestMixin):
compare_attrs: ClassVar[Sequence[str]] = (
"owner",
"repo",
"token",
"branches",
"pollInterval",
"category",
"project",
"pollAtLaunch",
"name",
)
db_class_name = 'GitHubPullrequestPoller'
property_basename = "github"
def __init__(self, owner, repo, **kwargs):
name = kwargs.get("name")
if not name:
kwargs["name"] = "GitHubPullrequestPoller:" + owner + "/" + repo
super().__init__(owner, repo, **kwargs)
def checkConfig(
self,
owner,
repo,
branches=None,
category='pull',
project=None,
baseURL=None,
pullrequest_filter=True,
token=None,
magic_link=False,
repository_type="https",
github_property_whitelist=None,
**kwargs,
):
if repository_type not in ["https", "svn", "git", "ssh"]:
config.error("repository_type must be one of {https, svn, git, ssh}")
super().checkConfig(name=self.name, **kwargs)
@defer.inlineCallbacks
def reconfigService(
self,
owner,
repo,
branches=None,
pollInterval=10 * 60,
category=None,
project=None,
baseURL=None,
pullrequest_filter=True,
token=None,
pollAtLaunch=False,
magic_link=False,
repository_type="https",
github_property_whitelist=None,
**kwargs,
):
yield super().reconfigService(name=self.name, **kwargs)
if baseURL is None:
baseURL = HOSTED_BASE_URL
if baseURL.endswith('/'):
baseURL = baseURL[:-1]
http_headers = {'User-Agent': 'Buildbot'}
if token is not None:
token = yield self.renderSecrets(token)
http_headers.update({'Authorization': 'token ' + token})
if github_property_whitelist is None:
github_property_whitelist = []
self._http = yield httpclientservice.HTTPSession(
self.master.httpservice, baseURL, headers=http_headers
)
self.token = token
self.owner = owner
self.repo = repo
self.branches = branches
self.pollInterval = pollInterval
self.pollAtLaunch = pollAtLaunch
self.repository_type = link_urls[repository_type]
self.magic_link = magic_link
self.external_property_whitelist = github_property_whitelist
if callable(pullrequest_filter):
self.pullrequest_filter = pullrequest_filter
else:
self.pullrequest_filter = lambda _: pullrequest_filter
self.category = category if callable(category) else bytes2unicode(category)
self.project = bytes2unicode(project)
def describe(self):
return "GitHubPullrequestPoller watching the " f"GitHub repository {self.owner}/{self.repo}"
@defer.inlineCallbacks
def _getPullInformation(self, pull_number):
result = yield self._http.get(
'/'.join(['/repos', self.owner, self.repo, 'pulls', str(pull_number)])
)
my_json = yield result.json()
return my_json
@defer.inlineCallbacks
def _getPulls(self):
log.debug(
"GitHubPullrequestPoller: polling "
f"GitHub repository {self.owner}/{self.repo}, branches: {self.branches}"
)
result = yield self._http.get('/'.join(['/repos', self.owner, self.repo, 'pulls']))
my_json = yield result.json()
if result.code != 200:
message = my_json.get('message', 'unknown')
log.error(
f"GitHubPullrequestPoller error {result.code} '{message}' "
f"while loading {result.url}"
)
return []
return my_json
@defer.inlineCallbacks
def _getFiles(self, prnumber):
result = yield self._http.get(
"/".join(['/repos', self.owner, self.repo, 'pulls', str(prnumber), 'files'])
)
my_json = yield result.json()
return [f["filename"] for f in my_json]
@defer.inlineCallbacks
def _getCommitters(self, prnumber):
result = yield self._http.get(
"/".join(['/repos', self.owner, self.repo, 'pulls', str(prnumber), 'commits'])
)
my_json = yield result.json()
return [
[c["commit"]["committer"]["name"], c["commit"]["committer"]["email"]] for c in my_json
]
@defer.inlineCallbacks
def _getAuthors(self, prnumber):
result = yield self._http.get(
"/".join(['/repos', self.owner, self.repo, 'pulls', str(prnumber), 'commits'])
)
my_json = yield result.json()
return [[a["commit"]["author"]["name"], a["commit"]["author"]["email"]] for a in my_json]
@defer.inlineCallbacks
def _getCurrentRev(self, prnumber):
# Get currently assigned revision of PR number
result = yield self._getStateObjectId()
rev = yield self.master.db.state.getState(result, f'pull_request{prnumber}', None)
return rev
@defer.inlineCallbacks
def _setCurrentRev(self, prnumber, rev):
# Set the updated revision for PR number.
result = yield self._getStateObjectId()
yield self.master.db.state.setState(result, f'pull_request{prnumber}', rev)
@defer.inlineCallbacks
def _getStateObjectId(self):
# Return a deferred for object id in state db.
result = yield self.master.db.state.getObjectId(
f'{self.owner}/{self.repo}', self.db_class_name
)
return result
@defer.inlineCallbacks
def _processChanges(self, github_result):
for pr in github_result:
# Track PRs for specified branches
base_branch = pr['base']['ref']
prnumber = pr['number']
revision = pr['head']['sha']
# Check to see if the branch is set or matches
if self.branches is not None and base_branch not in self.branches:
continue
if self.pullrequest_filter is not None and not self.pullrequest_filter(pr):
continue
current = yield self._getCurrentRev(prnumber)
if not current or current[0:12] != revision[0:12]:
# Access title, repo, html link, and comments
pr = yield self._getPullInformation(prnumber)
title = pr['title']
if self.magic_link:
branch = f'refs/pull/{prnumber}/merge'
repo = pr['base']['repo'][self.repository_type]
else:
branch = pr['head']['ref']
repo = pr['head']['repo'][self.repository_type]
revlink = pr['html_url']
comments = pr['body']
updated = datetime.strptime(pr['updated_at'], '%Y-%m-%dT%H:%M:%SZ')
# update database
yield self._setCurrentRev(prnumber, revision)
project = self.project
if project is None:
project = pr['base']['repo']['full_name']
commits = pr['commits']
dl = defer.DeferredList(
[
self._getAuthors(prnumber),
self._getCommitters(prnumber),
self._getFiles(prnumber),
],
consumeErrors=True,
)
results = yield dl
failures = [r[1] for r in results if not r[0]]
if failures:
for failure in failures:
log.error(
"while processing changes for "
f"Pullrequest {prnumber} revision {revision}: {failure}"
)
# Fail on the first error!
failures[0].raiseException()
[authors, committers, files] = [r[1] for r in results]
author = authors[0][0] + " <" + authors[0][1] + ">"
committer = committers[0][0] + " <" + committers[0][1] + ">"
# emit the change
yield self.master.data.updates.addChange(
author=author,
committer=committer,
revision=bytes2unicode(revision),
revlink=bytes2unicode(revlink),
comments=f"GitHub Pull Request #{prnumber} "
f"({commits} commit{'s' if commits > 0 else ''})\n{title}\n{comments}",
when_timestamp=datetime2epoch(updated),
branch=bytes2unicode(branch),
category=self.category,
project=project,
repository=bytes2unicode(repo),
files=files,
properties={
'pullrequesturl': revlink,
**self.extractProperties(pr),
},
src='git',
)
@defer.inlineCallbacks
def poll(self):
result = yield self._getPulls()
yield self._processChanges(result)
# --- File: buildbot_buildbot/master/buildbot/changes/gerritchangesource.py ---
# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
import copy
import datetime
import hashlib
import json
from typing import ClassVar
from typing import Sequence
from twisted.internet import defer
from twisted.python import log
from buildbot import config
from buildbot import util
from buildbot.changes import base
from buildbot.changes.filter import ChangeFilter
from buildbot.config.checks import check_param_int
from buildbot.config.checks import check_param_int_none
from buildbot.util import bytes2unicode
from buildbot.util import deferwaiter
from buildbot.util import httpclientservice
from buildbot.util import runprocess
from buildbot.util import watchdog
from buildbot.util.protocol import LineProcessProtocol
from buildbot.util.pullrequest import PullRequestMixin
def _canonicalize_event(event):
"""
Return an event dictionary which is consistent between the gerrit
event stream and the gerrit event log formats.
"""
# For "patchset-created" the events-log JSON looks like:
# "project": {"name": "buildbot"}
# while the stream-events JSON looks like:
# "project": "buildbot"
# so we canonicalize them to the latter
if "change" not in event:
return event
change = event["change"]
if "project" not in change:
return event
project = change["project"]
if not isinstance(project, dict):
return event
if "name" not in project:
return event
event = copy.deepcopy(event)
event["change"]["project"] = project["name"]
return event
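# Illustrative sketch (not part of the original module): an events-log style
# payload, where 'project' is a dict, is rewritten to the stream-events shape.
def _example_canonicalize_usage():
    event = {'type': 'patchset-created', 'change': {'project': {'name': 'buildbot'}}}
    assert _canonicalize_event(event)['change']['project'] == 'buildbot'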
class GerritChangeFilter(ChangeFilter):
"""This gerrit specific change filter helps creating pre-commit and post-commit builders"""
compare_attrs: ClassVar[Sequence[str]] = ('eventtype_fn', 'gerrit_branch_fn')
def __init__(
self,
branch=util.NotABranch,
branch_re=None,
branch_fn=None,
eventtype=None,
eventtype_re=None,
eventtype_fn=None,
**kw,
):
if eventtype is not None:
kw.setdefault('property_eq', {})['event.type'] = eventtype
if eventtype_re is not None:
kw.setdefault('property_re', {})['event.type'] = eventtype_re
# for branch change filter, we take the real gerrit branch
# instead of the change's branch, which is also used as a grouping key
if branch is not util.NotABranch:
kw.setdefault('property_eq', {})['event.change.branch'] = branch
if branch_re is not None:
kw.setdefault('property_re', {})['event.change.branch'] = branch_re
super().__init__(**kw)
self.eventtype_fn = eventtype_fn
self.gerrit_branch_fn = branch_fn
def filter_change(self, change):
if self.eventtype_fn is not None:
value = change.properties.getProperty('event.type', '')
if not self.eventtype_fn(value):
return False
if self.gerrit_branch_fn is not None:
# for branch change filter, we take the real gerrit branch
# instead of the change's branch, which is also used as a grouping key
value = change.properties.getProperty('event.change.branch', '')
if not self.gerrit_branch_fn(value):
return False
return super().filter_change(change)
def _get_repr_filters(self):
filters = super()._get_repr_filters()
if self.eventtype_fn is not None:
filters.append(f'{self.eventtype_fn.__name__}(eventtype)')
if self.gerrit_branch_fn is not None:
filters.append(f'{self.gerrit_branch_fn.__name__}(branch)')
return filters
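# Illustrative sketch (not part of the original module): a filter that only
# accepts patchset-created events targeting release branches, suitable for
# attaching to a scheduler fed by a Gerrit change source:
#     GerritChangeFilter(eventtype='patchset-created', branch_re=r'release/.*')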
def _gerrit_user_to_author(props, username="unknown"):
"""
Convert Gerrit account properties to Buildbot format
Take into account missing values
"""
username = props.get("username", username)
username = props.get("name", username)
if "email" in props:
username += f" <{props['email']}>"
return username
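# Illustrative sketch (not part of the original module): the account's name
# wins over its username, and an empty account falls back to the default.
def _example_author_usage():
    assert _gerrit_user_to_author({'name': 'Jane', 'email': 'jane@example.org'}) == (
        'Jane <jane@example.org>'
    )
    assert _gerrit_user_to_author({}, 'gerrit') == 'gerrit'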
class GerritChangeSourceBase(base.ChangeSource, PullRequestMixin):
"""This source will maintain a connection to gerrit ssh server
that will provide us gerrit events in json format."""
compare_attrs: ClassVar[Sequence[str]] = ("gerritserver", "gerritport")
name = None
    # list of properties that are of no use in the event dict
external_property_denylist = ["event.eventCreatedOn"]
external_property_whitelist = ['*']
property_basename = 'event'
def checkConfig(
self,
gitBaseURL=None,
handled_events=("patchset-created", "ref-updated"),
debug=False,
get_files=False,
):
if gitBaseURL is None:
config.error("gitBaseURL must be specified")
def reconfigService(
self,
gitBaseURL=None,
handled_events=("patchset-created", "ref-updated"),
debug=False,
get_files=False,
):
self.gitBaseURL = gitBaseURL
self.handled_events = list(handled_events)
self._get_files = get_files
self.debug = debug
def build_properties(self, event):
properties = self.extractProperties(event)
properties["event.source"] = self.__class__.__name__
if event['type'] in ('patchset-created', 'comment-added') and 'change' in event:
properties['target_branch'] = event["change"]["branch"]
return properties
def eventReceived(self, event):
if event['type'] not in self.handled_events:
if self.debug:
log.msg(f"the event type '{event['type']}' is not setup to handle")
return defer.succeed(None)
properties = self.build_properties(event)
func_name = f'eventReceived_{event["type"].replace("-", "_")}'
func = getattr(self, func_name, None)
if func is None:
return self.addChangeFromEvent(properties, event)
return func(properties, event)
def get_branch_from_event(self, event):
if event['type'] in ('patchset-created', 'comment-added'):
return event["patchSet"]["ref"]
return event["change"]["branch"]
def strip_refs_heads_from_branch(self, branch):
if branch.startswith('refs/heads/'):
branch = branch[len('refs/heads/') :]
return branch
@defer.inlineCallbacks
def addChangeFromEvent(self, properties, event):
if "change" not in event:
if self.debug:
log.msg(f'unsupported event {event["type"]}')
return None
if "patchSet" not in event:
if self.debug:
log.msg(f'unsupported event {event["type"]}')
return None
event = _canonicalize_event(event)
event_change = event["change"]
files = ["unknown"]
if self._get_files:
files = yield self.getFiles(
change=event_change["number"], patchset=event["patchSet"]["number"]
)
yield self.master.data.updates.addChange(
author=_gerrit_user_to_author(event_change["owner"]),
project=util.bytes2unicode(event_change["project"]),
repository=f'{self.gitBaseURL}/{event_change["project"]}',
branch=self.get_branch_from_event(event),
revision=event["patchSet"]["revision"],
revlink=event_change["url"],
comments=event_change["subject"],
files=files,
category=event["type"],
properties=properties,
)
return None
def eventReceived_ref_updated(self, properties, event):
ref = event["refUpdate"]
author = "gerrit"
if "submitter" in event:
author = _gerrit_user_to_author(event["submitter"], author)
# Ignore ref-updated events if patchset-created events are expected for this push.
# ref-updated events may arrive before patchset-created events and cause problems, as
        # builds would be using properties from the ref-updated event and not from
        # patchset-created. As a result it may appear that the change was not related to a
        # Gerrit change, which would cause reporters to not submit reviews, for example.
if 'patchset-created' in self.handled_events and ref['refName'].startswith('refs/changes/'):
return None
return self.master.data.updates.addChange(
author=author,
project=ref["project"],
repository=f'{self.gitBaseURL}/{ref["project"]}',
branch=self.strip_refs_heads_from_branch(ref["refName"]),
revision=ref["newRev"],
comments="Gerrit: commit(s) pushed.",
files=["unknown"],
category=event["type"],
properties=properties,
)
class GerritSshStreamEventsConnector:
class LocalPP(LineProcessProtocol):
MAX_STORED_OUTPUT_DEBUG_LINES = 20
def __init__(self, connector):
super().__init__()
self.connector = connector
self._output_enabled = True
self._ended_deferred = defer.Deferred()
@defer.inlineCallbacks
def outLineReceived(self, line):
if self.connector.debug:
log.msg(
f"{self.connector.change_source.name} "
+ f"stdout: {line.decode('utf-8', errors='replace')}"
)
self.connector._append_line_for_debug(line)
if self._output_enabled:
yield self.connector.on_line_received_cb(line)
def errLineReceived(self, line):
if self.connector.debug:
log.msg(
f"{self.connector.change_source.name} "
+ f"stderr: {line.decode('utf-8', errors='replace')}"
)
if self._output_enabled:
self.connector._append_line_for_debug(line)
def processEnded(self, status):
super().processEnded(status)
self._ended_deferred.callback(None)
self.connector._stream_process_stopped()
def disable_output(self):
self._output_enabled = False
def wait(self):
return self._ended_deferred
# (seconds) connections longer than this are considered good, and reset the backoff timer
STREAM_GOOD_CONNECTION_TIME = 120
# (seconds) minimum, but nonzero, time to wait before retrying a failed connection
STREAM_BACKOFF_MIN = 0.5
# multiplier used to increase the backoff from MIN to MAX on repeated failures
STREAM_BACKOFF_EXPONENT = 1.5
# (seconds) maximum time to wait before retrying a failed connection
STREAM_BACKOFF_MAX = 60
# The number of gerrit output lines to print in case of a failure
MAX_STORED_OUTPUT_DEBUG_LINES = 20
debug = False
def __init__(
self,
reactor,
change_source,
gerritserver,
username,
gerritport=29418,
identity_file=None,
ssh_server_alive_interval_s=15,
ssh_server_alive_count_max=3,
on_process_start_cb=None,
on_line_received_cb=None,
):
self.reactor = reactor
self.change_source = change_source
self.gerritserver = gerritserver
self.username = username
self.gerritport = gerritport
self.identity_file = identity_file
self.ssh_server_alive_interval_s = ssh_server_alive_interval_s
self.ssh_server_alive_count_max = ssh_server_alive_count_max
self.on_process_start_cb = on_process_start_cb
self.on_line_received_cb = on_line_received_cb
self._process = None
self._stream_process_timeout = self.STREAM_BACKOFF_MIN
self._last_lines_for_debug = []
def start(self):
self._want_process = True
self.start_stream_process()
@defer.inlineCallbacks
def stop(self):
self._want_process = False
if self._process is not None:
self._process[0].disable_output()
self._process[1].signalProcess("KILL")
yield self._process[0].wait()
@defer.inlineCallbacks
def restart(self):
if self._process is not None:
self._process[0].disable_output()
# Process will restart automatically
self._process[1].signalProcess("KILL")
yield self._process[0].wait()
else:
self.start()
def _append_line_for_debug(self, line):
self._last_lines_for_debug.append(line)
while len(self._last_lines_for_debug) > self.MAX_STORED_OUTPUT_DEBUG_LINES:
self._last_lines_for_debug.pop(0)
def _build_gerrit_command(self, *gerrit_args):
"""Get an ssh command list which invokes gerrit with the given args on the
remote host"""
options = [
"-o",
"BatchMode=yes",
]
if self.ssh_server_alive_interval_s is not None:
options += ["-o", f"ServerAliveInterval={self.ssh_server_alive_interval_s}"]
if self.ssh_server_alive_count_max is not None:
options += ["-o", f"ServerAliveCountMax={self.ssh_server_alive_count_max}"]
cmd = ["ssh", *options, f"{self.username}@{self.gerritserver}", "-p", str(self.gerritport)]
if self.identity_file is not None:
cmd.extend(["-i", self.identity_file])
cmd.append("gerrit")
cmd.extend(gerrit_args)
return cmd
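    # Illustrative result with the defaults above (hostnames assumed): for username
    # 'bb' and server 'gerrit.example.com', _build_gerrit_command('stream-events')
    # produces roughly:
    #   ssh -o BatchMode=yes -o ServerAliveInterval=15 -o ServerAliveCountMax=3 \
    #       bb@gerrit.example.com -p 29418 gerrit stream-events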
def start_stream_process(self):
if self._process is not None:
return
if self.debug:
log.msg(f"{self.change_source.name}: starting 'gerrit stream-events'")
        # Must be called before the process is started, to ensure consistent ordering and
        # avoid race conditions.
self.on_process_start_cb()
cmd = self._build_gerrit_command("stream-events")
self._last_stream_process_start = self.reactor.seconds()
protocol = self.LocalPP(self)
self._process = (protocol, self.reactor.spawnProcess(protocol, "ssh", cmd, env=None))
self._last_lines_for_debug = []
def _stream_process_stopped(self):
self._process = None
# if the service is stopped, don't try to restart the process
if not self._want_process or not self.change_source.running:
return
now = self.reactor.seconds()
if now - self._last_stream_process_start < self.STREAM_GOOD_CONNECTION_TIME:
# bad startup; start the stream process again after a timeout,
# and then increase the timeout
log_lines = "\n".join([
l.decode("utf-8", errors="ignore") for l in self._last_lines_for_debug
])
log.msg(
f"{self.change_source.name}: stream-events failed; restarting after "
f"{round(self._stream_process_timeout)}s.\n"
f"{len(self._last_lines_for_debug)} log lines follow:\n{log_lines}"
)
self.reactor.callLater(self._stream_process_timeout, self.start_stream_process)
self._stream_process_timeout *= self.STREAM_BACKOFF_EXPONENT
self._stream_process_timeout = min(
self._stream_process_timeout, self.STREAM_BACKOFF_MAX
)
else:
# good startup, but lost connection; restart immediately,
# and set the timeout to its minimum
# make sure we log the reconnection, so that it might be detected
# and network connectivity fixed
log.msg(f"{self.change_source.name}: stream-events lost connection. Reconnecting...")
self.start_stream_process()
self._stream_process_timeout = self.STREAM_BACKOFF_MIN
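    # With the class defaults above, the retry delay grows 0.5 -> 0.75 -> 1.125 -> ...
    # up to the 60 s cap, and resets to the minimum once a connection survives
    # STREAM_GOOD_CONNECTION_TIME.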
@defer.inlineCallbacks
def get_files(self, change, patchset):
cmd = self._build_gerrit_command(
"query", str(change), "--format", "JSON", "--files", "--patch-sets"
)
if self.debug:
log.msg(
f"{self.change_source.name}: querying for changed files in change {change}/{patchset}: {cmd}"
)
rc, out = yield runprocess.run_process(self.reactor, cmd, env=None, collect_stderr=False)
if rc != 0:
return ["unknown"]
out = out.splitlines()[0]
res = json.loads(bytes2unicode(out))
if res.get("rowCount") == 0:
return ["unknown"]
patchsets = {i["number"]: i["files"] for i in res["patchSets"]}
return [i["file"] for i in patchsets[int(patchset)]]
class GerritHttpEventLogPollerConnector:
FIRST_FETCH_LOOKBACK_DAYS = 30
debug = False
def __init__(
self,
reactor,
change_source,
base_url,
auth,
get_last_event_ts,
first_fetch_lookback=FIRST_FETCH_LOOKBACK_DAYS,
on_lines_received_cb=None,
):
if base_url.endswith('/'):
base_url = base_url[:-1]
self._reactor = reactor
self._change_source = change_source
self._get_last_event_ts = get_last_event_ts
self._base_url = base_url
self._auth = auth
self._first_fetch_lookback = first_fetch_lookback
self._on_lines_received_cb = on_lines_received_cb
self._last_event_time = None
@defer.inlineCallbacks
def setup(self):
self._http = yield httpclientservice.HTTPSession(
self._change_source.master.httpservice, self._base_url, auth=self._auth
)
@defer.inlineCallbacks
def poll(self):
last_event_ts = yield self._get_last_event_ts()
if last_event_ts is None:
            # If there is no last event time stored in the database, then set
# the last event time to some historical look-back
last_event = datetime.datetime.fromtimestamp(
self._reactor.seconds(), datetime.timezone.utc
) - datetime.timedelta(days=self._first_fetch_lookback)
else:
last_event = datetime.datetime.fromtimestamp(last_event_ts, datetime.timezone.utc)
last_event_formatted = last_event.strftime("%Y-%m-%d %H:%M:%S")
if self.debug:
log.msg(f"{self._change_source.name}: Polling gerrit: {last_event_formatted}")
res = yield self._http.get(
"/plugins/events-log/events/", params={"t1": last_event_formatted}
)
if res.code != 200:
log.msg(f'{self._change_source.name}: Polling gerrit: got HTTP error code {res.code}')
return
lines = yield res.content()
yield self._on_lines_received_cb(lines.splitlines())
@defer.inlineCallbacks
def get_files(self, change, patchset):
res = yield self._http.get(f"/changes/{change}/revisions/{patchset}/files/")
res = yield res.content()
try:
res = res.splitlines()[1].decode('utf8') # the first line of every response is `)]}'`
return list(json.loads(res))
except Exception as e:
log.err(e, 'while getting files from connector')
return []
@defer.inlineCallbacks
def do_poll(self):
try:
yield self.poll()
except Exception as e:
log.err(e, 'while polling for changes')
def extract_gerrit_event_time(event):
return event["eventCreatedOn"]
def build_gerrit_event_hash(event):
return hashlib.sha1(json.dumps(event, sort_keys=True).encode("utf-8")).hexdigest()
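# sort_keys=True makes the digest stable across dict key orderings, so identical
# events received via the SSH stream and the HTTP poller hash to the same value.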
def is_event_valid(event):
return isinstance(event, dict) and "type" in event and "eventCreatedOn" in event
class GerritChangeSource(GerritChangeSourceBase):
"""This source will maintain a connection to gerrit ssh server that will provide us gerrit
events in json format. Additionally, connection to gerrit HTTP server may be setup so
that historical events can be fetched to fill any gaps due to Buildbot or Gerrit restarts
or internet connectivity problems.
Important considerations for filling gaps in processed events:
- Gerrit events do not have unique IDs, only eventCreateOn timestamp which is common between
events coming from the HTTP and SSH APIs
- Gerrit HTTP API does not provide any ordering guarantees.
- Gerrit HTTP and SSH APIs return events encoded identically
"""
compare_attrs: ClassVar[Sequence[str]] = ("gerritserver", "gerritport")
name = None
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._stream_connector = None
self._poll_connector = None
self._queued_stream_events = []
# Events are received from stream event source continuously. If HTTP API is not available,
# GerritChangeSource is always in this state.
self._is_synchronized = True
# True if SSH stream did not get events for a long time. It is unclear whether the
        # connection broke or there was simply no activity, so the SSH connection should not be
# restarted. Note that _stream_activity_watchdog is disabled when
# _stream_messages_timeout is True and should be re-enabled when exiting out of this
# condition.
self._stream_messages_timeout = False
# Used for polling if last event timestamp is unknown.
self._start_ts = None
# Stores newest events that have been published for further processing and have identical
# timestamp. This is used to ensure that events are not duplicated across stream and
# polled sources.
self._last_second_events = []
# Contains hashes of self._last_second_events coming from previous run of this service.
# self._last_second_events is not stored directly because of size considerations.
self._last_second_event_hashes = []
self._last_event_ts = None
        # Last event timestamp recorded to the database. Equivalent to self._last_event_ts. A
        # separate variable is kept to support a single database transaction for message batches.
self._last_event_ts_saved = None
self._deferwaiter = deferwaiter.DeferWaiter()
self._poll_handler = None
self._stream_activity_watchdog = None
def checkConfig(
self,
gerritserver,
username,
gerritport=29418,
identity_file=None,
ssh_server_alive_interval_s=15,
ssh_server_alive_count_max=3,
http_url=None,
http_auth=None,
http_poll_interval=30,
**kwargs,
):
if self.name is None:
self.name = f"GerritChangeSource:{username}@{gerritserver}:{gerritport}"
if 'gitBaseURL' not in kwargs:
kwargs['gitBaseURL'] = "automatic at reconfigure"
check_param_int_none(
ssh_server_alive_interval_s, self.__class__, "ssh_server_alive_interval_s"
)
check_param_int_none(
ssh_server_alive_count_max, self.__class__, "ssh_server_alive_count_max"
)
check_param_int(http_poll_interval, self.__class__, "http_poll_interval")
super().checkConfig(**kwargs)
@defer.inlineCallbacks
def reconfigService(
self,
gerritserver,
username,
gerritport=29418,
identity_file=None,
name=None,
ssh_server_alive_interval_s=15,
ssh_server_alive_count_max=3,
http_url=None,
http_auth=None,
http_poll_interval=30,
**kwargs,
):
if 'gitBaseURL' not in kwargs:
kwargs['gitBaseURL'] = f"ssh://{username}@{gerritserver}:{gerritport}"
self.gerritserver = gerritserver
self.gerritport = gerritport
self.username = username
self.identity_file = identity_file
self._http_poll_interval = http_poll_interval
if self._stream_connector is None:
# TODO: this does not support reconfiguration at the moment
self._stream_connector = GerritSshStreamEventsConnector(
self.master.reactor,
self,
gerritserver,
username,
gerritport=gerritport,
identity_file=identity_file,
ssh_server_alive_interval_s=ssh_server_alive_interval_s,
ssh_server_alive_count_max=ssh_server_alive_count_max,
on_process_start_cb=self._stream_process_started,
on_line_received_cb=self._line_received_stream,
)
self._stream_messages_timeout = False
self._oid = yield self.master.db.state.getObjectId(self.name, self.__class__.__name__)
if self._start_ts is None:
self._start_ts = self.master.reactor.seconds()
if self._last_event_ts is None:
self._last_event_ts = yield self.master.db.state.getState(
self._oid, 'last_event_ts', None
)
self._last_second_event_hashes = yield self.master.db.state.getState(
self._oid, "last_event_hashes", None
)
if self._poll_handler is not None:
self._poll_handler.stop()
self._poll_handler = deferwaiter.NonRepeatedActionHandler(
self.master.reactor,
self._deferwaiter,
lambda: self._poll_connector.do_poll(), # pylint: disable=unnecessary-lambda
)
if http_url is not None:
if self._poll_connector is None:
# TODO: this does not support reconfiguration at the moment
self._poll_connector = GerritHttpEventLogPollerConnector(
self.master.reactor,
self,
http_url + "/a",
http_auth,
lambda: self._last_event_ts or self._start_ts,
first_fetch_lookback=0,
on_lines_received_cb=self._lines_received_poll,
)
yield self._poll_connector.setup()
self._is_synchronized = False
else:
self._poll_connector = None
self._is_synchronized = True
self._stream_activity_watchdog = watchdog.Watchdog(
self.master.reactor, self._no_stream_activity_timed_out, self._http_poll_interval
)
yield super().reconfigService(**kwargs)
def activate(self):
self._is_synchronized = self._poll_connector is None
self._stream_connector.start()
self._stream_activity_watchdog.start()
@defer.inlineCallbacks
def deactivate(self):
self._stream_activity_watchdog.stop()
yield self._stream_connector.stop() # Note that this immediately stops event acceptance
self._poll_handler.stop()
yield self._deferwaiter.wait()
if self._last_second_events:
yield self.master.db.state.setState(
self._oid,
"last_event_hashes",
[build_gerrit_event_hash(event) for event in self._last_second_events],
)
else:
yield self.master.db.state.setState(self._oid, "last_event_hashes", None)
def getFiles(self, change, patchset):
return self._stream_connector.get_files(change, patchset)
def _no_stream_activity_timed_out(self):
if self._poll_connector is None:
return
self._stream_messages_timeout = True
self._poll_handler.force()
def _stream_process_started(self):
if self._poll_connector is None:
return
self._is_synchronized = False
self._poll_handler.force()
def _record_last_second_event(self, event, ts):
if self._last_event_ts != ts:
self._last_event_ts = ts
self._last_second_events.clear()
self._last_second_event_hashes = None
self._last_second_events.append(event)
@defer.inlineCallbacks
def _update_last_event_ts(self):
if self._last_event_ts != self._last_event_ts_saved:
self._last_event_ts_saved = self._last_event_ts
yield self.master.db.state.setState(self._oid, "last_event_ts", self._last_event_ts)
@defer.inlineCallbacks
def _line_received_stream(self, line):
self._stream_activity_watchdog.notify()
try:
event = json.loads(bytes2unicode(line))
except ValueError:
log.msg(f"{self.name}: bad json line: {line}")
return
if not is_event_valid(event):
if self.debug:
log.msg(f"no type in event {line}")
return
if not self._is_synchronized:
self._queued_stream_events.append((extract_gerrit_event_time(event), event))
if self._poll_connector is not None:
self._poll_handler.force()
return
if self._stream_messages_timeout:
self._stream_activity_watchdog.start()
self._stream_messages_timeout = False
self._poll_handler.stop()
self._record_last_second_event(event, extract_gerrit_event_time(event))
yield self._update_last_event_ts()
yield self.eventReceived(event)
def _filter_out_already_received_events(self, events):
if self._last_event_ts is None:
return events
filtered_events = []
for ts, event in events:
if ts < self._last_event_ts:
continue
if ts == self._last_event_ts:
if self._last_second_event_hashes is not None:
if build_gerrit_event_hash(event) in self._last_second_event_hashes:
continue
if event in self._last_second_events:
continue
filtered_events.append((ts, event))
return filtered_events
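    # Events carry only a one-second-resolution timestamp, so everything older than
    # the last processed second is dropped outright, and events within that same
    # second are deduplicated against the remembered events (or, after a service
    # restart, against their stored hashes).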
@defer.inlineCallbacks
def _lines_received_poll(self, lines):
if self._is_synchronized and not self._stream_messages_timeout:
return
        # The code below parses all retrieved events, ignores already received ones, submits the
        # rest for processing, and, once events from the polling source are detected to be
        # synchronized with the queued events from the streaming source, switches to the
        # streaming source.
events = []
for line in lines:
try:
event = json.loads(bytes2unicode(line))
except ValueError:
log.msg(f"{self.name}: bad json line: {line}")
continue
if not is_event_valid(event):
if self.debug:
log.msg(f"no type in event {line}")
continue
events.append((extract_gerrit_event_time(event), event))
events = sorted(self._filter_out_already_received_events(events), key=lambda e: e[0])
if not events:
self._poll_handler.schedule(self._http_poll_interval, invoke_again_if_running=True)
return
max_event_ts = events[-1][0]
got_newer_messages_unhandled_before = True
if self._last_event_ts is not None:
got_newer_messages_unhandled_before = max_event_ts > self._last_event_ts
needs_stream_restart = False
if self._stream_messages_timeout and got_newer_messages_unhandled_before:
            # The stream connector has broken: messages would usually come through the stream
            # connector first, yet these new messages were received through the HTTP API.
#
# Note: there must not be any asynchronous code between this check and the start of
# the function to avoid race conditions.
self._is_synchronized = False
self._stream_messages_timeout = False
needs_stream_restart = True
if not self._queued_stream_events or max_event_ts <= self._queued_stream_events[0][0]:
            # The events from the stream source have not caught up - process all events and
            # leave _is_synchronized as False.
for ts, event in events:
self._record_last_second_event(event, ts)
yield self.eventReceived(event)
yield self._update_last_event_ts()
self._poll_handler.schedule(self._http_poll_interval, invoke_again_if_running=True)
if needs_stream_restart:
self._deferwaiter.add(self._stream_connector.restart())
self._stream_activity_watchdog.start()
return
first_queued_ts = self._queued_stream_events[0][0]
# max_event_ts > first_queued_ts which means that:
# - events list is guaranteed to have all events up to first_queued_ts (inclusive)
# - starting with first_queued_ts (exclusive) the stream source has all events.
for ts, event in events:
if ts <= first_queued_ts:
self._record_last_second_event(event, ts)
yield self.eventReceived(event)
i = 0
while i < len(self._queued_stream_events):
ts, event = self._queued_stream_events[i]
if ts == self._last_event_ts and event in self._last_second_events:
i += 1
continue
self._record_last_second_event(event, ts)
yield self.eventReceived(event)
i += 1
self._queued_stream_events.clear()
self._is_synchronized = True
yield self._update_last_event_ts()
if needs_stream_restart:
self._deferwaiter.add(self._stream_connector.restart())
self._stream_activity_watchdog.start()
def describe(self):
status = ""
if not self._stream_connector or not self._stream_connector._process:
status = "[NOT CONNECTED - check log]"
return (
"GerritChangeSource watching the remote "
f"Gerrit repository {self.username}@{self.gerritserver} {status}"
)
class GerritEventLogPoller(GerritChangeSourceBase):
POLL_INTERVAL_SEC = 30
FIRST_FETCH_LOOKBACK_DAYS = 30
def checkConfig(
self,
baseURL,
auth,
pollInterval=POLL_INTERVAL_SEC,
pollAtLaunch=True,
firstFetchLookback=FIRST_FETCH_LOOKBACK_DAYS,
**kwargs,
):
if self.name is None:
self.name = f"GerritEventLogPoller:{baseURL}"
super().checkConfig(**kwargs)
@defer.inlineCallbacks
def reconfigService(
self,
baseURL,
auth,
pollInterval=POLL_INTERVAL_SEC,
pollAtLaunch=True,
firstFetchLookback=FIRST_FETCH_LOOKBACK_DAYS,
**kwargs,
):
yield super().reconfigService(**kwargs)
self._poll_interval = pollInterval
self._poll_at_launch = pollAtLaunch
self._oid = yield self.master.db.state.getObjectId(self.name, self.__class__.__name__)
def get_last_event_ts():
return self.master.db.state.getState(self._oid, 'last_event_ts', None)
self._connector = GerritHttpEventLogPollerConnector(
self.master.reactor,
self,
baseURL,
auth,
get_last_event_ts,
first_fetch_lookback=firstFetchLookback,
on_lines_received_cb=self._lines_received,
)
yield self._connector.setup()
self._poller = util.poll.Poller(self._connector.do_poll, self, self.master.reactor)
def getFiles(self, change, patchset):
return self._connector.get_files(change, patchset)
def force(self):
self._poller()
def activate(self):
self._poller.start(interval=self._poll_interval, now=self._poll_at_launch)
def deactivate(self):
return self._poller.stop()
def describe(self):
msg = "GerritEventLogPoller watching the remote Gerrit repository {}"
return msg.format(self.name)
@defer.inlineCallbacks
def _lines_received(self, lines):
last_event_ts = None
for line in lines:
try:
event = json.loads(bytes2unicode(line))
except ValueError:
log.msg(f"{self.name}: bad json line: {line}")
continue
if not is_event_valid(event):
if self.debug:
log.msg(f"no type in event {line}")
continue
yield super().eventReceived(event)
this_last_event_ts = extract_gerrit_event_time(event)
if last_event_ts is None:
last_event_ts = this_last_event_ts
else:
last_event_ts = max(last_event_ts, this_last_event_ts)
if last_event_ts is not None:
yield self.master.db.state.setState(self._oid, "last_event_ts", last_event_ts)
4,733 | filter.py | buildbot_buildbot/master/buildbot/changes/filter.py
# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
from typing import ClassVar
from typing import Sequence
from buildbot.util import ComparableMixin
from buildbot.util import NotABranch
from buildbot.util.ssfilter import _create_branch_filters
from buildbot.util.ssfilter import _create_filters
from buildbot.util.ssfilter import _create_property_filters
class ChangeFilter(ComparableMixin):
# NOTE: If users use a filter_fn, we have no way to determine whether it has
# changed at reconfig, so the scheduler will always be restarted. That's as
# good as Python can do.
compare_attrs: ClassVar[Sequence[str]] = (
'filter_fn',
'filters',
'property_filters',
'project_fn',
'repository_fn',
'branch_fn',
'category_fn',
'codebase_fn',
)
def __init__(
self,
filter_fn=None, # gets a Change object, returns boolean
project=None,
project_not_eq=None,
project_re=None,
project_not_re=None,
project_fn=None,
repository=None,
repository_not_eq=None,
repository_re=None,
repository_not_re=None,
repository_fn=None,
branch=NotABranch,
branch_not_eq=NotABranch,
branch_re=None,
branch_not_re=None,
branch_fn=None,
category=None,
category_not_eq=None,
category_re=None,
category_not_re=None,
category_fn=None,
codebase=None,
codebase_not_eq=None,
codebase_re=None,
codebase_not_re=None,
codebase_fn=None,
property_eq=None,
property_not_eq=None,
property_re=None,
property_not_re=None,
):
self.filter_fn = filter_fn
self.project_fn = project_fn
self.repository_fn = repository_fn
self.branch_fn = branch_fn
self.category_fn = category_fn
self.codebase_fn = codebase_fn
self.filters = _create_filters(
project, project_not_eq, project_re, project_not_re, 'project'
)
self.filters += _create_filters(
repository,
repository_not_eq,
repository_re,
repository_not_re,
'repository',
)
self.filters += _create_branch_filters(
branch,
branch_not_eq,
branch_re,
branch_not_re,
'branch',
)
self.filters += _create_filters(
category,
category_not_eq,
category_re,
category_not_re,
'category',
)
self.filters += _create_filters(
codebase,
codebase_not_eq,
codebase_re,
codebase_not_re,
'codebase',
)
self.property_filters = _create_property_filters(
property_eq, property_not_eq, property_re, property_not_re, 'property'
)
def filter_change(self, change):
if self.filter_fn is not None and not self.filter_fn(change):
return False
if self.project_fn is not None and not self.project_fn(change.project):
return False
if self.codebase_fn is not None and not self.codebase_fn(change.codebase):
return False
if self.repository_fn is not None and not self.repository_fn(change.repository):
return False
if self.category_fn is not None and not self.category_fn(change.category):
return False
if self.branch_fn is not None and not self.branch_fn(change.branch):
return False
for filter in self.filters:
value = getattr(change, filter.prop, '')
if not filter.is_matched(value):
return False
for filter in self.property_filters:
value = change.properties.getProperty(filter.prop, '')
if not filter.is_matched(value):
return False
return True
def _get_repr_filters(self):
filters = []
if self.filter_fn is not None:
filters.append(f'{self.filter_fn.__name__}()')
if self.project_fn is not None:
filters.append(f'{self.project_fn.__name__}(project)')
if self.codebase_fn is not None:
filters.append(f'{self.codebase_fn.__name__}(codebase)')
if self.repository_fn is not None:
filters.append(f'{self.repository_fn.__name__}(repository)')
if self.category_fn is not None:
filters.append(f'{self.category_fn.__name__}(category)')
if self.branch_fn is not None:
filters.append(f'{self.branch_fn.__name__}(branch)')
filters += [filter.describe() for filter in self.filters]
filters += [filter.describe() for filter in self.property_filters]
return filters
def __repr__(self):
return f"<{self.__class__.__name__} on {' and '.join(self._get_repr_filters())}>"
@staticmethod
def fromSchedulerConstructorArgs(change_filter=None, branch=NotABranch, categories=None):
"""
Static method to create a filter based on constructor args
change_filter, branch, and categories; use default values @code{None},
@code{NotABranch}, and @code{None}, respectively. These arguments are
interpreted as documented for the
L{buildbot.schedulers.basic.Scheduler} class.
@returns: L{ChangeFilter} instance or None for not filtering
"""
# use a change_filter, if given one
if change_filter:
if branch is not NotABranch or categories is not None:
raise RuntimeError("cannot specify both change_filter and branch or categories")
return change_filter
elif branch is not NotABranch or categories:
# build a change filter from the deprecated category and branch
# args
cfargs = {}
if branch is not NotABranch:
cfargs['branch'] = branch
if categories:
cfargs['category'] = categories
return ChangeFilter(**cfargs)
else:
return None
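# Usage sketch (names assumed, not from this module's docs): restrict matching to
# one project's 'main' branch while excluding a category:
#   cf = ChangeFilter(project='acme', branch='main', category_not_eq='docs')
#   cf.filter_change(change)  # -> True or False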
4,734 | hgpoller.py | buildbot_buildbot/master/buildbot/changes/hgpoller.py
# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
import os
import time
from typing import ClassVar
from typing import Sequence
from twisted.internet import defer
from twisted.python import log
from buildbot import config
from buildbot.changes import base
from buildbot.util import bytes2unicode
from buildbot.util import deferredLocked
from buildbot.util import runprocess
from buildbot.util.state import StateMixin
class HgPoller(base.ReconfigurablePollingChangeSource, StateMixin):
"""This source will poll a remote hg repo for changes and submit
them to the change master."""
compare_attrs: ClassVar[Sequence[str]] = (
"repourl",
"branch",
"branches",
"bookmarks",
"workdir",
"pollInterval",
"hgpoller",
"usetimestamps",
"category",
"project",
"pollAtLaunch",
"pollRandomDelayMin",
"pollRandomDelayMax",
)
db_class_name = 'HgPoller'
def __init__(self, repourl, **kwargs):
name = kwargs.get("name", None)
if not name:
branches = self.build_branches(kwargs.get('branch', None), kwargs.get('branches', None))
kwargs["name"] = self.build_name(None, repourl, kwargs.get('bookmarks', None), branches)
self.initLock = defer.DeferredLock()
super().__init__(repourl, **kwargs)
def checkConfig(
self,
repourl,
branch=None,
branches=None,
bookmarks=None,
workdir=None,
pollInterval=10 * 60,
hgbin="hg",
usetimestamps=True,
category=None,
project="",
encoding="utf-8",
name=None,
pollAtLaunch=False,
revlink=lambda branch, revision: (""),
pollRandomDelayMin=0,
pollRandomDelayMax=0,
):
if branch and branches:
config.error("HgPoller: can't specify both branch and branches")
if not callable(revlink):
config.error("You need to provide a valid callable for revlink")
if workdir is None:
config.error("workdir is mandatory for now in HgPoller")
name = self.build_name(name, repourl, bookmarks, self.build_branches(branch, branches))
super().checkConfig(
name=name,
pollInterval=pollInterval,
pollAtLaunch=pollAtLaunch,
pollRandomDelayMin=pollRandomDelayMin,
pollRandomDelayMax=pollRandomDelayMax,
)
@defer.inlineCallbacks
def reconfigService(
self,
repourl,
branch=None,
branches=None,
bookmarks=None,
workdir=None,
pollInterval=10 * 60,
hgbin="hg",
usetimestamps=True,
category=None,
project="",
encoding="utf-8",
name=None,
pollAtLaunch=False,
revlink=lambda branch, revision: (""),
pollRandomDelayMin=0,
pollRandomDelayMax=0,
):
self.repourl = repourl
self.branches = self.build_branches(branch, branches)
self.bookmarks = bookmarks or []
name = self.build_name(name, repourl, bookmarks, self.branches)
if not self.branches and not self.bookmarks:
self.branches = ['default']
self.encoding = encoding
self.lastChange = time.time()
self.lastPoll = time.time()
self.hgbin = hgbin
self.workdir = workdir
self.usetimestamps = usetimestamps
self.category = category if callable(category) else bytes2unicode(category)
self.project = project
self.lastRev = {}
self.revlink_callable = revlink
yield super().reconfigService(
name=name,
pollInterval=pollInterval,
pollAtLaunch=pollAtLaunch,
pollRandomDelayMin=pollRandomDelayMin,
pollRandomDelayMax=pollRandomDelayMax,
)
def build_name(self, name, repourl, bookmarks, branches):
if name is not None:
return name
name = repourl
if bookmarks:
name += "_" + "_".join(bookmarks)
if branches:
name += "_" + "_".join(branches)
return name
def build_branches(self, branch, branches):
if branch:
return [branch]
return branches or []
@defer.inlineCallbacks
def activate(self):
self.lastRev = yield self.getState('lastRev', {})
super().activate()
def describe(self):
status = ""
if not self.master:
status = "[STOPPED - check log]"
return (
f"HgPoller watching the remote Mercurial repository '{self.repourl}', "
f"branches: {', '.join(self.branches)}, in workdir '{self.workdir}' {status}"
)
@deferredLocked('initLock')
@defer.inlineCallbacks
def poll(self):
yield self._getChanges()
yield self._processChanges()
def _absWorkdir(self):
workdir = self.workdir
if os.path.isabs(workdir):
return workdir
return os.path.join(self.master.basedir, workdir)
@defer.inlineCallbacks
def _getRevDetails(self, rev):
"""Return a deferred for (date, author, files, comments) of given rev.
Deferred will be in error if rev is unknown.
"""
command = [
self.hgbin,
'log',
'-r',
rev,
os.linesep.join((
'--template={date|hgdate}',
'{author}',
"{files % '{file}" + os.pathsep + "'}",
'{desc|strip}',
)),
]
# Mercurial fails with status 255 if rev is unknown
rc, output = yield runprocess.run_process(
self.master.reactor,
command,
workdir=self._absWorkdir(),
env=os.environ,
collect_stderr=False,
stderr_is_error=True,
)
if rc != 0:
msg = f'{self}: got error {rc} when getting details for revision {rev}'
raise RuntimeError(msg)
# all file names are on one line
output = output.decode(self.encoding, "replace")
date, author, files, comments = output.split(os.linesep, 3)
if not self.usetimestamps:
stamp = None
else:
try:
stamp = float(date.split()[0])
except Exception:
log.msg(f'hgpoller: caught exception converting output {date!r} to timestamp')
raise
return stamp, author.strip(), files.split(os.pathsep)[:-1], comments.strip()
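    # The template above separates date/author/files/comments with os.linesep and
    # joins file names with os.pathsep; the trailing os.pathsep yields an empty
    # final element, which the [:-1] slice drops.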
def _isRepositoryReady(self):
"""Easy to patch in tests."""
return os.path.exists(os.path.join(self._absWorkdir(), '.hg'))
@defer.inlineCallbacks
def _initRepository(self):
"""Have mercurial init the workdir as a repository (hg init) if needed.
hg init will also create all needed intermediate directories.
"""
if self._isRepositoryReady():
return
log.msg(f'hgpoller: initializing working dir from {self.repourl}')
rc = yield runprocess.run_process(
self.master.reactor,
[self.hgbin, 'init', self._absWorkdir()],
env=os.environ,
collect_stdout=False,
collect_stderr=False,
)
if rc != 0:
self._stopOnFailure()
raise OSError(f'{self}: repository init failed with exit code {rc}')
log.msg(f"hgpoller: finished initializing working dir {self.workdir}")
@defer.inlineCallbacks
def _getChanges(self):
self.lastPoll = time.time()
yield self._initRepository()
log.msg(f"{self}: polling hg repo at {self.repourl}")
command = [self.hgbin, 'pull']
for name in self.branches:
command += ['-b', name]
for name in self.bookmarks:
command += ['-B', name]
command += [self.repourl]
yield runprocess.run_process(
self.master.reactor,
command,
workdir=self._absWorkdir(),
env=os.environ,
collect_stdout=False,
collect_stderr=False,
)
def _getCurrentRev(self, branch='default'):
"""Return a deferred for current numeric rev in state db.
If never has been set, current rev is None.
"""
return self.lastRev.get(branch, None)
def _setCurrentRev(self, rev, branch='default'):
"""Return a deferred to set current revision in persistent state."""
self.lastRev[branch] = str(rev)
return self.setState('lastRev', self.lastRev)
@defer.inlineCallbacks
def _getHead(self, branch):
"""Return a deferred for branch head revision or None.
We'll get an error if there is no head for this branch, which is
        probably a good thing, since it's probably a misspelling (if one is
        really buildbotting a branch that does not have any changesets yet,
        one shouldn't be surprised to get errors)
"""
rc, stdout = yield runprocess.run_process(
self.master.reactor,
[self.hgbin, 'heads', branch, '--template={rev}' + os.linesep],
workdir=self._absWorkdir(),
env=os.environ,
collect_stderr=False,
stderr_is_error=True,
)
if rc != 0:
log.err(f"{self}: could not find revision {branch} in repository {self.repourl}")
return None
if not stdout:
return None
if len(stdout.split()) > 1:
log.err(
f"{self}: caught several heads in branch {branch} "
f"from repository {self.repourl}. Staying at previous revision"
"You should wait until the situation is normal again "
"due to a merge or directly strip if remote repo "
"gets stripped later."
)
return None
        # in case of a whole reconstruction, are we sure that we'll get the
        # same node -> rev assignments?
return stdout.strip().decode(self.encoding)
@defer.inlineCallbacks
def _processChanges(self):
"""Send info about pulled changes to the master and record current.
HgPoller does the recording by moving the working dir to the head
of the branch.
We don't update the tree (unnecessary treatment and waste of space)
instead, we simply store the current rev number in a file.
Recall that hg rev numbers are local and incremental.
"""
for branch in self.branches + self.bookmarks:
rev = yield self._getHead(branch)
if rev is None:
# Nothing pulled?
continue
yield self._processBranchChanges(rev, branch)
@defer.inlineCallbacks
def _getRevNodeList(self, revset):
rc, stdout = yield runprocess.run_process(
self.master.reactor,
[self.hgbin, 'log', '-r', revset, r'--template={rev}:{node}\n'],
workdir=self._absWorkdir(),
env=os.environ,
collect_stdout=True,
collect_stderr=False,
stderr_is_error=True,
)
if rc != 0:
raise OSError(f'{self}: could not get rev node list: {rc}')
results = stdout.decode(self.encoding)
revNodeList = [rn.split(':', 1) for rn in results.strip().split()]
return revNodeList
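    # The '{rev}:{node}' template prints one pair per line, e.g. '12:abcdef...',
    # which split(':', 1) turns into ['12', 'abcdef...'].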
@defer.inlineCallbacks
def _processBranchChanges(self, new_rev, branch):
prev_rev = yield self._getCurrentRev(branch)
if new_rev == prev_rev:
# Nothing new.
return
if prev_rev is None:
# First time monitoring; start at the top.
yield self._setCurrentRev(new_rev, branch)
return
        # two passes for hg log make parsing simpler (comments are multi-line)
revNodeList = yield self._getRevNodeList(f'{prev_rev}::{new_rev}')
# revsets are inclusive. Strip the already-known "current" changeset.
if not revNodeList:
            # an empty revNodeList probably means the branch has changed head (strip or force push?)
# in that case, we should still produce a change for that new rev (but we can't know
# how many parents were pushed)
revNodeList = yield self._getRevNodeList(new_rev)
else:
del revNodeList[0]
log.msg(
f'hgpoller: processing {len(revNodeList)} changes in branch '
f'{branch!r}: {revNodeList!r} in {self._absWorkdir()!r}'
)
for _, node in revNodeList:
timestamp, author, files, comments = yield self._getRevDetails(node)
yield self.master.data.updates.addChange(
author=author,
committer=None,
revision=str(node),
revlink=self.revlink_callable(branch, str(node)),
files=files,
comments=comments,
when_timestamp=int(timestamp) if timestamp else None,
branch=bytes2unicode(branch),
category=bytes2unicode(self.category),
project=bytes2unicode(self.project),
repository=bytes2unicode(self.repourl),
src='hg',
)
# writing after addChange so that a rev is never missed,
# but at once to avoid impact from later errors
yield self._setCurrentRev(new_rev, branch)
def _stopOnFailure(self):
"utility method to stop the service when a failure occurs"
if self.running:
d = defer.maybeDeferred(self.stopService)
d.addErrback(log.err, 'while stopping broken HgPoller service')
4,735 | pb.py | buildbot_buildbot/master/buildbot/changes/pb.py
# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
from typing import ClassVar
from typing import Sequence
from twisted.internet import defer
from twisted.python import log
from buildbot import config
from buildbot.changes import base
from buildbot.pbutil import NewCredPerspective
class ChangePerspective(NewCredPerspective):
def __init__(self, master, prefix):
self.master = master
self.prefix = prefix
def attached(self, mind):
return self
def detached(self, mind):
pass
def perspective_addChange(self, changedict):
log.msg("perspective_addChange called")
if 'revlink' in changedict and not changedict['revlink']:
changedict['revlink'] = ''
if 'repository' in changedict and not changedict['repository']:
changedict['repository'] = ''
if 'project' in changedict and not changedict['project']:
changedict['project'] = ''
if 'files' not in changedict or not changedict['files']:
changedict['files'] = []
if 'committer' in changedict and not changedict['committer']:
changedict['committer'] = None
# rename arguments to new names. Note that the client still uses the
# "old" names (who, when, and isdir), as they are not deprecated yet,
# although the master will accept the new names (author,
# when_timestamp). After a few revisions have passed, we
# can switch the client to use the new names.
if 'who' in changedict:
changedict['author'] = changedict['who']
del changedict['who']
if 'when' in changedict:
changedict['when_timestamp'] = changedict['when']
del changedict['when']
# turn any bytestring keys into unicode, assuming utf8 but just
# replacing unknown characters. Ideally client would send us unicode
# in the first place, but older clients do not, so this fallback is
# useful.
for key in changedict:
if isinstance(changedict[key], bytes):
changedict[key] = changedict[key].decode('utf8', 'replace')
changedict['files'] = list(changedict['files'])
for i, file in enumerate(changedict.get('files', [])):
if isinstance(file, bytes):
changedict['files'][i] = file.decode('utf8', 'replace')
files = []
for path in changedict['files']:
if self.prefix:
if not path.startswith(self.prefix):
# this file does not start with the prefix, so ignore it
continue
path = path[len(self.prefix) :]
files.append(path)
changedict['files'] = files
if not files:
log.msg("No files listed in change... bit strange, but not fatal.")
if "links" in changedict:
log.msg("Found links: " + repr(changedict['links']))
del changedict['links']
d = self.master.data.updates.addChange(**changedict)
# set the return value to None, so we don't get users depending on
# getting a changeid
d.addCallback(lambda _: None)
return d
class PBChangeSource(base.ChangeSource):
    compare_attrs: ClassVar[Sequence[str]] = ("user", "passwd", "port", "prefix")
def __init__(self, user="change", passwd="changepw", port=None, prefix=None, name=None):
if name is None:
if prefix:
name = f"PBChangeSource:{prefix}:{port}"
else:
name = f"PBChangeSource:{port}"
super().__init__(name=name)
self.user = user
self.passwd = passwd
self.port = port
self.prefix = prefix
self.registration = None
self.registered_port = None
def describe(self):
portname = self.registered_port
d = "PBChangeSource listener on " + str(portname)
if self.prefix is not None:
d += f" (prefix '{self.prefix}')"
return d
def _calculatePort(self, cfg):
# calculate the new port, defaulting to the worker's PB port if
# none was specified
port = self.port
if port is None:
port = cfg.protocols.get('pb', {}).get('port')
return port
@defer.inlineCallbacks
def reconfigServiceWithBuildbotConfig(self, new_config):
port = self._calculatePort(new_config)
if not port:
config.error("No port specified for PBChangeSource, and no worker port configured")
# and, if it's changed, re-register
if port != self.registered_port and self.isActive():
yield self._unregister()
yield self._register(port)
yield super().reconfigServiceWithBuildbotConfig(new_config)
@defer.inlineCallbacks
def activate(self):
port = self._calculatePort(self.master.config)
yield self._register(port)
def deactivate(self):
return self._unregister()
@defer.inlineCallbacks
def _register(self, port):
if not port:
return
self.registered_port = port
self.registration = yield self.master.pbmanager.register(
port, self.user, self.passwd, self.getPerspective
)
def _unregister(self):
self.registered_port = None
if self.registration:
reg = self.registration
self.registration = None
return reg.unregister()
return defer.succeed(None)
def getPerspective(self, mind, username):
assert username == self.user
return ChangePerspective(self.master, self.prefix)
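# Configuration sketch (values assumed): in master.cfg,
#   c['change_source'] = PBChangeSource(user='change', passwd='changepw', port=9999)
# With port=None, the source falls back to the worker PB port from c['protocols'].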
4,736 | manager.py | buildbot_buildbot/master/buildbot/changes/manager.py
# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
from __future__ import annotations
from buildbot.process.measured_service import MeasuredBuildbotServiceManager
class ChangeManager(MeasuredBuildbotServiceManager):
name: str | None = "ChangeManager" # type: ignore[assignment]
managed_services_name = "changesources"
config_attr = "change_sources"
4,737 | base.py | buildbot_buildbot/master/buildbot/changes/base.py
# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
from twisted.internet import defer
from twisted.python import log
from zope.interface import implementer
from buildbot import config
from buildbot.interfaces import IChangeSource
from buildbot.util import service
from buildbot.util.poll import method as poll_method
@implementer(IChangeSource)
class ChangeSource(service.ClusteredBuildbotService):
def describe(self):
pass
# activity handling
def activate(self):
return defer.succeed(None)
def deactivate(self):
return defer.succeed(None)
# service handling
def _getServiceId(self):
return self.master.data.updates.findChangeSourceId(self.name)
def _claimService(self):
return self.master.data.updates.trySetChangeSourceMaster(
self.serviceid, self.master.masterid
)
def _unclaimService(self):
return self.master.data.updates.trySetChangeSourceMaster(self.serviceid, None)
class ReconfigurablePollingChangeSource(ChangeSource):
pollInterval = None
pollAtLaunch = None
pollRandomDelayMin = None
pollRandomDelayMax = None
def checkConfig(
self,
name=None,
pollInterval=60 * 10,
pollAtLaunch=False,
pollRandomDelayMin=0,
pollRandomDelayMax=0,
):
super().checkConfig(name=name)
if pollInterval < 0:
config.error(f"interval must be >= 0: {pollInterval}")
if pollRandomDelayMin < 0:
config.error(f"min random delay must be >= 0: {pollRandomDelayMin}")
if pollRandomDelayMax < 0:
config.error(f"max random delay must be >= 0: {pollRandomDelayMax}")
if pollRandomDelayMin > pollRandomDelayMax:
config.error(f"min random delay must be <= {pollRandomDelayMax}: {pollRandomDelayMin}")
if pollRandomDelayMax >= pollInterval:
config.error(f"max random delay must be < {pollInterval}: {pollRandomDelayMax}")
@defer.inlineCallbacks
def reconfigService(
self,
name=None,
pollInterval=60 * 10,
pollAtLaunch=False,
pollRandomDelayMin=0,
pollRandomDelayMax=0,
):
prevPollInterval = self.pollInterval
self.pollInterval = pollInterval
self.pollAtLaunch = pollAtLaunch
self.pollRandomDelayMin = pollRandomDelayMin
self.pollRandomDelayMax = pollRandomDelayMax
yield super().reconfigService(name=name)
        # pollInterval is the only value for which a reconfigure check makes sense.
if prevPollInterval != pollInterval and self.doPoll.running:
yield self.doPoll.stop()
            # As an implementation detail, the poller will poll at reconfigure if the poll
            # interval changes and pollAtLaunch=True
yield self.doPoll.start(
interval=self.pollInterval,
now=self.pollAtLaunch,
random_delay_min=self.pollRandomDelayMin,
random_delay_max=self.pollRandomDelayMax,
)
def poll(self):
pass
@poll_method
def doPoll(self):
d = defer.maybeDeferred(self.poll)
d.addErrback(log.err, f'{self}: while polling for changes')
return d
def force(self):
self.doPoll()
def activate(self):
self.doPoll.start(
interval=self.pollInterval,
now=self.pollAtLaunch,
random_delay_min=self.pollRandomDelayMin,
random_delay_max=self.pollRandomDelayMax,
)
def deactivate(self):
return self.doPoll.stop()
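# Minimal subclass sketch (illustrative; poll() may return a value or a Deferred,
# since doPoll wraps it in defer.maybeDeferred):
#   class MyPoller(ReconfigurablePollingChangeSource):
#       def poll(self):
#           log.msg("polling...")  # query the remote system and add changes here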
4,738 | changes.py | buildbot_buildbot/master/buildbot/changes/changes.py
# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
from __future__ import annotations
import html
import time
from typing import TYPE_CHECKING
from twisted.internet import defer
from twisted.python import log
from buildbot import util
from buildbot.process.properties import Properties
from buildbot.util import datetime2epoch
if TYPE_CHECKING:
from buildbot.db.changes import ChangeModel
class Change:
"""I represent a single change to the source tree. This may involve several
files, but they are all changed by the same person, and there is a change
comment for the group as a whole."""
number: int | None = None
branch: str | None = None
category: str | None = None
revision: str | None = None # used to create a source-stamp
links: list[str] = [] # links are gone, but upgrade code expects this attribute
@classmethod
def fromChdict(cls, master, chdict: ChangeModel) -> Change:
"""
Class method to create a L{Change} from a L{ChangeModel} as returned
by L{ChangesConnectorComponent.getChange}.
@param master: build master instance
@param chdict: change model
@returns: L{Change} via Deferred
"""
cache = master.caches.get_cache("Changes", cls._make_ch)
return cache.get(chdict.changeid, chdict=chdict, master=master)
@classmethod
def _make_ch(cls, changeid: int, master, chdict: ChangeModel) -> defer.Deferred[Change]:
change = cls(None, None, None, _fromChdict=True)
change.who = chdict.author
change.committer = chdict.committer
change.comments = chdict.comments
change.revision = chdict.revision
change.branch = chdict.branch
change.category = chdict.category
change.revlink = chdict.revlink
change.repository = chdict.repository
change.codebase = chdict.codebase
change.project = chdict.project
change.number = chdict.changeid
when = chdict.when_timestamp
if when:
when = datetime2epoch(when)
change.when = when
change.files = sorted(chdict.files)
change.properties = Properties()
for n, (v, s) in chdict.properties.items():
change.properties.setProperty(n, v, s)
return defer.succeed(change)
def __init__(
self,
who,
files,
comments,
committer=None,
revision=None,
when=None,
branch=None,
category=None,
revlink='',
properties=None,
repository='',
codebase='',
project='',
_fromChdict=False,
):
if properties is None:
properties = {}
# skip all this madness if we're being built from the database
if _fromChdict:
return
self.who = who
self.committer = committer
self.comments = comments
def none_or_unicode(x):
if x is None:
return x
return str(x)
self.revision = none_or_unicode(revision)
now = util.now()
if when is None:
self.when = now
elif when > now:
# this happens when the committing system has an incorrect clock, for example.
# handle it gracefully
log.msg("received a Change with when > now; assuming the change happened now")
self.when = now
else:
self.when = when
self.branch = none_or_unicode(branch)
self.category = none_or_unicode(category)
self.revlink = revlink
self.properties = Properties()
self.properties.update(properties, "Change")
self.repository = repository
self.codebase = codebase
self.project = project
# keep a sorted list of the files, for easier display
self.files = sorted(files or [])
def __setstate__(self, dict):
self.__dict__ = dict
# Older Changes won't have a 'properties' attribute in them
if not hasattr(self, 'properties'):
self.properties = Properties()
if not hasattr(self, 'revlink'):
self.revlink = ""
def __str__(self):
return (
"Change(revision=%r, who=%r, committer=%r, branch=%r, comments=%r, "
+ "when=%r, category=%r, project=%r, repository=%r, "
+ "codebase=%r)"
) % (
self.revision,
self.who,
self.committer,
self.branch,
self.comments,
self.when,
self.category,
self.project,
self.repository,
self.codebase,
)
def __eq__(self, other):
return self.number == other.number
def __ne__(self, other):
return self.number != other.number
def __lt__(self, other):
return self.number < other.number
def __le__(self, other):
return self.number <= other.number
def __gt__(self, other):
return self.number > other.number
def __ge__(self, other):
return self.number >= other.number
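    # Changes compare and sort by their database-assigned number, i.e. arrival order.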
def asText(self):
data = ""
data += "Files:\n"
for f in self.files:
data += f" {f}\n"
if self.repository:
data += f"On: {self.repository}\n"
if self.project:
data += f"For: {self.project}\n"
data += f"At: {self.getTime()}\n"
data += f"Changed By: {self.who}\n"
data += f"Committed By: {self.committer}\n"
data += f"Comments: {self.comments}"
data += "Properties: \n"
for prop in self.properties.asList():
data += f" {prop[0]}: {prop[1]}"
data += '\n\n'
return data
def asDict(self):
"""returns a dictionary with suitable info for html/mail rendering"""
files = [{"name": f} for f in self.files]
files.sort(key=lambda a: a['name'])
result = {
# Constant
'number': self.number,
'branch': self.branch,
'category': self.category,
'who': self.getShortAuthor(),
'committer': self.committer,
'comments': self.comments,
'revision': self.revision,
'rev': self.revision,
'when': self.when,
'at': self.getTime(),
'files': files,
'revlink': getattr(self, 'revlink', None),
'properties': self.properties.asList(),
'repository': getattr(self, 'repository', None),
'codebase': getattr(self, 'codebase', ''),
'project': getattr(self, 'project', None),
}
return result
def getShortAuthor(self):
return self.who
def getTime(self):
if not self.when:
return "?"
return time.strftime("%a %d %b %Y %H:%M:%S", time.localtime(self.when))
def getTimes(self):
return (self.when, None)
def getText(self):
return [html.escape(self.who)]
def getLogs(self):
return {}
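# Illustrative usage sketch (not part of the original module; all values are
# made up). A master-side script might construct a Change directly:
#
#     change = Change(
#         who='jane@example.com',
#         files=['src/main.c', 'src/main.h'],
#         comments='Fix a null dereference',
#         revision='1234',
#         branch='trunk',
#     )
#     print(change.asText())   # human-readable summary
#     info = change.asDict()   # dict suitable for html/mail rendering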
4,739 | p4poller.py | buildbot_buildbot/master/buildbot/changes/p4poller.py
# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Portions Copyright Buildbot Team Members
# Portions Copyright 2011 National Instruments
# Many thanks to Dave Peticolas for contributing this module
import datetime
import os
import re
from typing import ClassVar
from typing import Sequence
import dateutil.tz
from twisted.internet import defer
from twisted.internet import protocol
from twisted.internet import reactor
from twisted.python import log
from buildbot import config
from buildbot import util
from buildbot.changes import base
from buildbot.util import bytes2unicode
from buildbot.util import runprocess
debug_logging = False
class P4PollerError(Exception):
"""Something went wrong with the poll. This is used as a distinctive
exception type so that unit tests can detect and ignore it."""
class TicketLoginProtocol(protocol.ProcessProtocol):
"""Twisted process protocol to run `p4 login` and enter our password
in the stdin."""
def __init__(self, stdin, p4base):
self.deferred = defer.Deferred()
self.stdin = stdin.encode('ascii')
self.stdout = b''
self.stderr = b''
self.p4base = p4base
def connectionMade(self):
if self.stdin:
if debug_logging:
log.msg(f"P4Poller: entering password for {self.p4base}: {self.stdin}")
self.transport.write(self.stdin)
self.transport.closeStdin()
def processEnded(self, reason):
if debug_logging:
log.msg(f"P4Poller: login process finished for {self.p4base}: {reason.value.exitCode}")
self.deferred.callback(reason.value.exitCode)
def outReceived(self, data):
if debug_logging:
log.msg(f"P4Poller: login stdout for {self.p4base}: {data}")
self.stdout += data
def errReceived(self, data):
if debug_logging:
log.msg(f"P4Poller: login stderr for {self.p4base}: {data}")
self.stderr += data
def get_simple_split(branchfile):
"""Splits the branchfile argument and assuming branch is
the first path component in branchfile, will return
branch and file else None."""
index = branchfile.find('/')
if index == -1:
return None, None
branch, file = branchfile.split('/', 1)
return branch, file
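# For example (hypothetical input), get_simple_split('trunk/src/main.c')
# returns ('trunk', 'src/main.c'), while get_simple_split('README') contains
# no '/' and returns (None, None).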
class P4Source(base.ReconfigurablePollingChangeSource, util.ComparableMixin):
"""This source will poll a perforce repository for changes and submit
them to the change master."""
compare_attrs: ClassVar[Sequence[str]] = (
"p4port",
"p4user",
"p4passwd",
"p4base",
"p4bin",
"pollInterval",
"pollAtLaunch",
"server_tz",
"pollRandomDelayMin",
"pollRandomDelayMax",
)
env_vars = [
"P4CLIENT",
"P4PORT",
"P4PASSWD",
"P4USER",
"P4CHARSET",
"P4CONFIG",
"P4TICKETS",
"PATH",
"HOME",
]
changes_line_re = re.compile(r"Change (?P<num>\d+) on \S+ by \S+@\S+ '.*'$")
describe_header_re = re.compile(r"Change \d+ by (?P<who>\S+)@\S+ on (?P<when>.+)$")
file_re = re.compile(r"^\.\.\. (?P<path>[^#]+)#\d+ [/\w]+$")
datefmt = '%Y/%m/%d %H:%M:%S'
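    # Sample lines the patterns above are written against (values made up):
    #   changes_line_re:    "Change 1234 on 2024/01/02 by jdoe@ws 'fix the build'"
    #   describe_header_re: "Change 1234 by jdoe@ws on 2024/01/02 12:34:56"
    #   file_re:            "... //depot/main/src/foo.c#3 edit"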
parent = None # filled in when we're added
last_change = None
loop = None
def __init__(self, **kwargs):
name = kwargs.get("name", None)
if name is None:
kwargs['name'] = self.build_name(
name, kwargs.get('p4port', None), kwargs.get('p4base', '//')
)
super().__init__(**kwargs)
def checkConfig(
self,
p4port=None,
p4user=None,
p4passwd=None,
p4base="//",
p4bin="p4",
split_file=lambda branchfile: (None, branchfile),
pollInterval=60 * 10,
histmax=None,
encoding="utf8",
project=None,
name=None,
use_tickets=False,
ticket_login_interval=60 * 60 * 24,
server_tz=None,
pollAtLaunch=False,
revlink=lambda branch, revision: (""),
resolvewho=lambda who: (who),
pollRandomDelayMin=0,
pollRandomDelayMax=0,
):
name = self.build_name(name, p4port, p4base)
if use_tickets and not p4passwd:
config.error("You need to provide a P4 password to use ticket authentication")
if not callable(revlink):
config.error("You need to provide a valid callable for revlink")
if not callable(resolvewho):
config.error("You need to provide a valid callable for resolvewho")
if server_tz is not None and dateutil.tz.gettz(server_tz) is None:
raise P4PollerError(f"Failed to get timezone from server_tz string '{server_tz}'")
super().checkConfig(
name=name,
pollInterval=pollInterval,
pollAtLaunch=pollAtLaunch,
pollRandomDelayMin=pollRandomDelayMin,
pollRandomDelayMax=pollRandomDelayMax,
)
@defer.inlineCallbacks
def reconfigService(
self,
p4port=None,
p4user=None,
p4passwd=None,
p4base="//",
p4bin="p4",
split_file=lambda branchfile: (None, branchfile),
pollInterval=60 * 10,
histmax=None,
encoding="utf8",
project=None,
name=None,
use_tickets=False,
ticket_login_interval=60 * 60 * 24,
server_tz=None,
pollAtLaunch=False,
revlink=lambda branch, revision: (""),
resolvewho=lambda who: (who),
pollRandomDelayMin=0,
pollRandomDelayMax=0,
):
name = self.build_name(name, p4port, p4base)
if project is None:
project = ''
self.p4port = p4port
self.p4user = p4user
self.p4passwd = p4passwd
self.p4base = p4base
self.p4bin = p4bin
self.split_file = split_file
self.encoding = encoding
self.project = util.bytes2unicode(project)
self.use_tickets = use_tickets
self.ticket_login_interval = ticket_login_interval
self.revlink_callable = revlink
self.resolvewho_callable = resolvewho
self.server_tz = dateutil.tz.gettz(server_tz) if server_tz else None
self._ticket_login_counter = 0
yield super().reconfigService(
name=name,
pollInterval=pollInterval,
pollAtLaunch=pollAtLaunch,
pollRandomDelayMin=pollRandomDelayMin,
pollRandomDelayMax=pollRandomDelayMax,
)
def build_name(self, name, p4port, p4base):
if name is not None:
return name
return f"P4Source:{p4port}:{p4base}"
def describe(self):
return f"p4source {self.p4port} {self.p4base}"
def poll(self):
d = self._poll()
d.addErrback(log.err, f'P4 poll failed on {self.p4port}, {self.p4base}')
return d
@defer.inlineCallbacks
def _get_process_output(self, args):
env = {e: os.environ.get(e) for e in self.env_vars if os.environ.get(e)}
res, out = yield runprocess.run_process(
self.master.reactor,
[self.p4bin, *args],
env=env,
collect_stderr=False,
stderr_is_error=True,
)
if res != 0:
raise P4PollerError(f'Failed to run {self.p4bin}')
return out
def _acquireTicket(self, protocol):
command = [
self.p4bin,
]
if self.p4port:
command.extend(['-p', self.p4port])
if self.p4user:
command.extend(['-u', self.p4user])
command.append('login')
command = [c.encode('utf-8') for c in command]
reactor.spawnProcess(protocol, self.p4bin, command, env=os.environ)
@defer.inlineCallbacks
def _poll(self):
if self.use_tickets:
self._ticket_login_counter -= 1
if self._ticket_login_counter <= 0:
# Re-acquire the ticket and reset the counter.
log.msg(f"P4Poller: (re)acquiring P4 ticket for {self.p4base}...")
protocol = TicketLoginProtocol(self.p4passwd + "\n", self.p4base)
self._acquireTicket(protocol)
yield protocol.deferred
args = []
if self.p4port:
args.extend(['-p', self.p4port])
if not self.use_tickets:
if self.p4user:
args.extend(['-u', self.p4user])
if self.p4passwd:
args.extend(['-P', self.p4passwd])
args.extend(['changes'])
if self.last_change is not None:
args.extend([f'{self.p4base}...@{self.last_change + 1},#head'])
else:
args.extend(['-m', '1', f'{self.p4base}...'])
result = yield self._get_process_output(args)
# decode the result from its designated encoding
try:
result = bytes2unicode(result, self.encoding)
except UnicodeError as ex:
log.msg(f"{ex}: cannot fully decode {result!r} in {self.encoding}")
result = bytes2unicode(result, encoding=self.encoding, errors="replace")
last_change = self.last_change
changelists = []
for line in result.split('\n'):
line = line.strip()
if not line:
continue
m = self.changes_line_re.match(line)
if not m:
raise P4PollerError(f"Unexpected 'p4 changes' output: {result!r}")
num = int(m.group('num'))
if last_change is None:
# first time through, the poller just gets a "baseline" for where to
# start on the next poll
log.msg(f'P4Poller: starting at change {num}')
self.last_change = num
return
changelists.append(num)
changelists.reverse() # oldest first
# Retrieve each sequentially.
for num in changelists:
args = []
if self.p4port:
args.extend(['-p', self.p4port])
if not self.use_tickets:
if self.p4user:
args.extend(['-u', self.p4user])
if self.p4passwd:
args.extend(['-P', self.p4passwd])
args.extend(['describe', '-s', str(num)])
result = yield self._get_process_output(args)
# decode the result from its designated encoding
try:
result = bytes2unicode(result, self.encoding)
except UnicodeError as ex:
log.msg(f"P4Poller: couldn't decode changelist description: {ex.encoding}")
log.msg(f"P4Poller: in object: {ex.object}")
log.err(f"P4Poller: poll failed on {self.p4port}, {self.p4base}")
raise
lines = result.split('\n')
# SF#1555985: Wade Brainerd reports a stray ^M at the end of the date
# field. The rstrip() is intended to remove that.
lines[0] = lines[0].rstrip()
m = self.describe_header_re.match(lines[0])
if not m:
raise P4PollerError(f"Unexpected 'p4 describe -s' result: {result!r}")
who = self.resolvewho_callable(m.group('who'))
when = datetime.datetime.strptime(m.group('when'), self.datefmt)
if self.server_tz:
# Convert from the server's timezone to the local timezone.
when = when.replace(tzinfo=self.server_tz)
when = util.datetime2epoch(when)
comment_lines = []
lines.pop(0) # describe header
lines.pop(0) # blank line
while not lines[0].startswith('Affected files'):
if lines[0].startswith('\t'): # comment is indented by one tab
comment_lines.append(lines.pop(0)[1:])
else:
lines.pop(0) # discard non comment line
comments = '\n'.join(comment_lines)
lines.pop(0) # affected files
branch_files = {} # dict for branch mapped to file(s)
while lines:
line = lines.pop(0).strip()
if not line:
continue
m = self.file_re.match(line)
if not m:
raise P4PollerError(f"Invalid file line: {line!r}")
path = m.group('path')
if path.startswith(self.p4base):
branch, file = self.split_file(path[len(self.p4base) :])
if branch is None and file is None:
continue
if branch in branch_files:
branch_files[branch].append(file)
else:
branch_files[branch] = [file]
for branch, files in branch_files.items():
yield self.master.data.updates.addChange(
author=who,
committer=None,
files=files,
comments=comments,
revision=str(num),
when_timestamp=when,
branch=branch,
project=self.project,
revlink=self.revlink_callable(branch, str(num)),
)
self.last_change = num
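# Configuration sketch (assumed values, not part of the original file):
#
#     from buildbot.changes.p4poller import P4Source, get_simple_split
#     c['change_source'] = P4Source(
#         p4port='perforce.example.com:1666',
#         p4user='buildbot',
#         p4base='//depot/',
#         split_file=get_simple_split,
#         pollInterval=600,
#     )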
4,740 | mail.py | buildbot_buildbot/master/buildbot/changes/mail.py
# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
"""
Parse various kinds of 'CVS notify' email.
"""
from __future__ import annotations
import calendar
import datetime
import re
import time
from email import message_from_file
from email.iterators import body_line_iterator
from email.utils import mktime_tz
from email.utils import parseaddr
from email.utils import parsedate_tz
from typing import ClassVar
from typing import Sequence
from twisted.internet import defer
from twisted.python import log
from zope.interface import implementer
from buildbot import util
from buildbot.interfaces import IChangeSource
from buildbot.util.maildir import MaildirService
@implementer(IChangeSource)
class MaildirSource(MaildirService, util.ComparableMixin):
"""Generic base class for Maildir-based change sources"""
compare_attrs: ClassVar[Sequence[str]] = ("basedir", "pollInterval", "prefix")
# twisted is marked as typed, but doesn't specify this type correctly
name: str | None = 'MaildirSource' # type: ignore[assignment]
def __init__(self, maildir, prefix=None, category='', repository=''):
super().__init__(maildir)
self.prefix = prefix
self.category = category
self.repository = repository
if prefix and not prefix.endswith("/"):
log.msg(
f"MaildirSource: you probably want your prefix=('{prefix}') to end with " "a slash"
)
def describe(self):
return f"{self.__class__.__name__} watching maildir '{self.basedir}'"
@defer.inlineCallbacks
def messageReceived(self, filename):
with self.moveToCurDir(filename) as f:
chtuple = self.parse_file(f, self.prefix)
src = None
chdict = None
if chtuple:
src, chdict = chtuple
if chdict:
yield self.master.data.updates.addChange(src=str(src), **chdict)
else:
log.msg(f"no change found in maildir file '{filename}'")
def parse_file(self, fd, prefix=None):
m = message_from_file(fd)
return self.parse(m, prefix)
class CVSMaildirSource(MaildirSource):
name = "CVSMaildirSource"
def __init__(self, maildir, prefix=None, category='', repository='', properties=None):
super().__init__(maildir, prefix, category, repository)
if properties is None:
properties = {}
self.properties = properties
def parse(self, m, prefix=None):
"""Parse messages sent by the 'buildbot-cvs-mail' program."""
# The mail is sent from the person doing the checkin. Assume that the
# local username is enough to identify them (this assumes a one-server
# cvs-over-rsh environment rather than the server-dirs-shared-over-NFS
# model)
_, addr = parseaddr(m["from"])
if not addr:
# no From means this message isn't from buildbot-cvs-mail
return None
at = addr.find("@")
if at == -1:
author = addr # might still be useful
else:
author = addr[:at]
author = util.bytes2unicode(author, encoding="ascii")
# CVS accepts RFC822 dates. buildbot-cvs-mail adds the date as
# part of the mail header, so use that.
# This assumes cvs is being access via ssh or pserver, so the time
# will be the CVS server's time.
# calculate a "revision" based on that timestamp, or the current time
# if we're unable to parse the date.
log.msg('Processing CVS mail')
dateTuple = parsedate_tz(m["date"])
if dateTuple is None:
when = util.now()
else:
when = mktime_tz(dateTuple)
theTime = datetime.datetime.fromtimestamp(float(when), datetime.timezone.utc)
rev = theTime.strftime('%Y-%m-%d %H:%M:%S')
catRE = re.compile(r'^Category:\s*(\S.*)')
cvsRE = re.compile(r'^CVSROOT:\s*(\S.*)')
cvsmodeRE = re.compile(r'^Cvsmode:\s*(\S.*)')
filesRE = re.compile(r'^Files:\s*(\S.*)')
modRE = re.compile(r'^Module:\s*(\S.*)')
pathRE = re.compile(r'^Path:\s*(\S.*)')
projRE = re.compile(r'^Project:\s*(\S.*)')
singleFileRE = re.compile(r'(.*) (NONE|\d(\.|\d)+) (NONE|\d(\.|\d)+)')
tagRE = re.compile(r'^\s+Tag:\s*(\S.*)')
updateRE = re.compile(r'^Update of:\s*(\S.*)')
comments = ""
branch = None
category = None
cvsroot = None
cvsmode = None
fileList = None
files = []
isdir = 0
path = None
project = None
lines = list(body_line_iterator(m))
while lines:
line = lines.pop(0)
m = catRE.match(line)
if m:
category = m.group(1)
continue
m = cvsRE.match(line)
if m:
cvsroot = m.group(1)
continue
m = cvsmodeRE.match(line)
if m:
cvsmode = m.group(1)
continue
m = filesRE.match(line)
if m:
fileList = m.group(1)
continue
m = modRE.match(line)
if m:
# We don't actually use this
# module = m.group(1)
continue
m = pathRE.match(line)
if m:
path = m.group(1)
continue
m = projRE.match(line)
if m:
project = m.group(1)
continue
m = tagRE.match(line)
if m:
branch = m.group(1)
continue
m = updateRE.match(line)
if m:
# We don't actually use this
# updateof = m.group(1)
continue
if line == "Log Message:\n":
break
# CVS 1.11 lists files as:
# repo/path file,old-version,new-version file2,old-version,new-version
# Version 1.12 lists files as:
# file1 old-version new-version file2 old-version new-version
#
# files consists of tuples of 'file-name old-version new-version'
# The versions are either dotted-decimal version numbers, ie 1.1
# or NONE. New files are of the form 'NONE NUMBER', while removed
# files are 'NUMBER NONE'. 'NONE' is a literal string
# Parsing this instead of files list in 'Added File:' etc
# makes it possible to handle files with embedded spaces, though
# it could fail if the filename was 'bad 1.1 1.2'
# For cvs version 1.11, we expect
# my_module new_file.c,NONE,1.1
# my_module removed.txt,1.2,NONE
# my_module modified_file.c,1.1,1.2
# While cvs version 1.12 gives us
# new_file.c NONE 1.1
# removed.txt 1.2 NONE
        #   modified_file.c 1.1 1.2
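        # Worked example (hypothetical): with cvsmode '1.12' and path 'mymod',
        #   fileList = 'new_file.c NONE 1.1 removed.txt 1.2 NONE'
        # parses into files == ['mymod/new_file.c', 'mymod/removed.txt'].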
if fileList is None:
log.msg('CVSMaildirSource Mail with no files. Ignoring')
return None # We don't have any files. Email not from CVS
if cvsmode == '1.11':
# Please, no repo paths with spaces!
m = re.search('([^ ]*) ', fileList)
if m:
path = m.group(1)
else:
log.msg('CVSMaildirSource can\'t get path from file list. Ignoring mail')
return None
fileList = fileList[len(path) :].strip()
singleFileRE = re.compile(
r'(.+?),(NONE|(?:\d+\.(?:\d+\.\d+\.)*\d+)),(NONE|(?:\d+\.(?:\d+\.\d+\.)*\d+))(?: |$)'
)
elif cvsmode == '1.12':
singleFileRE = re.compile(
r'(.+?) (NONE|(?:\d+\.(?:\d+\.\d+\.)*\d+)) (NONE|(?:\d+\.(?:\d+\.\d+\.)*\d+))(?: |$)'
)
if path is None:
                raise ValueError('CVSMaildirSource cvs 1.12 requires a path. Check cvs loginfo config')
else:
            raise ValueError(f'Expected cvsmode 1.11 or 1.12, got: {cvsmode}')
log.msg(f"CVSMaildirSource processing filelist: {fileList}")
while fileList:
m = singleFileRE.match(fileList)
if m:
curFile = path + '/' + m.group(1)
files.append(curFile)
fileList = fileList[m.end() :]
else:
log.msg('CVSMaildirSource no files matched regex. Ignoring')
return None # bail - we couldn't parse the files that changed
# Now get comments
while lines:
line = lines.pop(0)
comments += line
comments = comments.rstrip() + "\n"
if comments == '\n':
comments = None
return (
'cvs',
{
"author": author,
"committer": None,
"files": files,
"comments": comments,
"isdir": isdir,
"when": when,
"branch": branch,
"revision": rev,
"category": category,
"repository": cvsroot,
"project": project,
"properties": self.properties,
},
)
# svn "commit-email.pl" handler. The format is very similar to freshcvs mail;
# here's a sample:
# From: username [at] apache.org [slightly obfuscated to avoid spam here]
# To: commits [at] spamassassin.apache.org
# Subject: svn commit: r105955 - in spamassassin/trunk: . lib/Mail
# ...
#
# Author: username
# Date: Sat Nov 20 00:17:49 2004 [note: TZ = local tz on server!]
# New Revision: 105955
#
# Modified: [also Removed: and Added:]
# [filename]
# ...
# Log:
# [log message]
# ...
#
#
# Modified: spamassassin/trunk/lib/Mail/SpamAssassin.pm
# [unified diff]
#
# [end of mail]
class SVNCommitEmailMaildirSource(MaildirSource):
name = "SVN commit-email.pl"
def parse(self, m, prefix=None):
"""Parse messages sent by the svn 'commit-email.pl' trigger."""
# The mail is sent from the person doing the checkin. Assume that the
# local username is enough to identify them (this assumes a one-server
# cvs-over-rsh environment rather than the server-dirs-shared-over-NFS
# model)
_, addr = parseaddr(m["from"])
if not addr:
return None # no From means this message isn't from svn
at = addr.find("@")
if at == -1:
author = addr # might still be useful
else:
author = addr[:at]
# we take the time of receipt as the time of checkin. Not correct (it
# depends upon the email latency), but it avoids the
# out-of-order-changes issue. Also syncmail doesn't give us anything
# better to work with, unless you count pulling the v1-vs-v2
# timestamp out of the diffs, which would be ugly. TODO: Pulling the
# 'Date:' header from the mail is a possibility, and
# email.utils.parsedate_tz may be useful. It should be configurable,
# however, because there are a lot of broken clocks out there.
when = util.now()
files = []
comments = ""
lines = list(body_line_iterator(m))
rev = None
while lines:
line = lines.pop(0)
# "Author: jmason"
match = re.search(r"^Author: (\S+)", line)
if match:
author = match.group(1)
# "New Revision: 105955"
match = re.search(r"^New Revision: (\d+)", line)
if match:
rev = match.group(1)
# possible TODO: use "Date: ..." data here instead of time of
# commit message receipt, above. however, this timestamp is
# specified *without* a timezone, in the server's local TZ, so to
# be accurate buildbot would need a config setting to specify the
# source server's expected TZ setting! messy.
# this stanza ends with the "Log:"
if line == "Log:\n":
break
# commit message is terminated by the file-listing section
while lines:
line = lines.pop(0)
if line in ("Modified:\n", "Added:\n", "Removed:\n"):
break
comments += line
comments = comments.rstrip() + "\n"
while lines:
line = lines.pop(0)
if line == "\n":
break
if line.find("Modified:\n") == 0:
continue # ignore this line
if line.find("Added:\n") == 0:
continue # ignore this line
if line.find("Removed:\n") == 0:
continue # ignore this line
line = line.strip()
thesefiles = line.split(" ")
for f in thesefiles:
if prefix:
# insist that the file start with the prefix: we may get
# changes we don't care about too
if f.startswith(prefix):
f = f[len(prefix) :]
else:
log.msg(
f"ignored file from svn commit: prefix '{prefix}' "
f"does not match filename '{f}'"
)
continue
# TODO: figure out how new directories are described, set
# .isdir
files.append(f)
if not files:
log.msg("no matching files found, ignoring commit")
return None
return (
'svn',
{
"author": author,
"committer": None,
"files": files,
"comments": comments,
"when": when,
"revision": rev,
},
)
# bzr Launchpad branch subscription mails. Sample mail:
#
# From: [email protected]
# Subject: [Branch ~knielsen/maria/tmp-buildbot-test] Rev 2701: test add file
# To: Joe <[email protected]>
# ...
#
# ------------------------------------------------------------
# revno: 2701
# committer: Joe <[email protected]>
# branch nick: tmpbb
# timestamp: Fri 2009-05-15 10:35:43 +0200
# message:
# test add file
# added:
# test-add-file
#
#
# --
#
# https://code.launchpad.net/~knielsen/maria/tmp-buildbot-test
#
# You are subscribed to branch lp:~knielsen/maria/tmp-buildbot-test.
# To unsubscribe from this branch go to
# https://code.launchpad.net/~knielsen/maria/tmp-buildbot-test/+edit-subscription.
#
# [end of mail]
class BzrLaunchpadEmailMaildirSource(MaildirSource):
name = "Launchpad"
compare_attrs: ClassVar[Sequence[str]] = ("branchMap", "defaultBranch")
def __init__(self, maildir, prefix=None, branchMap=None, defaultBranch=None, **kwargs):
self.branchMap = branchMap
self.defaultBranch = defaultBranch
super().__init__(maildir, prefix, **kwargs)
def parse(self, m, prefix=None):
"""Parse branch notification messages sent by Launchpad."""
subject = m["subject"]
match = re.search(r"^\s*\[Branch\s+([^]]+)\]", subject)
if match:
repository = match.group(1)
else:
repository = None
# Put these into a dictionary, otherwise we cannot assign them
# from nested function definitions.
d = {'files': [], 'comments': ""}
gobbler = None
rev = None
author = None
when = util.now()
def gobble_comment(s):
d['comments'] += s + "\n"
def gobble_removed(s):
d['files'].append(f'{s} REMOVED')
def gobble_added(s):
d['files'].append(f'{s} ADDED')
def gobble_modified(s):
d['files'].append(f'{s} MODIFIED')
def gobble_renamed(s):
match = re.search(r"^(.+) => (.+)$", s)
if match:
d['files'].append(f'{match.group(1)} RENAMED {match.group(2)}')
else:
d['files'].append(f'{s} RENAMED')
lines = list(body_line_iterator(m, True))
rev = None
while lines:
line = str(lines.pop(0), "utf-8", errors="ignore")
# revno: 101
match = re.search(r"^revno: ([0-9.]+)", line)
if match:
rev = match.group(1)
# committer: Joe <[email protected]>
match = re.search(r"^committer: (.*)$", line)
if match:
author = match.group(1)
# timestamp: Fri 2009-05-15 10:35:43 +0200
# datetime.strptime() is supposed to support %z for time zone, but
# it does not seem to work. So handle the time zone manually.
match = re.search(
r"^timestamp: [a-zA-Z]{3} (\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}) ([-+])(\d{2})(\d{2})$",
line,
)
if match:
datestr = match.group(1)
tz_sign = match.group(2)
tz_hours = match.group(3)
tz_minutes = match.group(4)
when = parseLaunchpadDate(datestr, tz_sign, tz_hours, tz_minutes)
if re.search(r"^message:\s*$", line):
gobbler = gobble_comment
elif re.search(r"^removed:\s*$", line):
gobbler = gobble_removed
elif re.search(r"^added:\s*$", line):
gobbler = gobble_added
elif re.search(r"^renamed:\s*$", line):
gobbler = gobble_renamed
elif re.search(r"^modified:\s*$", line):
gobbler = gobble_modified
elif re.search(r"^ ", line) and gobbler:
gobbler(line[2:-1]) # Use :-1 to gobble trailing newline
# Determine the name of the branch.
branch = None
if self.branchMap and repository:
if repository in self.branchMap:
branch = self.branchMap[repository]
elif "lp:" + repository in self.branchMap:
branch = self.branchMap['lp:' + repository]
if not branch:
if self.defaultBranch:
branch = self.defaultBranch
else:
if repository:
branch = 'lp:' + repository
else:
branch = None
if rev and author:
return (
'bzr',
{
"author": author,
"committer": None,
"files": d['files'],
"comments": d['comments'],
"when": when,
"revision": rev,
"branch": branch,
"repository": repository or '',
},
)
return None
def parseLaunchpadDate(datestr, tz_sign, tz_hours, tz_minutes):
time_no_tz = calendar.timegm(time.strptime(datestr, "%Y-%m-%d %H:%M:%S"))
tz_delta = 60 * 60 * int(tz_sign + tz_hours) + 60 * int(tz_minutes)
return time_no_tz - tz_delta
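# For example, parseLaunchpadDate('2009-05-15 10:35:43', '+', '02', '00')
# returns the epoch for 2009-05-15 08:35:43 UTC: the +0200 offset is
# subtracted from the timestamp, which timegm() interprets as UTC.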
4,741 | bitbucket.py | buildbot_buildbot/master/buildbot/changes/bitbucket.py
# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
import time
from datetime import datetime
from typing import ClassVar
from typing import Sequence
from twisted.internet import defer
from twisted.python import log
from unidiff import PatchSet
from buildbot.changes import base
from buildbot.util import bytes2unicode
from buildbot.util import datetime2epoch
from buildbot.util import deferredLocked
from buildbot.util import epoch2datetime
from buildbot.util import httpclientservice
from buildbot.util.pullrequest import PullRequestMixin
class BitbucketPullrequestPoller(base.ReconfigurablePollingChangeSource, PullRequestMixin):
compare_attrs: ClassVar[Sequence[str]] = (
"owner",
"slug",
"branch",
"pollInterval",
"useTimestamps",
"category",
"project",
"pollAtLaunch",
)
db_class_name = 'BitbucketPullrequestPoller'
property_basename = "bitbucket"
def __init__(self, owner, slug, **kwargs):
kwargs['name'] = self.build_name(owner, slug)
self.initLock = defer.DeferredLock()
super().__init__(owner, slug, **kwargs)
def checkConfig(
self,
owner,
slug,
branch=None,
pollInterval=10 * 60,
useTimestamps=True,
category=None,
project='',
pullrequest_filter=True,
pollAtLaunch=False,
auth=None,
bitbucket_property_whitelist=None,
):
super().checkConfig(
name=self.build_name(owner, slug), pollInterval=pollInterval, pollAtLaunch=pollAtLaunch
)
@defer.inlineCallbacks
def reconfigService(
self,
owner,
slug,
branch=None,
pollInterval=10 * 60,
useTimestamps=True,
category=None,
project='',
pullrequest_filter=True,
pollAtLaunch=False,
auth=None,
bitbucket_property_whitelist=None,
):
self.owner = owner
self.slug = slug
self.branch = branch
if bitbucket_property_whitelist is None:
bitbucket_property_whitelist = []
if callable(pullrequest_filter):
self.pullrequest_filter = pullrequest_filter
else:
self.pullrequest_filter = lambda _: pullrequest_filter
self.lastChange = time.time()
self.lastPoll = time.time()
self.useTimestamps = useTimestamps
self.category = category if callable(category) else bytes2unicode(category)
self.project = bytes2unicode(project)
self.external_property_whitelist = bitbucket_property_whitelist
base_url = "https://api.bitbucket.org/2.0"
self._http = yield httpclientservice.HTTPSession(
self.master.httpservice, base_url, auth=auth
)
yield super().reconfigService(
self.build_name(owner, slug), pollInterval=pollInterval, pollAtLaunch=pollAtLaunch
)
def build_name(self, owner, slug):
return '/'.join([owner, slug])
def describe(self):
return (
"BitbucketPullrequestPoller watching the "
f"Bitbucket repository {self.owner}/{self.slug}, branch: {self.branch}"
)
@deferredLocked('initLock')
@defer.inlineCallbacks
def poll(self):
response = yield self._getChanges()
if response.code != 200:
log.err(
f"{self.__class__.__name__}: error {response.code} while loading {response.url}"
)
return
json_result = yield response.json()
yield self._processChanges(json_result)
def _getChanges(self):
self.lastPoll = time.time()
log.msg(
"BitbucketPullrequestPoller: polling "
f"Bitbucket repository {self.owner}/{self.slug}, branch: {self.branch}"
)
url = f"/repositories/{self.owner}/{self.slug}/pullrequests"
return self._http.get(url, timeout=self.pollInterval)
@defer.inlineCallbacks
def _processChanges(self, result):
for pr in result['values']:
branch = pr['source']['branch']['name']
nr = int(pr['id'])
# Note that this is a short hash. The full length hash can be accessed via the
# commit api resource but we want to avoid requesting multiple pages as long as
# we are not sure that the pull request is new or updated.
revision = pr['source']['commit']['hash']
# check branch
if not self.branch or branch in self.branch:
current = yield self._getCurrentRev(nr)
# compare _short_ hashes to check if the PR has been updated
if not current or current[0:12] != revision[0:12]:
# parse pull request api page (required for the filter)
response = yield self._http.get(str(pr['links']['self']['href']))
pr_json = yield response.json()
# filter pull requests by user function
if not self.pullrequest_filter(pr_json):
log.msg('pull request does not match filter')
continue
# access additional information
author = pr['author']['display_name']
prlink = pr['links']['html']['href']
                    # Get the updated time. Note that the timezone offset is
                    # ignored.
if self.useTimestamps:
updated = datetime.strptime(
pr['updated_on'].split('.')[0], '%Y-%m-%dT%H:%M:%S'
)
else:
updated = epoch2datetime(self.master.reactor.seconds())
title = pr['title']
# parse commit api page
response = yield self._http.get(
str(pr['source']['commit']['links']['self']['href'])
)
commit_json = yield response.json()
# use the full-length hash from now on
revision = commit_json['hash']
revlink = commit_json['links']['html']['href']
# Retrieve the list of added/modified files in the commit
response = yield self._http.get(str(commit_json['links']['diff']['href']))
content = yield response.content()
patchset = PatchSet(content.decode())
files = [
file.path
for file in patchset
if file.is_added_file or file.is_modified_file
]
# parse repo api page
response = yield self._http.get(
str(pr['source']['repository']['links']['self']['href'])
)
repo_json = yield response.json()
repo = repo_json['links']['html']['href']
# update database
yield self._setCurrentRev(nr, revision)
# emit the change
yield self.master.data.updates.addChange(
author=bytes2unicode(author),
committer=None,
revision=bytes2unicode(revision),
revlink=bytes2unicode(revlink),
comments=f'pull-request #{nr}: {title}\n{prlink}',
when_timestamp=datetime2epoch(updated),
branch=bytes2unicode(branch),
category=self.category,
project=self.project,
repository=bytes2unicode(repo),
properties={
'pullrequesturl': prlink,
**self.extractProperties(pr),
},
src='bitbucket',
files=files,
)
def _getCurrentRev(self, pr_id):
        # Return a deferred that fires with the last-seen revision for the
        # given pull request number, or None.
d = self._getStateObjectId()
@d.addCallback
def oid_callback(oid):
current = self.master.db.state.getState(oid, f'pull_request{pr_id}', None)
@current.addCallback
def result_callback(result):
return result
return current
return d
def _setCurrentRev(self, pr_id, rev):
        # Store the current revision for the specified pull request.
d = self._getStateObjectId()
@d.addCallback
def oid_callback(oid):
return self.master.db.state.setState(oid, f'pull_request{pr_id}', rev)
return d
def _getStateObjectId(self):
# Return a deferred for object id in state db.
return self.master.db.state.getObjectId(
f'{self.owner}/{self.slug}#{self.branch}', self.db_class_name
)
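# Configuration sketch (owner, slug, and credentials are placeholders):
#
#     from buildbot.changes.bitbucket import BitbucketPullrequestPoller
#     c['change_source'] = BitbucketPullrequestPoller(
#         owner='acme',
#         slug='widget',
#         branch='main',
#         pollInterval=600,
#         auth=('api-user', 'app-password'),
#     )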
4,742 | secret.py | buildbot_buildbot/master/buildbot/secrets/secret.py
# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
class SecretDetails:
"""
    A SecretDetails object has the following attributes:
- source: provider where the secret was retrieved
- key: secret key identifier
- value: secret value
"""
def __init__(self, source, key, value):
self._source = source
self._value = value
self._key = key
@property
def source(self):
"""
source of the secret
"""
return self._source
@property
def value(self):
"""
secret value
"""
return self._value
@property
def key(self):
"""
        secret key
"""
return self._key
def __str__(self):
return f'{self._source} {self._key}: {self.value!r}'
def __eq__(self, other):
return self._source == other._source and self.key == other.key and self.value == other.value
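# For example, SecretDetails('SecretInAFile', 'db_password', 's3cr3t')
# stringifies to "SecretInAFile db_password: 's3cr3t'".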
4,743 | manager.py | buildbot_buildbot/master/buildbot/secrets/manager.py
# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
"""
manage providers and handle secrets
"""
from __future__ import annotations
from twisted.internet import defer
from buildbot.secrets.providers.base import SecretProviderBase
from buildbot.secrets.secret import SecretDetails
from buildbot.util import service
class SecretManager(service.BuildbotServiceManager):
"""
Secret manager
"""
name: str | None = 'secrets' # type: ignore[assignment]
config_attr = "secretsProviders"
@defer.inlineCallbacks
def setup(self):
configuredProviders = self.get_service_config(self.master.config)
for child in configuredProviders.values():
assert isinstance(child, SecretProviderBase)
yield child.setServiceParent(self)
yield child.configureService()
@defer.inlineCallbacks
def get(self, secret, *args, **kwargs):
"""
get secrets from the provider defined in the secret using args and
kwargs
@secrets: secrets keys
@type: string
@return type: SecretDetails
"""
for provider in self.services:
value = yield provider.get(secret)
source_name = provider.__class__.__name__
if value is not None:
return SecretDetails(source_name, secret, value)
return None
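# Lookup sketch (illustrative; assumes a SecretManager service wired into the
# master with at least one provider configured):
#
#     details = yield secret_manager.get('db_password')
#     if details is not None:
#         log.msg(f'found {details.key} in {details.source}')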
4,744 | __init__.py | buildbot_buildbot/master/buildbot/secrets/__init__.py
# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
4,745 | file.py | buildbot_buildbot/master/buildbot/secrets/providers/file.py
# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
"""
file based provider
"""
from __future__ import annotations
import os
import stat
from buildbot import config
from buildbot.secrets.providers.base import SecretProviderBase
class SecretInAFile(SecretProviderBase):
"""
secret is stored in a separate file under the given directory name
"""
name: str | None = "SecretInAFile" # type: ignore[assignment]
def checkFileIsReadOnly(self, dirname, secretfile):
filepath = os.path.join(dirname, secretfile)
obs_stat = stat.S_IMODE(os.stat(filepath).st_mode)
if (obs_stat & 0o7) != 0 and os.name == "posix":
config.error(
f"Permissions {oct(obs_stat)} on file {secretfile} are too open."
" It is required that your secret files are NOT"
" accessible by others!"
)
def checkSecretDirectoryIsAvailableAndReadable(self, dirname, suffixes):
if not os.access(dirname, os.F_OK):
config.error(f"directory {dirname} does not exists")
for secretfile in os.listdir(dirname):
for suffix in suffixes:
if secretfile.endswith(suffix):
self.checkFileIsReadOnly(dirname, secretfile)
def loadSecrets(self, dirname, suffixes, strip):
secrets = {}
for secretfile in os.listdir(dirname):
secretvalue = None
for suffix in suffixes:
if secretfile.endswith(suffix):
with open(os.path.join(dirname, secretfile), encoding='utf-8') as source:
secretvalue = source.read()
if suffix:
secretfile = secretfile[: -len(suffix)]
if strip:
secretvalue = secretvalue.rstrip("\r\n")
secrets[secretfile] = secretvalue
return secrets
def checkConfig(self, dirname, suffixes=None, strip=True):
self._dirname = dirname
if suffixes is None:
suffixes = [""]
self.checkSecretDirectoryIsAvailableAndReadable(dirname, suffixes=suffixes)
def reconfigService(self, dirname, suffixes=None, strip=True):
self._dirname = dirname
self.secrets = {}
if suffixes is None:
suffixes = [""]
self.secrets = self.loadSecrets(self._dirname, suffixes=suffixes, strip=strip)
def get(self, entry):
"""
get the value from the file identified by 'entry'
"""
return self.secrets.get(entry)
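# Configuration sketch (the directory path is an assumed example):
#
#     from buildbot.secrets.providers.file import SecretInAFile
#     c['secretsProviders'] = [SecretInAFile(dirname='/var/lib/buildbot/secrets')]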
4,746 | __init__.py | buildbot_buildbot/master/buildbot/secrets/providers/__init__.py
# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
4,747 | base.py | buildbot_buildbot/master/buildbot/secrets/providers/base.py
# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
"""
secret provider interface
"""
import abc
from buildbot.util.service import BuildbotService
class SecretProviderBase(BuildbotService):
"""
Secret provider base
"""
@abc.abstractmethod
def get(self, *args, **kwargs):
"""
this should be an abstract method
"""
4,748 | vault_hvac.py | buildbot_buildbot/master/buildbot/secrets/providers/vault_hvac.py
# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
"""
HVAC based providers
"""
from __future__ import annotations
import importlib.metadata
from packaging.version import parse as parse_version
from twisted.internet import defer
from twisted.internet import threads
from buildbot import config
from buildbot.secrets.providers.base import SecretProviderBase
class VaultAuthenticator:
"""
base HVAC authenticator class
"""
def authenticate(self, client):
pass
class VaultAuthenticatorToken(VaultAuthenticator):
"""
HVAC authenticator for static token
"""
def __init__(self, token):
self.token = token
def authenticate(self, client):
client.token = self.token
class VaultAuthenticatorApprole(VaultAuthenticator):
"""
HVAC authenticator for Approle login method
"""
def __init__(self, roleId, secretId):
self.roleId = roleId
self.secretId = secretId
def authenticate(self, client):
client.auth.approle.login(role_id=self.roleId, secret_id=self.secretId)
class HashiCorpVaultKvSecretProvider(SecretProviderBase):
"""
Basic provider where each secret is stored in Vault KV secret engine.
    In case more secret engines are going to be supported, each engine should have its own class.
"""
name: str | None = 'SecretInVaultKv' # type: ignore[assignment]
def checkConfig(
self,
vault_server=None,
authenticator=None,
secrets_mount=None,
api_version=2,
path_delimiter='|',
path_escape='\\',
):
try:
import hvac
_ = hvac
except ImportError: # pragma: no cover
config.error(
f"{self.__class__.__name__} needs the hvac package installed "
+ "(pip install hvac)"
)
if not isinstance(vault_server, str):
config.error(f"vault_server must be a string while it is {type(vault_server)}")
if not isinstance(path_delimiter, str) or len(path_delimiter) > 1:
config.error("path_delimiter must be a single character")
if not isinstance(path_escape, str) or len(path_escape) > 1:
config.error("path_escape must be a single character")
if not isinstance(authenticator, VaultAuthenticator):
config.error(
"authenticator must be instance of VaultAuthenticator while it is "
f"{type(authenticator)}"
)
if api_version not in [1, 2]:
config.error(f"api_version {api_version} is not supported")
def reconfigService(
self,
vault_server=None,
authenticator=None,
secrets_mount=None,
api_version=2,
path_delimiter='|',
path_escape='\\',
):
try:
import hvac
except ImportError: # pragma: no cover
config.error(
f"{self.__class__.__name__} needs the hvac package installed "
+ "(pip install hvac)"
)
if secrets_mount is None:
secrets_mount = "secret"
self.secrets_mount = secrets_mount
self.path_delimiter = path_delimiter
self.path_escape = path_escape
self.authenticator = authenticator
self.api_version = api_version
if vault_server.endswith('/'): # pragma: no cover
vault_server = vault_server[:-1]
self.client = hvac.Client(vault_server)
self.version = parse_version(importlib.metadata.version('hvac'))
self.client.secrets.kv.default_kv_version = api_version
return self
def escaped_split(self, s):
"""
parse and split string, respecting escape characters
"""
ret = []
current = []
itr = iter(s)
for ch in itr:
if ch == self.path_escape:
try:
                    # the character after the escape is kept verbatim and the
                    # escape character itself is dropped
                    current.append(next(itr))
                except StopIteration:
                    # an escape character at the end of the string is safest to
                    # ignore: for each secret identifier buildbot tries all
                    # secret providers until a value is found, so we may be
                    # parsing identifiers meant for other providers, where a
                    # trailing escape character may be valid
                    pass
elif ch == self.path_delimiter:
# split! (add current to the list and reset it)
ret.append(''.join(current))
current = []
else:
current.append(ch)
ret.append(''.join(current))
return ret
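    # Examples with the default delimiter '|' and escape '\':
    #   escaped_split('secret/data|password')  ==  ['secret/data', 'password']
    #   escaped_split(r'odd\|path|key')        ==  ['odd|path', 'key']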
def thd_hvac_wrap_read(self, path):
if self.api_version == 1:
return self.client.secrets.kv.v1.read_secret(path=path, mount_point=self.secrets_mount)
else:
if self.version >= parse_version("1.1.1"):
return self.client.secrets.kv.v2.read_secret_version(
path=path, mount_point=self.secrets_mount, raise_on_deleted_version=True
)
return self.client.secrets.kv.v2.read_secret_version(
path=path, mount_point=self.secrets_mount
)
def thd_hvac_get(self, path):
"""
query secret from Vault and re-authenticate if not authenticated
"""
if not self.client.is_authenticated():
self.authenticator.authenticate(self.client)
response = self.thd_hvac_wrap_read(path=path)
return response
@defer.inlineCallbacks
def get(self, entry):
"""
get the value from vault secret backend
"""
parts = self.escaped_split(entry)
if len(parts) == 1:
raise KeyError(
"Vault secret specification must contain attribute name separated from "
f"path by '{self.path_delimiter}'"
)
if len(parts) > 2:
raise KeyError(
f"Multiple separators ('{self.path_delimiter}') found in vault "
f"path '{entry}'. All occurrences of '{self.path_delimiter}' in path or "
f"attribute name must be escaped using '{self.path_escape}'"
)
name = parts[0]
key = parts[1]
response = yield threads.deferToThread(self.thd_hvac_get, path=name)
# in KVv2 we have extra "data" dictionary, as vault provides metadata as well
if self.api_version == 2:
response = response['data']
try:
return response['data'][key]
except KeyError as e:
raise KeyError(f"The secret {entry} does not exist in Vault provider: {e}") from e
4,749 | passwordstore.py | buildbot_buildbot/master/buildbot/secrets/providers/passwordstore.py
# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
"""
password store based provider
"""
from __future__ import annotations
import os
from pathlib import Path
from twisted.internet import defer
from buildbot import config
from buildbot.secrets.providers.base import SecretProviderBase
from buildbot.util import runprocess
class SecretInPass(SecretProviderBase):
"""
secret is stored in a password store
"""
name: str | None = "SecretInPass" # type: ignore[assignment]
def checkPassIsInPath(self):
if not any((Path(p) / "pass").is_file() for p in os.environ["PATH"].split(":")):
config.error("pass does not exist in PATH")
def checkPassDirectoryIsAvailableAndReadable(self, dirname):
if not os.access(dirname, os.F_OK):
config.error(f"directory {dirname} does not exist")
def checkConfig(self, gpgPassphrase=None, dirname=None):
self.checkPassIsInPath()
if dirname:
self.checkPassDirectoryIsAvailableAndReadable(dirname)
def reconfigService(self, gpgPassphrase=None, dirname=None):
self._env = {**os.environ}
if gpgPassphrase:
self._env["PASSWORD_STORE_GPG_OPTS"] = f"--passphrase {gpgPassphrase}"
if dirname:
self._env["PASSWORD_STORE_DIR"] = dirname
@defer.inlineCallbacks
def get(self, entry):
"""
get the value from pass identified by 'entry'
"""
try:
rc, output = yield runprocess.run_process(
self.master.reactor,
['pass', entry],
env=self._env,
collect_stderr=False,
stderr_is_error=True,
)
if rc != 0:
return None
return output.decode("utf-8", "ignore").splitlines()[0]
except OSError:
return None
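# Configuration sketch (directory and passphrase are assumed examples):
#
#     from buildbot.secrets.providers.passwordstore import SecretInPass
#     c['secretsProviders'] = [SecretInPass(
#         gpgPassphrase='example-passphrase',
#         dirname='/var/lib/buildbot/.password-store',
#     )]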
4,750 | users.py | buildbot_buildbot/master/buildbot/db/users.py
# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
from __future__ import annotations
import dataclasses
from typing import TYPE_CHECKING
import sqlalchemy as sa
from twisted.python import deprecate
from twisted.python import versions
from buildbot.db import base
from buildbot.util import identifiers
from buildbot.warnings import warn_deprecated
if TYPE_CHECKING:
from twisted.internet import defer
@dataclasses.dataclass
class UserModel:
uid: int
identifier: str
bb_username: str | None = None
bb_password: str | None = None
attributes: dict[str, str] | None = None
# For backward compatibility
def __getitem__(self, key: str):
warn_deprecated(
'4.1.0',
(
'UsersConnectorComponent '
'getUser, getUserByUsername, and getUsers '
                'no longer return User as dictionaries. '
'Usage of [] accessor is deprecated: please access the member directly'
),
)
if hasattr(self, key):
return getattr(self, key)
if self.attributes is not None and key in self.attributes:
return self.attributes[key]
raise KeyError(key)
@deprecate.deprecated(versions.Version("buildbot", 4, 1, 0), UserModel)
class UsDict(UserModel):
pass
class UsersConnectorComponent(base.DBConnectorComponent):
def findUserByAttr(
self, identifier: str, attr_type: str, attr_data: str, _race_hook=None
) -> defer.Deferred[int]:
# note that since this involves two tables, self.findSomethingId is not
# helpful
def thd(conn, no_recurse=False, identifier=identifier) -> int:
tbl = self.db.model.users
tbl_info = self.db.model.users_info
self.checkLength(tbl.c.identifier, identifier)
self.checkLength(tbl_info.c.attr_type, attr_type)
self.checkLength(tbl_info.c.attr_data, attr_data)
# try to find the user
q = sa.select(
tbl_info.c.uid,
).where(tbl_info.c.attr_type == attr_type, tbl_info.c.attr_data == attr_data)
rows = conn.execute(q).fetchall()
if rows:
return rows[0].uid
if _race_hook is not None:
_race_hook(conn)
# try to do both of these inserts in a transaction, so that both
# the new user and the corresponding attributes appear at the same
# time from the perspective of other masters.
transaction = conn.begin_nested()
inserted_user = False
try:
r = conn.execute(tbl.insert(), {"identifier": identifier})
uid = r.inserted_primary_key[0]
inserted_user = True
conn.execute(
tbl_info.insert(), {"uid": uid, "attr_type": attr_type, "attr_data": attr_data}
)
transaction.commit()
except (sa.exc.IntegrityError, sa.exc.ProgrammingError):
transaction.rollback()
# try it all over again, in case there was an overlapping,
# identical call to findUserByAttr. If the identifier
# collided, we'll try again indefinitely; otherwise, only once.
if no_recurse:
raise
# if we failed to insert the user, then it's because the
# identifier wasn't unique
if not inserted_user:
identifier = identifiers.incrementIdentifier(256, identifier)
else:
no_recurse = True
return thd(conn, no_recurse=no_recurse, identifier=identifier)
conn.commit()
return uid
return self.db.pool.do(thd)
@base.cached("usdicts")
def getUser(self, uid: int) -> defer.Deferred[UserModel | None]:
def thd(conn) -> UserModel | None:
tbl = self.db.model.users
tbl_info = self.db.model.users_info
q = tbl.select().where(tbl.c.uid == uid)
users_row = conn.execute(q).fetchone()
if not users_row:
return None
# gather all attr_type and attr_data entries from users_info table
q = tbl_info.select().where(tbl_info.c.uid == uid)
rows = conn.execute(q).fetchall()
return self._model_from_row(users_row, rows)
return self.db.pool.do(thd)
def _model_from_row(self, users_row, attribute_rows=None):
attributes = None
if attribute_rows is not None:
attributes = {row.attr_type: row.attr_data for row in attribute_rows}
return UserModel(
uid=users_row.uid,
identifier=users_row.identifier,
bb_username=users_row.bb_username,
bb_password=users_row.bb_password,
attributes=attributes,
)
# returns a Deferred that returns a value
def getUserByUsername(self, username: str | None) -> defer.Deferred[UserModel | None]:
def thd(conn) -> UserModel | None:
tbl = self.db.model.users
tbl_info = self.db.model.users_info
q = tbl.select().where(tbl.c.bb_username == username)
users_row = conn.execute(q).fetchone()
if not users_row:
return None
# gather all attr_type and attr_data entries from users_info table
q = tbl_info.select().where(tbl_info.c.uid == users_row.uid)
rows = conn.execute(q).fetchall()
return self._model_from_row(users_row, rows)
return self.db.pool.do(thd)
def getUsers(self) -> defer.Deferred[list[UserModel]]:
def thd(conn) -> list[UserModel]:
tbl = self.db.model.users
rows = conn.execute(tbl.select()).fetchall()
return [self._model_from_row(row, attribute_rows=None) for row in rows]
return self.db.pool.do(thd)
# returns a Deferred that returns None
def updateUser(
self,
uid: int | None = None,
identifier: str | None = None,
bb_username: str | None = None,
bb_password: str | None = None,
attr_type: str | None = None,
attr_data: str | None = None,
_race_hook=None,
):
def thd(conn):
tbl = self.db.model.users
tbl_info = self.db.model.users_info
update_dict = {}
            # first, add the identifier if it exists
if identifier is not None:
self.checkLength(tbl.c.identifier, identifier)
update_dict['identifier'] = identifier
# then, add the creds if they exist
if bb_username is not None:
assert bb_password is not None
self.checkLength(tbl.c.bb_username, bb_username)
self.checkLength(tbl.c.bb_password, bb_password)
update_dict['bb_username'] = bb_username
update_dict['bb_password'] = bb_password
# update the users table if it needs to be updated
if update_dict:
q = tbl.update().where(tbl.c.uid == uid)
conn.execute(q, update_dict)
# then, update the attributes, carefully handling the potential
# update-or-insert race condition.
if attr_type is not None:
assert attr_data is not None
self.checkLength(tbl_info.c.attr_type, attr_type)
self.checkLength(tbl_info.c.attr_data, attr_data)
try:
self.db.upsert(
conn,
tbl_info,
where_values=(
(tbl_info.c.uid, uid),
(tbl_info.c.attr_type, attr_type),
),
update_values=((tbl_info.c.attr_data, attr_data),),
_race_hook=_race_hook,
)
conn.commit()
except (sa.exc.IntegrityError, sa.exc.ProgrammingError):
# someone else beat us to the punch inserting this row;
# let them win.
conn.rollback()
return self.db.pool.do_with_transaction(thd)
# returns a Deferred that returns None
def removeUser(self, uid):
def thd(conn):
# delete from dependent tables first, followed by 'users'
for tbl in [
self.db.model.change_users,
self.db.model.users_info,
self.db.model.users,
]:
conn.execute(tbl.delete().where(tbl.c.uid == uid))
return self.db.pool.do_with_transaction(thd)
# returns a Deferred that returns a value
def identifierToUid(self, identifier) -> defer.Deferred[int | None]:
def thd(conn) -> int | None:
tbl = self.db.model.users
q = tbl.select().where(tbl.c.identifier == identifier)
row = conn.execute(q).fetchone()
if not row:
return None
return row.uid
return self.db.pool.do(thd)
| 9,900 | Python | .py | 221 | 32.968326 | 99 | 0.586053 | buildbot/buildbot | 5,232 | 1,616 | 728 | GPL-2.0 | 9/5/2024, 5:09:21 PM (Europe/Amsterdam) |
4,751 | state.py | buildbot_buildbot/master/buildbot/db/state.py | # This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
import json
import sqlalchemy as sa
import sqlalchemy.exc
from buildbot.db import base
class _IdNotFoundError(Exception):
pass # used internally
class ObjDict(dict):
pass
class StateConnectorComponent(base.DBConnectorComponent):
def getObjectId(self, name, class_name):
# defer to a cached method that only takes one parameter (a tuple)
d = self._getObjectId((name, class_name))
d.addCallback(lambda objdict: objdict['id'])
return d
# returns a Deferred that returns a value
@base.cached('objectids')
def _getObjectId(self, name_class_name_tuple):
name, class_name = name_class_name_tuple
def thd(conn):
return self.thdGetObjectId(conn, name, class_name)
return self.db.pool.do(thd)
def thdGetObjectId(self, conn, name, class_name):
objects_tbl = self.db.model.objects
name = self.ensureLength(objects_tbl.c.name, name)
self.checkLength(objects_tbl.c.class_name, class_name)
def select():
q = sa.select(objects_tbl.c.id).where(
objects_tbl.c.name == name,
objects_tbl.c.class_name == class_name,
)
res = conn.execute(q)
row = res.fetchone()
res.close()
if not row:
raise _IdNotFoundError
return row.id
def insert():
res = conn.execute(objects_tbl.insert().values(name=name, class_name=class_name))
conn.commit()
return res.inserted_primary_key[0]
# we want to try selecting, then inserting, but if the insert fails
# then try selecting again. We include an invocation of a hook
# method to allow tests to exercise this particular behavior
try:
return ObjDict(id=select())
except _IdNotFoundError:
pass
self._test_timing_hook(conn)
try:
return ObjDict(id=insert())
except (sqlalchemy.exc.IntegrityError, sqlalchemy.exc.ProgrammingError):
conn.rollback()
return ObjDict(id=select())
class Thunk:
pass
# returns a Deferred that returns a value
def getState(self, objectid, name, default=Thunk):
def thd(conn):
return self.thdGetState(conn, objectid, name, default=default)
return self.db.pool.do(thd)
def thdGetState(self, conn, objectid, name, default=Thunk):
object_state_tbl = self.db.model.object_state
q = sa.select(
object_state_tbl.c.value_json,
).where(
object_state_tbl.c.objectid == objectid,
object_state_tbl.c.name == name,
)
res = conn.execute(q)
row = res.fetchone()
res.close()
if not row:
if default is self.Thunk:
raise KeyError(f"no such state value '{name}' for object {objectid}")
return default
try:
return json.loads(row.value_json)
except ValueError as e:
raise TypeError(f"JSON error loading state value '{name}' for {objectid}") from e
# returns a Deferred that returns a value
def setState(self, objectid, name, value):
def thd(conn):
return self.thdSetState(conn, objectid, name, value)
return self.db.pool.do(thd)
def thdSetState(self, conn, objectid, name, value):
object_state_tbl = self.db.model.object_state
try:
value_json = json.dumps(value)
except (TypeError, ValueError) as e:
raise TypeError(f"Error encoding JSON for {value!r}") from e
name = self.ensureLength(object_state_tbl.c.name, name)
def update():
q = object_state_tbl.update().where(
object_state_tbl.c.objectid == objectid, object_state_tbl.c.name == name
)
res = conn.execute(q.values(value_json=value_json))
conn.commit()
# check whether that worked
return res.rowcount > 0
def insert():
conn.execute(
object_state_tbl.insert().values(
objectid=objectid, name=name, value_json=value_json
)
)
conn.commit()
# try updating; if that fails, try inserting; if that fails, then
# we raced with another instance to insert, so let that instance
# win.
if update():
return
self._test_timing_hook(conn)
try:
insert()
except (sqlalchemy.exc.IntegrityError, sqlalchemy.exc.ProgrammingError):
conn.rollback() # someone beat us to it - oh well
def _test_timing_hook(self, conn):
# called so tests can simulate another process inserting a database row
# at an inopportune moment
pass
# returns a Deferred that returns a value
def atomicCreateState(self, objectid, name, thd_create_callback):
def thd(conn):
object_state_tbl = self.db.model.object_state
res = self.thdGetState(conn, objectid, name, default=None)
if res is None:
res = thd_create_callback()
try:
value_json = json.dumps(res)
except (TypeError, ValueError) as e:
raise TypeError(f"Error encoding JSON for {res!r}") from e
self._test_timing_hook(conn)
try:
conn.execute(
object_state_tbl.insert().values(
objectid=objectid,
name=name,
value_json=value_json,
)
)
conn.commit()
except (sqlalchemy.exc.IntegrityError, sqlalchemy.exc.ProgrammingError):
conn.rollback()
                    # someone beat us to it - oh well, return that value
return self.thdGetState(conn, objectid, name)
return res
return self.db.pool.do(thd)
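    # Illustrative end-to-end flow for this component (a sketch; `master` and
    # the object/state names are assumptions, the methods are the ones above):
    #
    #   @defer.inlineCallbacks
    #   def bump_run_count(master):
    #       oid = yield master.db.state.getObjectId('nightly', 'NightlyScheduler')
    #       count = yield master.db.state.getState(oid, 'run_count', default=0)
    #       yield master.db.state.setState(oid, 'run_count', count + 1)
    #
    # Values must be JSON-serializable; on an insert race, setState lets the
    # concurrent writer win, consistent with the comments above.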
| 6,821 | Python | .py | 161 | 31.639752 | 93 | 0.604866 | buildbot/buildbot | 5,232 | 1,616 | 728 | GPL-2.0 | 9/5/2024, 5:09:21 PM (Europe/Amsterdam) |
4,752 | buildrequests.py | buildbot_buildbot/master/buildbot/db/buildrequests.py | # This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
from __future__ import annotations
import itertools
from dataclasses import dataclass
from typing import TYPE_CHECKING
import sqlalchemy as sa
from twisted.internet import defer
from twisted.python import deprecate
from twisted.python import log
from twisted.python import versions
from buildbot.db import NULL
from buildbot.db import base
from buildbot.process.results import RETRY
from buildbot.util import datetime2epoch
from buildbot.util import epoch2datetime
from buildbot.warnings import warn_deprecated
if TYPE_CHECKING:
import datetime
class AlreadyClaimedError(Exception):
pass
class NotClaimedError(Exception):
pass
@dataclass
class BuildRequestModel:
buildrequestid: int
buildsetid: int
builderid: int
buildername: str
submitted_at: datetime.datetime
complete_at: datetime.datetime | None = None
complete: bool = False
results: int | None = None
waited_for: bool = False
priority: int = 0
claimed_at: datetime.datetime | None = None
claimed_by_masterid: int | None = None
@property
def claimed(self) -> bool:
return self.claimed_at is not None
    # For backward compatibility from when BrDict inherited from Dict
def __getitem__(self, key: str):
warn_deprecated(
'4.1.0',
(
'BuildRequestsConnectorComponent '
'getBuildRequest, and getBuildRequests '
                'no longer return BuildRequest as dictionaries. '
'Usage of [] accessor is deprecated: please access the member directly'
),
)
if hasattr(self, key):
return getattr(self, key)
raise KeyError(key)
@deprecate.deprecated(versions.Version("buildbot", 4, 1, 0), BuildRequestModel)
class BrDict(BuildRequestModel):
pass
class BuildRequestsConnectorComponent(base.DBConnectorComponent):
def _simple_sa_select_query(self):
reqs_tbl = self.db.model.buildrequests
claims_tbl = self.db.model.buildrequest_claims
builder_tbl = self.db.model.builders
from_clause = reqs_tbl.outerjoin(claims_tbl, reqs_tbl.c.id == claims_tbl.c.brid)
from_clause = from_clause.join(builder_tbl, reqs_tbl.c.builderid == builder_tbl.c.id)
return sa.select(
reqs_tbl,
claims_tbl,
builder_tbl.c.name.label('buildername'),
).select_from(from_clause)
def _saSelectQuery(self):
reqs_tbl = self.db.model.buildrequests
claims_tbl = self.db.model.buildrequest_claims
bsets_tbl = self.db.model.buildsets
builder_tbl = self.db.model.builders
bsss_tbl = self.db.model.buildset_sourcestamps
sstamps_tbl = self.db.model.sourcestamps
from_clause = reqs_tbl.outerjoin(claims_tbl, reqs_tbl.c.id == claims_tbl.c.brid)
from_clause = from_clause.join(bsets_tbl, reqs_tbl.c.buildsetid == bsets_tbl.c.id)
from_clause = from_clause.join(bsss_tbl, bsets_tbl.c.id == bsss_tbl.c.buildsetid)
from_clause = from_clause.join(sstamps_tbl, bsss_tbl.c.sourcestampid == sstamps_tbl.c.id)
from_clause = from_clause.join(builder_tbl, reqs_tbl.c.builderid == builder_tbl.c.id)
return sa.select(
reqs_tbl,
claims_tbl,
sstamps_tbl.c.branch,
sstamps_tbl.c.repository,
sstamps_tbl.c.codebase,
builder_tbl.c.name.label('buildername'),
).select_from(from_clause)
def getBuildRequest(self, brid) -> defer.Deferred[BuildRequestModel | None]:
def thd(conn) -> BuildRequestModel | None:
reqs_tbl = self.db.model.buildrequests
q = self._simple_sa_select_query()
q = q.where(reqs_tbl.c.id == brid)
res = conn.execute(q)
row = res.fetchone()
rv = None
if row:
rv = self._modelFromRow(row)
res.close()
return rv
return self.db.pool.do(thd)
@defer.inlineCallbacks
def getBuildRequests(
self,
builderid=None,
complete=None,
claimed=None,
bsid=None,
branch=None,
repository=None,
resultSpec=None,
):
def deduplicateBrdict(brdicts: list[BuildRequestModel]) -> list[BuildRequestModel]:
return list(({b.buildrequestid: b for b in brdicts}).values())
def thd(conn) -> list[BuildRequestModel]:
reqs_tbl = self.db.model.buildrequests
claims_tbl = self.db.model.buildrequest_claims
sstamps_tbl = self.db.model.sourcestamps
q = self._saSelectQuery()
if claimed is not None:
if isinstance(claimed, bool):
if not claimed:
q = q.where((claims_tbl.c.claimed_at == NULL) & (reqs_tbl.c.complete == 0))
else:
q = q.where(claims_tbl.c.claimed_at != NULL)
else:
q = q.where(claims_tbl.c.masterid == claimed)
if builderid is not None:
q = q.where(reqs_tbl.c.builderid == builderid)
if complete is not None:
if complete:
q = q.where(reqs_tbl.c.complete != 0)
else:
q = q.where(reqs_tbl.c.complete == 0)
if bsid is not None:
q = q.where(reqs_tbl.c.buildsetid == bsid)
if branch is not None:
q = q.where(sstamps_tbl.c.branch == branch)
if repository is not None:
q = q.where(sstamps_tbl.c.repository == repository)
if resultSpec is not None:
return deduplicateBrdict(resultSpec.thd_execute(conn, q, self._modelFromRow))
res = conn.execute(q)
return deduplicateBrdict([self._modelFromRow(row) for row in res.fetchall()])
res = yield self.db.pool.do(thd)
return res
@defer.inlineCallbacks
def claimBuildRequests(self, brids, claimed_at=None):
if claimed_at is not None:
claimed_at = datetime2epoch(claimed_at)
else:
claimed_at = int(self.master.reactor.seconds())
yield self._claim_buildrequests_for_master(brids, claimed_at, self.db.master.masterid)
@defer.inlineCallbacks
def _claim_buildrequests_for_master(self, brids, claimed_at, masterid):
def thd(conn):
transaction = conn.begin()
tbl = self.db.model.buildrequest_claims
try:
q = tbl.insert()
conn.execute(
q,
[{"brid": id, "masterid": masterid, "claimed_at": claimed_at} for id in brids],
)
except (sa.exc.IntegrityError, sa.exc.ProgrammingError) as e:
transaction.rollback()
raise AlreadyClaimedError() from e
transaction.commit()
yield self.db.pool.do(thd)
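    # How a master typically consumes the claim above (a hedged sketch;
    # `master` and `brids` are assumptions, the exception is the one defined
    # in this module):
    #
    #   @defer.inlineCallbacks
    #   def try_claim(master, brids):
    #       try:
    #           yield master.db.buildrequests.claimBuildRequests(brids)
    #       except AlreadyClaimedError:
    #           # another master inserted the same claim rows first;
    #           # back off and let it run those requests
    #           return False
    #       return True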
@defer.inlineCallbacks
def unclaimBuildRequests(self, brids):
yield self._unclaim_buildrequests_for_master(brids, self.db.master.masterid)
@defer.inlineCallbacks
def _unclaim_buildrequests_for_master(self, brids, masterid):
def thd(conn):
transaction = conn.begin()
claims_tbl = self.db.model.buildrequest_claims
# we'll need to batch the brids into groups of 100, so that the
# parameter lists supported by the DBAPI aren't exhausted
iterator = iter(brids)
while True:
batch = list(itertools.islice(iterator, 100))
if not batch:
break # success!
try:
q = claims_tbl.delete().where(
claims_tbl.c.brid.in_(batch),
claims_tbl.c.masterid == masterid,
)
conn.execute(q)
except Exception:
transaction.rollback()
raise
transaction.commit()
yield self.db.pool.do(thd)
@defer.inlineCallbacks
def completeBuildRequests(self, brids, results, complete_at=None):
assert results != RETRY, "a buildrequest cannot be completed with a retry status!"
if complete_at is not None:
complete_at = datetime2epoch(complete_at)
else:
complete_at = int(self.master.reactor.seconds())
def thd(conn):
transaction = conn.begin()
            # the update here is simple, but a number of conditions are
            # attached to ensure that we do not update a row inappropriately.
            # Note that checking that the request is mine would require a
            # subquery, so for efficiency that is not checked.
reqs_tbl = self.db.model.buildrequests
# we'll need to batch the brids into groups of 100, so that the
# parameter lists supported by the DBAPI aren't exhausted
for batch in self.doBatch(brids, 100):
q = reqs_tbl.update()
q = q.where(reqs_tbl.c.id.in_(batch))
q = q.where(reqs_tbl.c.complete != 1)
res = conn.execute(q.values(complete=1, results=results, complete_at=complete_at))
# if an incorrect number of rows were updated, then we failed.
if res.rowcount != len(batch):
log.msg(
f"tried to complete {len(batch)} buildrequests, "
f"but only completed {res.rowcount}"
)
transaction.rollback()
raise NotClaimedError
transaction.commit()
yield self.db.pool.do(thd)
def set_build_requests_priority(self, brids, priority):
def thd(conn):
transaction = conn.begin()
            # the update here is simple, but a number of conditions are
            # attached to ensure that we do not update a row inappropriately.
            # Note that checking that the request is mine would require a
            # subquery, so for efficiency that is not checked.
reqs_tbl = self.db.model.buildrequests
# we'll need to batch the brids into groups of 100, so that the
# parameter lists supported by the DBAPI aren't exhausted
for batch in self.doBatch(brids, 100):
q = reqs_tbl.update()
q = q.where(reqs_tbl.c.id.in_(batch))
q = q.where(reqs_tbl.c.complete != 1)
                res = conn.execute(q.values(priority=priority))
# if an incorrect number of rows were updated, then we failed.
if res.rowcount != len(batch):
log.msg(
f"tried to complete {len(batch)} buildrequests, "
f"but only completed {res.rowcount}"
)
transaction.rollback()
raise NotClaimedError
transaction.commit()
return self.db.pool.do(thd)
@staticmethod
def _modelFromRow(row):
return BuildRequestModel(
buildrequestid=row.id,
buildsetid=row.buildsetid,
builderid=row.builderid,
buildername=row.buildername,
submitted_at=epoch2datetime(row.submitted_at),
complete_at=epoch2datetime(row.complete_at),
complete=bool(row.complete),
results=row.results,
waited_for=bool(row.waited_for),
priority=row.priority,
claimed_at=epoch2datetime(row.claimed_at),
claimed_by_masterid=row.masterid,
)
| 12,432 | Python | .py | 280 | 33.239286 | 99 | 0.608717 | buildbot/buildbot | 5,232 | 1,616 | 728 | GPL-2.0 | 9/5/2024, 5:09:21 PM (Europe/Amsterdam) |
4,753 | buildsets.py | buildbot_buildbot/master/buildbot/db/buildsets.py | # This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
"""
Support for buildsets in the database
"""
from __future__ import annotations
import json
from dataclasses import dataclass
from dataclasses import field
from typing import TYPE_CHECKING
import sqlalchemy as sa
from twisted.internet import defer
from buildbot.db import NULL
from buildbot.db import base
from buildbot.util import datetime2epoch
from buildbot.util import epoch2datetime
from buildbot.warnings import warn_deprecated
if TYPE_CHECKING:
import datetime
class BsProps(dict):
pass
class AlreadyCompleteError(RuntimeError):
pass
@dataclass
class BuildSetModel:
bsid: int
external_idstring: str | None
reason: str | None
submitted_at: datetime.datetime
complete: bool = False
complete_at: datetime.datetime | None = None
results: int | None = None
parent_buildid: int | None = None
parent_relationship: str | None = None
rebuilt_buildid: int | None = None
sourcestamps: list[int] = field(default_factory=list)
# For backward compatibility
def __getitem__(self, key: str):
warn_deprecated(
'4.1.0',
(
'BuildsetsConnectorComponent '
'getBuildset, getBuildsets, and getRecentBuildsets '
                'no longer return BuildSet as dictionaries. '
'Usage of [] accessor is deprecated: please access the member directly'
),
)
if hasattr(self, key):
return getattr(self, key)
raise KeyError(key)
class BuildsetsConnectorComponent(base.DBConnectorComponent):
@defer.inlineCallbacks
def addBuildset(
self,
sourcestamps,
reason,
properties,
builderids,
waited_for,
external_idstring=None,
submitted_at=None,
rebuilt_buildid=None,
parent_buildid=None,
parent_relationship=None,
priority=0,
):
        # We've gotten this wrong a couple of times.
assert isinstance(waited_for, bool), f'waited_for should be boolean: {waited_for!r}'
if submitted_at is not None:
submitted_at = datetime2epoch(submitted_at)
else:
submitted_at = int(self.master.reactor.seconds())
# convert to sourcestamp IDs first, as necessary
def toSsid(sourcestamp):
if isinstance(sourcestamp, int):
return defer.succeed(sourcestamp)
ssConnector = self.master.db.sourcestamps
return ssConnector.findSourceStampId(**sourcestamp)
sourcestamps = yield defer.DeferredList(
[toSsid(ss) for ss in sourcestamps], fireOnOneErrback=True, consumeErrors=True
)
sourcestampids = [r[1] for r in sourcestamps]
def thd(conn):
buildsets_tbl = self.db.model.buildsets
self.checkLength(buildsets_tbl.c.reason, reason)
self.checkLength(buildsets_tbl.c.external_idstring, external_idstring)
transaction = conn.begin()
# insert the buildset itself
r = conn.execute(
buildsets_tbl.insert(),
{
"submitted_at": submitted_at,
"reason": reason,
"rebuilt_buildid": rebuilt_buildid,
"complete": 0,
"complete_at": None,
"results": -1,
"external_idstring": external_idstring,
"parent_buildid": parent_buildid,
"parent_relationship": parent_relationship,
},
)
bsid = r.inserted_primary_key[0]
# add any properties
if properties:
bs_props_tbl = self.db.model.buildset_properties
inserts = [
{"buildsetid": bsid, "property_name": k, "property_value": json.dumps([v, s])}
for k, (v, s) in properties.items()
]
for i in inserts:
self.checkLength(bs_props_tbl.c.property_name, i['property_name'])
conn.execute(bs_props_tbl.insert(), inserts)
# add sourcestamp ids
r = conn.execute(
self.db.model.buildset_sourcestamps.insert(),
[{"buildsetid": bsid, "sourcestampid": ssid} for ssid in sourcestampids],
)
# and finish with a build request for each builder. Note that
# sqlalchemy and the Python DBAPI do not provide a way to recover
# inserted IDs from a multi-row insert, so this is done one row at
# a time.
brids = {}
br_tbl = self.db.model.buildrequests
ins = br_tbl.insert()
for builderid in builderids:
r = conn.execute(
ins,
{
"buildsetid": bsid,
"builderid": builderid,
"priority": priority,
"claimed_at": 0,
"claimed_by_name": None,
"claimed_by_incarnation": None,
"complete": 0,
"results": -1,
"submitted_at": submitted_at,
"complete_at": None,
"waited_for": 1 if waited_for else 0,
},
)
brids[builderid] = r.inserted_primary_key[0]
transaction.commit()
return (bsid, brids)
bsid, brids = yield self.db.pool.do(thd)
# Seed the buildset property cache.
self.getBuildsetProperties.cache.put(bsid, BsProps(properties))
return (bsid, brids)
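    # The `properties` argument maps each property name to a (value, source)
    # tuple, stored as json.dumps([value, source]); for example (a sketch,
    # concrete values assumed):
    #
    #   properties = {
    #       'owner': ('alice', 'Force Build Form'),
    #       'build_priority': (10, 'Scheduler'),
    #   }
    #
    # getBuildsetProperties() below returns the same shape, so the cache
    # seeded above matches what a later database read would produce.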
@defer.inlineCallbacks
def completeBuildset(self, bsid, results, complete_at=None):
if complete_at is not None:
complete_at = datetime2epoch(complete_at)
else:
complete_at = int(self.master.reactor.seconds())
def thd(conn):
tbl = self.db.model.buildsets
q = tbl.update().where(
(tbl.c.id == bsid) & ((tbl.c.complete == NULL) | (tbl.c.complete != 1))
)
res = conn.execute(q.values(complete=1, results=results, complete_at=complete_at))
conn.commit()
if res.rowcount != 1:
# happens when two buildrequests finish at the same time
raise AlreadyCompleteError()
yield self.db.pool.do(thd)
def getBuildset(self, bsid) -> defer.Deferred[BuildSetModel | None]:
def thd(conn) -> BuildSetModel | None:
bs_tbl = self.db.model.buildsets
q = bs_tbl.select().where(bs_tbl.c.id == bsid)
res = conn.execute(q)
row = res.fetchone()
if not row:
return None
return self._thd_model_from_row(conn, row)
return self.db.pool.do(thd)
@defer.inlineCallbacks
def getBuildsets(self, complete=None, resultSpec=None):
def thd(conn) -> list[BuildSetModel]:
bs_tbl = self.db.model.buildsets
q = bs_tbl.select()
if complete is not None:
if complete:
q = q.where(bs_tbl.c.complete != 0)
else:
q = q.where((bs_tbl.c.complete == 0) | (bs_tbl.c.complete == NULL))
if resultSpec is not None:
return resultSpec.thd_execute(conn, q, lambda x: self._thd_model_from_row(conn, x))
res = conn.execute(q)
return [self._thd_model_from_row(conn, row) for row in res.fetchall()]
res = yield self.db.pool.do(thd)
return res
def getRecentBuildsets(
self,
count: int | None = None,
branch: str | None = None,
repository: str | None = None,
complete: bool | None = None,
) -> defer.Deferred[list[BuildSetModel]]:
def thd(conn) -> list[BuildSetModel]:
bs_tbl = self.db.model.buildsets
ss_tbl = self.db.model.sourcestamps
j = self.db.model.buildsets
j = j.join(self.db.model.buildset_sourcestamps)
j = j.join(self.db.model.sourcestamps)
q = sa.select(bs_tbl).select_from(j).distinct()
q = q.order_by(sa.desc(bs_tbl.c.submitted_at))
q = q.limit(count)
if complete is not None:
if complete:
q = q.where(bs_tbl.c.complete != 0)
else:
q = q.where((bs_tbl.c.complete == 0) | (bs_tbl.c.complete == NULL))
if branch:
q = q.where(ss_tbl.c.branch == branch)
if repository:
q = q.where(ss_tbl.c.repository == repository)
res = conn.execute(q)
return list(reversed([self._thd_model_from_row(conn, row) for row in res.fetchall()]))
return self.db.pool.do(thd)
@base.cached("BuildsetProperties")
def getBuildsetProperties(self, bsid) -> defer.Deferred[BsProps]:
def thd(conn) -> BsProps:
bsp_tbl = self.db.model.buildset_properties
q = sa.select(
bsp_tbl.c.property_name,
bsp_tbl.c.property_value,
).where(bsp_tbl.c.buildsetid == bsid)
ret = []
for row in conn.execute(q):
try:
properties = json.loads(row.property_value)
ret.append((row.property_name, tuple(properties)))
except ValueError:
pass
return BsProps(ret)
return self.db.pool.do(thd)
def _thd_model_from_row(self, conn, row):
# get sourcestamps
tbl = self.db.model.buildset_sourcestamps
sourcestamps = [
r.sourcestampid
for r in conn.execute(
sa.select(tbl.c.sourcestampid).where(tbl.c.buildsetid == row.id)
).fetchall()
]
return BuildSetModel(
bsid=row.id,
external_idstring=row.external_idstring,
reason=row.reason,
submitted_at=epoch2datetime(row.submitted_at),
complete=bool(row.complete),
complete_at=epoch2datetime(row.complete_at),
results=row.results,
parent_buildid=row.parent_buildid,
parent_relationship=row.parent_relationship,
rebuilt_buildid=row.rebuilt_buildid,
sourcestamps=sourcestamps,
)
| 11,241 | Python | .py | 271 | 29.856089 | 99 | 0.576793 | buildbot/buildbot | 5,232 | 1,616 | 728 | GPL-2.0 | 9/5/2024, 5:09:21 PM (Europe/Amsterdam) |
4,754 | migrate_utils.py | buildbot_buildbot/master/buildbot/db/migrate_utils.py | # This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
import sqlalchemy as sa
from buildbot.util import sautils
def test_unicode(migrate_engine):
"""Test that the database can handle inserting and selecting Unicode"""
# set up a subsidiary MetaData object to hold this temporary table
submeta = sa.MetaData()
submeta.bind = migrate_engine
test_unicode = sautils.Table(
'test_unicode',
submeta,
sa.Column('u', sa.Unicode(length=100)),
sa.Column('b', sa.LargeBinary),
)
test_unicode.create(bind=migrate_engine)
migrate_engine.commit()
# insert a unicode value in there
u = "Frosty the \N{SNOWMAN}"
b = b'\xff\xff\x00'
ins = test_unicode.insert().values(u=u, b=b)
migrate_engine.execute(ins)
migrate_engine.commit()
# see if the data is intact
row = migrate_engine.execute(test_unicode.select()).fetchall()[0]
assert isinstance(row.u, str)
assert row.u == u
assert isinstance(row.b, bytes)
assert row.b == b
# drop the test table
test_unicode.drop(bind=migrate_engine)
migrate_engine.commit()
| 1,774 | Python | .py | 44 | 36.295455 | 79 | 0.72516 | buildbot/buildbot | 5,232 | 1,616 | 728 | GPL-2.0 | 9/5/2024, 5:09:21 PM (Europe/Amsterdam) |
4,755 | connector.py | buildbot_buildbot/master/buildbot/db/connector.py | # This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
import textwrap
from twisted.application import internet
from twisted.internet import defer
from twisted.python import log
from buildbot import config
from buildbot import util
from buildbot.db import build_data
from buildbot.db import builders
from buildbot.db import buildrequests
from buildbot.db import builds
from buildbot.db import buildsets
from buildbot.db import changes
from buildbot.db import changesources
from buildbot.db import enginestrategy
from buildbot.db import exceptions
from buildbot.db import logs
from buildbot.db import masters
from buildbot.db import model
from buildbot.db import pool
from buildbot.db import projects
from buildbot.db import schedulers
from buildbot.db import sourcestamps
from buildbot.db import state
from buildbot.db import steps
from buildbot.db import tags
from buildbot.db import test_result_sets
from buildbot.db import test_results
from buildbot.db import users
from buildbot.db import workers
from buildbot.util import service
from buildbot.util.deferwaiter import DeferWaiter
from buildbot.util.sautils import get_upsert_method
from buildbot.util.twisted import async_to_deferred
upgrade_message = textwrap.dedent("""\
The Buildmaster database needs to be upgraded before this version of
    buildbot can run. Use the following command line
buildbot upgrade-master {basedir}
to upgrade the database, and try starting the buildmaster again. You may
want to make a backup of your buildmaster before doing so.
""").strip()
class AbstractDBConnector(service.ReconfigurableServiceMixin, service.AsyncMultiService):
def __init__(self) -> None:
super().__init__()
self.configured_url = None
@defer.inlineCallbacks
def setup(self):
if self.configured_url is None:
self.configured_url = yield self.master.get_db_url(self.master.config)
@defer.inlineCallbacks
def reconfigServiceWithBuildbotConfig(self, new_config):
new_db_url = yield self.master.get_db_url(new_config)
if self.configured_url is None:
self.configured_url = new_db_url
elif self.configured_url != new_db_url:
config.error(
"Cannot change c['db']['db_url'] after the master has started",
)
return (yield super().reconfigServiceWithBuildbotConfig(new_config))
class DBConnector(AbstractDBConnector):
# The connection between Buildbot and its backend database. This is
# generally accessible as master.db, but is also used during upgrades.
#
# Most of the interesting operations available via the connector are
# implemented in connector components, available as attributes of this
# object, and listed below.
# Period, in seconds, of the cleanup task. This master will perform
# periodic cleanup actions on this schedule.
CLEANUP_PERIOD = 3600
def __init__(self, basedir):
super().__init__()
self.setName('db')
self.basedir = basedir
# not configured yet - we don't build an engine until the first
# reconfig
self.configured_url = None
# set up components
self._engine = None # set up in reconfigService
self.pool = None # set up in reconfigService
self.upsert = get_upsert_method(None) # set up in reconfigService
@defer.inlineCallbacks
def setServiceParent(self, p):
yield super().setServiceParent(p)
self.model = model.Model(self)
self.changes = changes.ChangesConnectorComponent(self)
self.changesources = changesources.ChangeSourcesConnectorComponent(self)
self.schedulers = schedulers.SchedulersConnectorComponent(self)
self.sourcestamps = sourcestamps.SourceStampsConnectorComponent(self)
self.buildsets = buildsets.BuildsetsConnectorComponent(self)
self.buildrequests = buildrequests.BuildRequestsConnectorComponent(self)
self.state = state.StateConnectorComponent(self)
self.builds = builds.BuildsConnectorComponent(self)
self.build_data = build_data.BuildDataConnectorComponent(self)
self.workers = workers.WorkersConnectorComponent(self)
self.users = users.UsersConnectorComponent(self)
self.masters = masters.MastersConnectorComponent(self)
self.builders = builders.BuildersConnectorComponent(self)
self.projects = projects.ProjectsConnectorComponent(self)
self.steps = steps.StepsConnectorComponent(self)
self.tags = tags.TagsConnectorComponent(self)
self.logs = logs.LogsConnectorComponent(self)
self.test_results = test_results.TestResultsConnectorComponent(self)
self.test_result_sets = test_result_sets.TestResultSetsConnectorComponent(self)
self.cleanup_timer = internet.TimerService(self.CLEANUP_PERIOD, self._doCleanup)
self.cleanup_timer.clock = self.master.reactor
yield self.cleanup_timer.setServiceParent(self)
@defer.inlineCallbacks
def setup(self, check_version=True, verbose=True):
        yield super().setup()
db_url = self.configured_url
log.msg(f"Setting up database with URL {util.stripUrlPassword(db_url)!r}")
# set up the engine and pool
self._engine = enginestrategy.create_engine(db_url, basedir=self.basedir)
self.upsert = get_upsert_method(self._engine)
self.pool = pool.DBThreadPool(self._engine, reactor=self.master.reactor, verbose=verbose)
self._db_tasks_waiter = DeferWaiter()
# make sure the db is up to date, unless specifically asked not to
if check_version:
if db_url == 'sqlite://':
# Using in-memory database. Since it is reset after each process
# restart, `buildbot upgrade-master` cannot be used (data is not
# persistent). Upgrade model here to allow startup to continue.
yield self.model.upgrade()
current = yield self.model.is_current()
if not current:
                for line in upgrade_message.format(basedir=self.master.basedir).split('\n'):
                    log.msg(line)
raise exceptions.DatabaseNotReadyError()
@async_to_deferred
async def _shutdown(self) -> None:
"""
Called by stopService, except in test context
as most tests don't call startService
"""
await self._db_tasks_waiter.wait()
@defer.inlineCallbacks
def stopService(self):
yield self._shutdown()
yield super().stopService()
def _doCleanup(self):
"""
Perform any periodic database cleanup tasks.
@returns: Deferred
"""
# pass on this if we're not configured yet
if not self.configured_url:
return None
d = self.changes.pruneChanges(self.master.config.changeHorizon)
d.addErrback(log.err, 'while pruning changes')
return d
def run_db_task(self, deferred_task: defer.Deferred) -> None:
self._db_tasks_waiter.add(deferred_task)
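    # run_db_task() is for fire-and-forget database work: the Deferred keeps
    # running in the background, but _shutdown() waits on the DeferWaiter so
    # the master does not stop with writes still in flight. A hedged sketch
    # (the horizon value is an assumption):
    #
    #   d = self.changes.pruneChanges(500)
    #   d.addErrback(log.err, 'while pruning changes')
    #   self.run_db_task(d)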
| 7,746 | Python | .py | 165 | 40.30303 | 97 | 0.72106 | buildbot/buildbot | 5,232 | 1,616 | 728 | GPL-2.0 | 9/5/2024, 5:09:21 PM (Europe/Amsterdam) |
4,756 | pool.py | buildbot_buildbot/master/buildbot/db/pool.py | # This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
from __future__ import annotations
import inspect
import time
import traceback
from typing import TYPE_CHECKING
import sqlalchemy as sa
from twisted.internet import defer
from twisted.internet import threads
from twisted.python import log
from twisted.python import threadpool
from buildbot.db.buildrequests import AlreadyClaimedError
from buildbot.db.buildsets import AlreadyCompleteError
from buildbot.db.changesources import ChangeSourceAlreadyClaimedError
from buildbot.db.logs import LogSlugExistsError
from buildbot.db.schedulers import SchedulerAlreadyClaimedError
from buildbot.process import metrics
from buildbot.util.sautils import get_sqlite_version
if TYPE_CHECKING:
from typing import Callable
from typing import TypeVar
from typing_extensions import Concatenate
from typing_extensions import ParamSpec
_T = TypeVar('_T')
_P = ParamSpec('_P')
# set this to True for *very* verbose query debugging output; this can
# be monkey-patched from master.cfg, too:
# from buildbot.db import pool
# pool.debug = True
debug = False
_debug_id = 1
def timed_do_fn(f):
"""Decorate a do function to log before, after, and elapsed time,
with the name of the calling function. This is not speedy!"""
def wrap(callable, *args, **kwargs):
global _debug_id
# get a description of the function that called us
st = traceback.extract_stack(limit=2)
file, line, name, _ = st[0]
# and its locals
frame = inspect.currentframe()
locals = frame.f_locals
# invent a unique ID for the description
id = _debug_id
_debug_id = _debug_id + 1
descr = f"{name}-{id:08x}"
start_time = time.time()
log.msg(f"{descr} - before ('{file}' line {line})")
for name in locals:
if name in ('self', 'thd'):
continue
log.msg(f"{descr} - {name} = {locals[name]!r}")
# wrap the callable to log the begin and end of the actual thread
# function
        def callable_wrap(*args, **kwargs):
log.msg(f"{descr} - thd start")
try:
return callable(*args, **kwargs)
finally:
log.msg(f"{descr} - thd end")
d = f(callable_wrap, *args, **kwargs)
@d.addBoth
def after(x):
end_time = time.time()
elapsed = (end_time - start_time) * 1000
log.msg(f"{descr} - after ({elapsed:0.2f} ms elapsed)")
return x
return d
wrap.__name__ = f.__name__
wrap.__doc__ = f.__doc__
return wrap
class DBThreadPool:
running = False
def __init__(self, engine, reactor, verbose=False):
# verbose is used by upgrade scripts, and if it is set we should print
# messages about versions and other warnings
log_msg = log.msg
if verbose:
def _log_msg(m):
print(m)
log_msg = _log_msg
self.reactor = reactor
pool_size = 5
# If the engine has an C{optimal_thread_pool_size} attribute, then the
# maxthreads of the thread pool will be set to that value. This is
# most useful for SQLite in-memory connections, where exactly one
# connection (and thus thread) should be used.
if hasattr(engine, 'optimal_thread_pool_size'):
pool_size = engine.optimal_thread_pool_size
self._pool = threadpool.ThreadPool(minthreads=1, maxthreads=pool_size, name='DBThreadPool')
self.engine = engine
if engine.dialect.name == 'sqlite':
vers = get_sqlite_version()
if vers < (3, 7):
log_msg(f"Using SQLite Version {vers}")
log_msg(
"NOTE: this old version of SQLite does not support "
"WAL journal mode; a busy master may encounter "
"'Database is locked' errors. Consider upgrading."
)
if vers < (3, 6, 19):
log_msg("NOTE: this old version of SQLite is not supported.")
raise RuntimeError("unsupported SQLite version")
self._start_evt = self.reactor.callWhenRunning(self._start)
# patch the do methods to do verbose logging if necessary
if debug:
self.do = timed_do_fn(self.do)
self.do_with_engine = timed_do_fn(self.do_with_engine)
        self.forbidden_callable_return_type = self.get_sqlalchemy_result_type()
def get_sqlalchemy_result_type(self):
try:
from sqlalchemy.engine import ResultProxy # sqlalchemy 1.x - 1.3
return ResultProxy
except ImportError:
pass
try:
from sqlalchemy.engine import Result # sqlalchemy 1.4 and newer
return Result
except ImportError:
pass
raise ImportError("Could not import SQLAlchemy result type")
def _start(self):
self._start_evt = None
if not self.running:
self._pool.start()
self._stop_evt = self.reactor.addSystemEventTrigger(
'during', 'shutdown', self._stop_nowait
)
self.running = True
def _stop_nowait(self):
self._stop_evt = None
threads.deferToThreadPool(self.reactor, self._pool, self.engine.dispose)
self._pool.stop()
self.running = False
@defer.inlineCallbacks
def _stop(self):
self._stop_evt = None
yield threads.deferToThreadPool(self.reactor, self._pool, self.engine.dispose)
self._pool.stop()
self.running = False
@defer.inlineCallbacks
def shutdown(self):
"""Manually stop the pool. This is only necessary from tests, as the
pool will stop itself when the reactor stops under normal
circumstances."""
if not self._stop_evt:
return # pool is already stopped
self.reactor.removeSystemEventTrigger(self._stop_evt)
yield self._stop()
# Try about 170 times over the space of a day, with the last few tries
# being about an hour apart. This is designed to span a reasonable amount
# of time for repairing a broken database server, while still failing
# actual problematic queries eventually
BACKOFF_START = 1.0
BACKOFF_MULT = 1.05
MAX_OPERATIONALERROR_TIME = 3600 * 24 # one day
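    # Back-of-the-envelope check of those numbers: the n-th sleep lasts
    # BACKOFF_START * BACKOFF_MULT**n seconds, so total sleep time after n
    # retries is the geometric sum (1.05**n - 1) / 0.05. At n = 170 that is
    # roughly 80,000 seconds (~22 hours), with the last sleeps
    # (1.05**169 ~ 3800 s) a bit over an hour apart -- hence "about 170 times
    # over the space of a day" above.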
def __thd(
self,
with_engine: bool,
callable: Callable[Concatenate[sa.engine.Engine | sa.engine.Connection, _P], _T],
args: _P.args,
kwargs: _P.kwargs,
) -> _T:
# try to call callable(arg, *args, **kwargs) repeatedly until no
# OperationalErrors occur, where arg is either the engine (with_engine)
# or a connection (not with_engine)
backoff = self.BACKOFF_START
start = time.time()
while True:
if with_engine:
arg = self.engine
else:
arg = self.engine.connect()
try:
try:
rv = callable(arg, *args, **kwargs)
assert not isinstance(
                        rv, self.forbidden_callable_return_type
), "do not return ResultProxy objects!"
except sa.exc.OperationalError as e:
if not self.engine.should_retry(e):
log.err(e, 'Got fatal OperationalError on DB')
raise
elapsed = time.time() - start
if elapsed > self.MAX_OPERATIONALERROR_TIME:
log.err(
e,
f'Raising due to {self.MAX_OPERATIONALERROR_TIME} '
'seconds delay on DB query retries',
)
raise
metrics.MetricCountEvent.log("DBThreadPool.retry-on-OperationalError")
# sleep (remember, we're in a thread..)
time.sleep(backoff)
backoff *= self.BACKOFF_MULT
# and re-try
log.err(e, f'retrying {callable} after sql error {e}')
continue
except Exception as e:
# AlreadyClaimedError are normal especially in a multimaster
# configuration
if not isinstance(
e,
(
AlreadyClaimedError,
ChangeSourceAlreadyClaimedError,
SchedulerAlreadyClaimedError,
AlreadyCompleteError,
LogSlugExistsError,
),
):
log.err(e, 'Got fatal Exception on DB')
raise
finally:
if not with_engine:
arg.close()
break
return rv
def do_with_transaction(
self,
callable: Callable[Concatenate[sa.engine.Connection, _P], _T],
*args: _P.args,
**kwargs: _P.kwargs,
) -> defer.Deferred[_T]:
"""Same as `do`, but will wrap callable with `with conn.begin():`"""
def _transaction(
conn: sa.engine.Connection,
callable: Callable[Concatenate[sa.engine.Connection, _P], _T],
*args: _P.args,
**kwargs: _P.kwargs,
) -> _T:
with conn.begin():
return callable(conn, *args, **kwargs)
return self.do(_transaction, callable, *args, **kwargs)
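    # The practical difference (a sketch; `tbl`, `tbl2` and `uid` are
    # assumptions): with `do`, the callable manages its own commits; with
    # `do_with_transaction`, the `with conn.begin():` block commits on
    # success and rolls back on any exception, keeping multi-statement
    # callables atomic:
    #
    #   def thd(conn):
    #       conn.execute(tbl.delete().where(tbl.c.uid == uid))
    #       conn.execute(tbl2.delete().where(tbl2.c.uid == uid))
    #   yield pool.do_with_transaction(thd)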
def do(
self,
callable: Callable[Concatenate[sa.engine.Connection, _P], _T],
*args: _P.args,
**kwargs: _P.kwargs,
) -> defer.Deferred[_T]:
return threads.deferToThreadPool(
self.reactor,
self._pool,
self.__thd, # type: ignore[arg-type]
False,
callable,
args,
kwargs,
)
def do_with_engine(
self,
callable: Callable[Concatenate[sa.engine.Engine, _P], _T],
*args: _P.args,
**kwargs: _P.kwargs,
) -> defer.Deferred[_T]:
return threads.deferToThreadPool(
self.reactor,
self._pool,
self.__thd, # type: ignore[arg-type]
True,
callable,
args,
kwargs,
)
| 11,232 | Python | .py | 277 | 29.649819 | 99 | 0.58221 | buildbot/buildbot | 5,232 | 1,616 | 728 | GPL-2.0 | 9/5/2024, 5:09:21 PM (Europe/Amsterdam) |
4,757 | test_results.py | buildbot_buildbot/master/buildbot/db/test_results.py | # This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
from __future__ import annotations
from dataclasses import dataclass
import sqlalchemy as sa
from twisted.internet import defer
from twisted.python import deprecate
from twisted.python import versions
from buildbot.db import base
from buildbot.warnings import warn_deprecated
@dataclass
class TestResultModel:
id: int
builderid: int
test_result_setid: int
test_name: str | None
test_code_path: str | None
line: int | None
duration_ns: int | None
value: str | None
# For backward compatibility
def __getitem__(self, key: str):
warn_deprecated(
'4.1.0',
(
'TestResultsConnectorComponent '
'getTestResult, and getTestResults '
                'no longer return TestResult as dictionaries. '
'Usage of [] accessor is deprecated: please access the member directly'
),
)
if hasattr(self, key):
return getattr(self, key)
raise KeyError(key)
@deprecate.deprecated(versions.Version("buildbot", 4, 1, 0), TestResultModel)
class TestResultDict(TestResultModel):
pass
class TestResultsConnectorComponent(base.DBConnectorComponent):
def _add_code_paths(self, builderid: int, paths: set[str]) -> defer.Deferred[dict[str, int]]:
# returns a dictionary of path to id in the test_code_paths table.
# For paths that already exist, the id of the row in the test_code_paths is retrieved.
assert isinstance(paths, set)
def thd(conn) -> dict[str, int]:
paths_to_ids = {}
paths_table = self.db.model.test_code_paths
for path_batch in self.doBatch(paths, batch_n=3000):
path_batch = set(path_batch)
while path_batch:
# Use expanding bindparam, because performance of sqlalchemy is very slow
# when filtering large sets otherwise.
q = paths_table.select().where(
(paths_table.c.path.in_(sa.bindparam('paths', expanding=True)))
& (paths_table.c.builderid == builderid)
)
res = conn.execute(q, {'paths': list(path_batch)})
for row in res.fetchall():
paths_to_ids[row.path] = row.id
path_batch.remove(row.path)
# paths now contains all the paths that need insertion.
try:
insert_values = [
{'builderid': builderid, 'path': path} for path in path_batch
]
q = paths_table.insert().values(insert_values)
if self.db.pool.engine.dialect.name in ['postgresql', 'mssql']:
# Use RETURNING, this way we won't need an additional select query
q = q.returning(paths_table.c.id, paths_table.c.path)
res = conn.execute(q)
conn.commit()
for row in res.fetchall():
paths_to_ids[row.path] = row.id
path_batch.remove(row.path)
else:
conn.execute(q)
conn.commit()
except (sa.exc.IntegrityError, sa.exc.ProgrammingError):
# There was a competing addCodePaths() call that added a path for the same
# builder. Depending on the DB driver, none or some rows were inserted, but
                        # we will re-check what actually got inserted in the next iteration of the loop
conn.rollback()
return paths_to_ids
return self.db.pool.do(thd)
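    # The expanding bindparam used above is the SQLAlchemy idiom for an IN
    # clause whose member count is only known at execution time; a standalone
    # sketch (table and values assumed):
    #
    #   q = tbl.select().where(tbl.c.path.in_(sa.bindparam('paths', expanding=True)))
    #   conn.execute(q, {'paths': ['a/b.py', 'c/d.py']})
    #
    # By contrast, tbl.c.path.in_(big_list) compiles one bind per element,
    # which is slow and can exceed DBAPI parameter limits for large sets.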
def getTestCodePaths(
self, builderid, path_prefix: str | None = None, result_spec=None
) -> defer.Deferred[list[str]]:
def thd(conn) -> list[str]:
paths_table = self.db.model.test_code_paths
            q = paths_table.select().where(paths_table.c.builderid == builderid)
if path_prefix is not None:
q = q.where(paths_table.c.path.startswith(path_prefix))
if result_spec is not None:
                return result_spec.thd_execute(conn, q, lambda x: x.path)
res = conn.execute(q)
return [row.path for row in res.fetchall()]
return self.db.pool.do(thd)
def _add_names(self, builderid: int, names: set[str]) -> defer.Deferred[dict[str, int]]:
# returns a dictionary of name to id in the test_names table.
# For names that already exist, the id of the row in the test_names is retrieved.
assert isinstance(names, set)
def thd(conn) -> dict[str, int]:
names_to_ids = {}
names_table = self.db.model.test_names
for name_batch in self.doBatch(names, batch_n=3000):
name_batch = set(name_batch)
while name_batch:
# Use expanding bindparam, because performance of sqlalchemy is very slow
# when filtering large sets otherwise.
q = names_table.select().where(
(names_table.c.name.in_(sa.bindparam('names', expanding=True)))
& (names_table.c.builderid == builderid)
)
res = conn.execute(q, {'names': list(name_batch)})
for row in res.fetchall():
names_to_ids[row.name] = row.id
name_batch.remove(row.name)
# names now contains all the names that need insertion.
try:
insert_values = [
{'builderid': builderid, 'name': name} for name in name_batch
]
q = names_table.insert().values(insert_values)
if self.db.pool.engine.dialect.name in ['postgresql', 'mssql']:
# Use RETURNING, this way we won't need an additional select query
q = q.returning(names_table.c.id, names_table.c.name)
res = conn.execute(q)
conn.commit()
for row in res.fetchall():
names_to_ids[row.name] = row.id
name_batch.remove(row.name)
else:
conn.execute(q)
conn.commit()
except (sa.exc.IntegrityError, sa.exc.ProgrammingError):
# There was a competing addNames() call that added a name for the same
# builder. Depending on the DB driver, none or some rows were inserted, but
                        # we will re-check what actually got inserted in the next iteration of the loop
conn.rollback()
return names_to_ids
return self.db.pool.do(thd)
def getTestNames(
self, builderid, name_prefix=None, result_spec=None
) -> defer.Deferred[list[str]]:
def thd(conn) -> list[str]:
names_table = self.db.model.test_names
q = names_table.select().where(names_table.c.builderid == builderid)
if name_prefix is not None:
q = q.where(names_table.c.name.startswith(name_prefix))
if result_spec is not None:
return result_spec.thd_execute(conn, q, lambda x: x.name)
res = conn.execute(q)
return [row.name for row in res.fetchall()]
return self.db.pool.do(thd)
@defer.inlineCallbacks
def addTestResults(self, builderid, test_result_setid, result_values):
# Adds multiple test results for a specific test result set.
# result_values is a list of dictionaries each of which must contain 'value' key and at
# least one of 'test_name', 'test_code_path'. 'line' key is optional.
# The function returns nothing.
# Build values list for insertion.
insert_values = []
insert_names = set()
insert_code_paths = set()
for result_value in result_values:
if 'value' not in result_value:
raise KeyError('Each of result_values must contain \'value\' key')
if 'test_name' not in result_value and 'test_code_path' not in result_value:
raise KeyError(
'Each of result_values must contain at least one of '
'\'test_name\' or \'test_code_path\' keys'
)
if 'test_name' in result_value:
insert_names.add(result_value['test_name'])
if 'test_code_path' in result_value:
insert_code_paths.add(result_value['test_code_path'])
code_path_to_id = yield self._add_code_paths(builderid, insert_code_paths)
name_to_id = yield self._add_names(builderid, insert_names)
for result_value in result_values:
insert_value = {
'value': result_value['value'],
'builderid': builderid,
'test_result_setid': test_result_setid,
'test_nameid': None,
'test_code_pathid': None,
'line': None,
'duration_ns': None,
}
if 'test_name' in result_value:
insert_value['test_nameid'] = name_to_id[result_value['test_name']]
if 'test_code_path' in result_value:
insert_value['test_code_pathid'] = code_path_to_id[result_value['test_code_path']]
if 'line' in result_value:
insert_value['line'] = result_value['line']
if 'duration_ns' in result_value:
insert_value['duration_ns'] = result_value['duration_ns']
insert_values.append(insert_value)
def thd(conn):
results_table = self.db.model.test_results
q = results_table.insert().values(insert_values)
conn.execute(q)
yield self.db.pool.do_with_transaction(thd)
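    # Shape of `result_values` expected by addTestResults (a sketch; the
    # concrete values are assumptions):
    #
    #   yield db.test_results.addTestResults(builderid, setid, [
    #       {'test_name': 'test_login', 'value': 'PASS', 'duration_ns': 120000},
    #       {'test_code_path': 'tests/test_auth.py', 'line': 42, 'value': 'FAIL'},
    #   ])
    #
    # Each dict needs 'value' plus at least one of 'test_name' or
    # 'test_code_path'; names and paths are interned into their per-builder
    # lookup tables first via the helpers above.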
def getTestResult(self, test_resultid: int) -> defer.Deferred[TestResultModel | None]:
def thd(conn) -> TestResultModel | None:
results_table = self.db.model.test_results
code_paths_table = self.db.model.test_code_paths
names_table = self.db.model.test_names
j = results_table.outerjoin(code_paths_table).outerjoin(names_table)
q = sa.select(results_table, code_paths_table.c.path, names_table.c.name)
q = q.select_from(j).where(results_table.c.id == test_resultid)
res = conn.execute(q)
row = res.fetchone()
if not row:
return None
            return self._model_from_row(row)
return self.db.pool.do(thd)
def getTestResults(
self, builderid: int, test_result_setid: int, result_spec=None
) -> defer.Deferred[list[TestResultModel]]:
def thd(conn) -> list[TestResultModel]:
results_table = self.db.model.test_results
code_paths_table = self.db.model.test_code_paths
names_table = self.db.model.test_names
# specify join ON clauses manually to force filtering of code_paths_table and
# names_table before join
j = results_table.outerjoin(
code_paths_table,
(results_table.c.test_code_pathid == code_paths_table.c.id)
& (code_paths_table.c.builderid == builderid),
)
j = j.outerjoin(
names_table,
(results_table.c.test_nameid == names_table.c.id)
& (names_table.c.builderid == builderid),
)
q = sa.select(results_table, code_paths_table.c.path, names_table.c.name)
q = q.select_from(j).where(
(results_table.c.builderid == builderid)
& (results_table.c.test_result_setid == test_result_setid)
)
if result_spec is not None:
                return result_spec.thd_execute(conn, q, self._model_from_row)
res = conn.execute(q)
            return [self._model_from_row(row) for row in res.fetchall()]
return self.db.pool.do(thd)
    def _model_from_row(self, row):
return TestResultModel(
id=row.id,
builderid=row.builderid,
test_result_setid=row.test_result_setid,
test_name=row.name,
test_code_path=row.path,
line=row.line,
duration_ns=row.duration_ns,
value=row.value,
)
| 13,508 | Python | .py | 266 | 36.462406 | 99 | 0.564231 | buildbot/buildbot | 5,232 | 1,616 | 728 | GPL-2.0 | 9/5/2024, 5:09:21 PM (Europe/Amsterdam) |
4,758 | steps.py | buildbot_buildbot/master/buildbot/db/steps.py | # This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
from __future__ import annotations
import json
from dataclasses import dataclass
from typing import TYPE_CHECKING
import sqlalchemy as sa
from twisted.internet import defer
from buildbot.db import base
from buildbot.util import epoch2datetime
from buildbot.util.twisted import async_to_deferred
from buildbot.warnings import warn_deprecated
if TYPE_CHECKING:
import datetime
@dataclass
class UrlModel:
name: str
url: str
# For backward compatibility
def __getitem__(self, key: str):
warn_deprecated(
'4.1.0',
(
'StepsConnectorComponent '
'getStep, and getSteps '
                'no longer return Step as dictionaries. '
'Usage of [] accessor is deprecated: please access the member directly'
),
)
if hasattr(self, key):
return getattr(self, key)
raise KeyError(key)
@dataclass
class StepModel:
id: int
number: int
name: str
buildid: int
started_at: datetime.datetime | None
locks_acquired_at: datetime.datetime | None
complete_at: datetime.datetime | None
state_string: str
results: int | None
urls: list[UrlModel]
hidden: bool = False
# For backward compatibility
def __getitem__(self, key: str):
warn_deprecated(
'4.1.0',
(
'StepsConnectorComponent '
'getStep, and getSteps '
                'no longer return Step as dictionaries. '
'Usage of [] accessor is deprecated: please access the member directly'
),
)
if hasattr(self, key):
return getattr(self, key)
raise KeyError(key)
class StepsConnectorComponent(base.DBConnectorComponent):
url_lock: defer.DeferredLock | None = None
@async_to_deferred
async def getStep(
self,
stepid: int | None = None,
buildid: int | None = None,
number: int | None = None,
name: str | None = None,
) -> StepModel | None:
tbl = self.db.model.steps
if stepid is not None:
wc = tbl.c.id == stepid
else:
if buildid is None:
raise RuntimeError('must supply either stepid or buildid')
if number is not None:
wc = tbl.c.number == number
elif name is not None:
wc = tbl.c.name == name
else:
raise RuntimeError('must supply either number or name')
wc = wc & (tbl.c.buildid == buildid)
def thd(conn) -> StepModel | None:
q = self.db.model.steps.select().where(wc)
res = conn.execute(q)
row = res.fetchone()
rv = None
if row:
rv = self._model_from_row(row)
res.close()
return rv
return await self.db.pool.do(thd)
def getSteps(self, buildid: int) -> defer.Deferred[list[StepModel]]:
def thd(conn) -> list[StepModel]:
tbl = self.db.model.steps
q = tbl.select()
q = q.where(tbl.c.buildid == buildid)
q = q.order_by(tbl.c.number)
res = conn.execute(q)
return [self._model_from_row(row) for row in res.fetchall()]
return self.db.pool.do(thd)
def addStep(
self, buildid: int, name: str, state_string: str
) -> defer.Deferred[tuple[int, int, str]]:
def thd(conn) -> tuple[int, int, str]:
tbl = self.db.model.steps
# get the highest current number
r = conn.execute(sa.select(sa.func.max(tbl.c.number)).where(tbl.c.buildid == buildid))
number = r.scalar()
number = 0 if number is None else number + 1
# note that there is no chance for a race condition here,
# since only one master is inserting steps. If there is a
# conflict, then the name is likely already taken.
insert_row = {
"buildid": buildid,
"number": number,
"started_at": None,
"locks_acquired_at": None,
"complete_at": None,
"state_string": state_string,
"urls_json": '[]',
"name": name,
}
try:
r = conn.execute(self.db.model.steps.insert(), insert_row)
conn.commit()
got_id = r.inserted_primary_key[0]
except (sa.exc.IntegrityError, sa.exc.ProgrammingError):
conn.rollback()
got_id = None
if got_id:
return (got_id, number, name)
# we didn't get an id, so calculate a unique name and use that
# instead. Because names are truncated at the right to fit in a
# 50-character identifier, this isn't a simple query.
res = conn.execute(sa.select(tbl.c.name).where(tbl.c.buildid == buildid))
names = {row[0] for row in res}
num = 1
while True:
numstr = f'_{num}'
newname = name[: 50 - len(numstr)] + numstr
if newname not in names:
break
num += 1
insert_row['name'] = newname
r = conn.execute(self.db.model.steps.insert(), insert_row)
conn.commit()
got_id = r.inserted_primary_key[0]
return (got_id, number, newname)
return self.db.pool.do(thd)
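    # Editor's illustrative sketch (hypothetical helper, not Buildbot API):
    # the unique-name fallback above as pure logic -- truncate on the right
    # so that the name plus numeric suffix still fits in 50 characters.
    @staticmethod
    def _example_unique_step_name(name: str, existing: set[str], maxlen: int = 50) -> str:
        num = 1
        while True:
            suffix = f'_{num}'
            candidate = name[: maxlen - len(suffix)] + suffix
            if candidate not in existing:
                return candidate
            num += 1
    # e.g. _example_unique_step_name('compile', {'compile', 'compile_1'}) == 'compile_2'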
def startStep(self, stepid: int, started_at: int, locks_acquired: bool) -> defer.Deferred[None]:
def thd(conn) -> None:
tbl = self.db.model.steps
q = tbl.update().where(tbl.c.id == stepid)
if locks_acquired:
conn.execute(q.values(started_at=started_at, locks_acquired_at=started_at))
else:
conn.execute(q.values(started_at=started_at))
return self.db.pool.do_with_transaction(thd)
def set_step_locks_acquired_at(
self, stepid: int, locks_acquired_at: int
) -> defer.Deferred[None]:
def thd(conn) -> None:
tbl = self.db.model.steps
q = tbl.update().where(tbl.c.id == stepid)
conn.execute(q.values(locks_acquired_at=locks_acquired_at))
return self.db.pool.do_with_transaction(thd)
def setStepStateString(self, stepid: int, state_string: str) -> defer.Deferred[None]:
def thd(conn) -> None:
tbl = self.db.model.steps
q = tbl.update().where(tbl.c.id == stepid)
conn.execute(q.values(state_string=state_string))
return self.db.pool.do_with_transaction(thd)
def addURL(self, stepid: int, name: str, url: str, _racehook=None) -> defer.Deferred[None]:
        # This method adds a URL to the db.
        # It is a read-modify-write, so several URLs may be added at the
        # same time (e.g. with a DeferredList at the end of a step).
        # This race condition exists only within the same master, as only
        # one master is supposed to add URLs to a buildstep, so a
        # DeferredLock is used to serialize the read-modify-write.
if self.url_lock is None:
            # this runs in the reactor thread, so there is no race here
self.url_lock = defer.DeferredLock()
def thd(conn) -> None:
tbl = self.db.model.steps
wc = tbl.c.id == stepid
q = sa.select(tbl.c.urls_json).where(wc)
res = conn.execute(q)
row = res.fetchone()
if _racehook is not None:
_racehook()
urls = json.loads(row.urls_json)
url_item = {"name": name, "url": url}
if url_item not in urls:
urls.append(url_item)
q2 = tbl.update().where(wc)
conn.execute(q2.values(urls_json=json.dumps(urls)))
conn.commit()
return self.url_lock.run(self.db.pool.do, thd)
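    # Editor's illustrative sketch (hypothetical helper, not Buildbot API):
    # the lost-update hazard that url_lock prevents, reduced to pure logic.
    # Two concurrent read-modify-write sequences on the same list keep both
    # items only if serialized, which is what DeferredLock.run() does above.
    @staticmethod
    def _example_serialized_append(lock: defer.DeferredLock, urls: list, item: dict):
        def read_modify_write():
            if item not in urls:
                urls.append(item)
        return lock.run(read_modify_write)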
def finishStep(self, stepid: int, results: int, hidden: bool) -> defer.Deferred[None]:
def thd(conn) -> None:
tbl = self.db.model.steps
q = tbl.update().where(tbl.c.id == stepid)
conn.execute(
q.values(
complete_at=int(self.master.reactor.seconds()),
results=results,
hidden=1 if hidden else 0,
)
)
return self.db.pool.do_with_transaction(thd)
def _model_from_row(self, row):
return StepModel(
id=row.id,
number=row.number,
name=row.name,
buildid=row.buildid,
started_at=epoch2datetime(row.started_at),
locks_acquired_at=epoch2datetime(row.locks_acquired_at),
complete_at=epoch2datetime(row.complete_at),
state_string=row.state_string,
results=row.results,
urls=[UrlModel(item['name'], item['url']) for item in json.loads(row.urls_json)],
hidden=bool(row.hidden),
)
| 9,839 | Python | .py | 238 | 30.684874 | 100 | 0.578826 | buildbot/buildbot | 5,232 | 1,616 | 728 | GPL-2.0 | 9/5/2024, 5:09:21 PM (Europe/Amsterdam) |
4,759 | changesources.py | buildbot_buildbot/master/buildbot/db/changesources.py | # This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
from __future__ import annotations
from dataclasses import dataclass
import sqlalchemy as sa
from twisted.internet import defer
from buildbot.db import NULL
from buildbot.db import base
from buildbot.util.sautils import hash_columns
from buildbot.warnings import warn_deprecated
class ChangeSourceAlreadyClaimedError(Exception):
pass
@dataclass
class ChangeSourceModel:
id: int
name: str
masterid: int | None = None
# For backward compatibility
def __getitem__(self, key: str):
warn_deprecated(
'4.1.0',
(
'ChangeSourcesConnectorComponent '
'getChangeSource, and getChangeSources '
                'no longer return ChangeSource as dictionaries. '
'Usage of [] accessor is deprecated: please access the member directly'
),
)
if hasattr(self, key):
return getattr(self, key)
raise KeyError(key)
class ChangeSourcesConnectorComponent(base.DBConnectorComponent):
def findChangeSourceId(self, name):
tbl = self.db.model.changesources
name_hash = hash_columns(name)
return self.findSomethingId(
tbl=tbl,
whereclause=(tbl.c.name_hash == name_hash),
insert_values={"name": name, "name_hash": name_hash},
)
# returns a Deferred that returns None
def setChangeSourceMaster(self, changesourceid, masterid):
def thd(conn):
cs_mst_tbl = self.db.model.changesource_masters
# handle the masterid=None case to get it out of the way
if masterid is None:
q = cs_mst_tbl.delete().where(cs_mst_tbl.c.changesourceid == changesourceid)
conn.execute(q)
conn.commit()
return
# try a blind insert..
try:
q = cs_mst_tbl.insert()
conn.execute(q, {"changesourceid": changesourceid, "masterid": masterid})
conn.commit()
except (sa.exc.IntegrityError, sa.exc.ProgrammingError) as e:
conn.rollback()
# someone already owns this changesource.
raise ChangeSourceAlreadyClaimedError from e
return self.db.pool.do(thd)
def get_change_source_master(self, changesourceid):
def thd(conn):
q = sa.select(self.db.model.changesource_masters.c.masterid).where(
self.db.model.changesource_masters.c.changesourceid == changesourceid
)
r = conn.execute(q)
row = r.fetchone()
conn.close()
if row:
return row.masterid
return None
return self.db.pool.do(thd)
@defer.inlineCallbacks
def getChangeSource(self, changesourceid):
cs = yield self.getChangeSources(_changesourceid=changesourceid)
if cs:
return cs[0]
return None
# returns a Deferred that returns a value
def getChangeSources(self, active=None, masterid=None, _changesourceid=None):
def thd(conn):
cs_tbl = self.db.model.changesources
cs_mst_tbl = self.db.model.changesource_masters
# handle the trivial case of masterid=xx and active=False
if masterid is not None and active is not None and not active:
return []
join = cs_tbl.outerjoin(cs_mst_tbl, (cs_tbl.c.id == cs_mst_tbl.c.changesourceid))
# if we're given a _changesourceid, select only that row
wc = None
if _changesourceid:
wc = cs_tbl.c.id == _changesourceid
else:
# otherwise, filter with active, if necessary
if masterid is not None:
wc = cs_mst_tbl.c.masterid == masterid
elif active:
wc = cs_mst_tbl.c.masterid != NULL
elif active is not None:
wc = cs_mst_tbl.c.masterid == NULL
q = sa.select(
cs_tbl.c.id,
cs_tbl.c.name,
cs_mst_tbl.c.masterid,
).select_from(join)
if wc is not None:
q = q.where(wc)
return [self._model_from_row(row) for row in conn.execute(q).fetchall()]
return self.db.pool.do(thd)
def _model_from_row(self, row):
return ChangeSourceModel(id=row.id, name=row.name, masterid=row.masterid)
| 5,190 | Python | .py | 122 | 32.385246 | 93 | 0.621949 | buildbot/buildbot | 5,232 | 1,616 | 728 | GPL-2.0 | 9/5/2024, 5:09:21 PM (Europe/Amsterdam) |
4,760 | tags.py | buildbot_buildbot/master/buildbot/db/tags.py | # This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
from buildbot.db import base
from buildbot.util.sautils import hash_columns
class TagsConnectorComponent(base.DBConnectorComponent):
def findTagId(self, name):
tbl = self.db.model.tags
name_hash = hash_columns(name)
return self.findSomethingId(
tbl=tbl,
whereclause=(tbl.c.name_hash == name_hash),
insert_values={"name": name, "name_hash": name_hash},
)
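# ----------------------------------------------------------------------
# Editor's illustrative sketch (assumption: a simplified stand-in for
# buildbot.util.sautils.hash_columns): names of arbitrary length are looked
# up through the fixed-length name_hash column, which is cheap to index
# with a unique constraint.
def _example_hash_columns(*values) -> str:
    import hashlib
    return hashlib.sha1('\0'.join(str(v) for v in values).encode('utf-8')).hexdigest()
# e.g. len(_example_hash_columns('a-very-long-tag-name' * 20)) == 40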
| 1,135 | Python | .py | 25 | 41.36 | 79 | 0.740506 | buildbot/buildbot | 5,232 | 1,616 | 728 | GPL-2.0 | 9/5/2024, 5:09:21 PM (Europe/Amsterdam) |
4,761 | dbconfig.py | buildbot_buildbot/master/buildbot/db/dbconfig.py | # This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
from contextlib import contextmanager
from sqlalchemy.exc import OperationalError
from sqlalchemy.exc import ProgrammingError
from buildbot.config.master import MasterConfig
from buildbot.db import enginestrategy
from buildbot.db import model
from buildbot.db import state
class FakeDBConnector:
def __init__(self, engine):
self.pool = FakePool(engine)
self.master = FakeMaster()
self.model = model.Model(self)
self.state = state.StateConnectorComponent(self)
@contextmanager
def connect(self):
try:
with self.pool.engine.connect() as conn:
yield conn
finally:
self.pool.engine.dispose()
class FakeCacheManager:
def get_cache(self, cache_name, miss_fn):
return None
class FakeMaster:
def __init__(self):
self.caches = FakeCacheManager()
class FakePool:
def __init__(self, engine):
self.engine = engine
class DbConfig:
def __init__(self, BuildmasterConfig, basedir, name="config"):
self.db_url = MasterConfig.getDbUrlFromConfig(BuildmasterConfig, throwErrors=False)
self.basedir = basedir
self.name = name
def getDb(self):
try:
db = FakeDBConnector(
engine=enginestrategy.create_engine(self.db_url, basedir=self.basedir)
)
except Exception:
            # db_url is probably invalid; just ignore it -- the db part of
            # config.py will produce a proper error message
return None
with db.connect() as conn:
try:
self.objectid = db.state.thdGetObjectId(conn, self.name, "DbConfig")['id']
except (ProgrammingError, OperationalError):
conn.rollback()
                # ProgrammingError: MySQL & Postgres; OperationalError: SQLite
# assume db is not initialized
return None
return db
def get(self, name, default=state.StateConnectorComponent.Thunk):
db = self.getDb()
if db is not None:
with db.connect() as conn:
ret = db.state.thdGetState(conn, self.objectid, name, default=default)
else:
if default is not state.StateConnectorComponent.Thunk:
return default
raise KeyError("Db not yet initialized")
return ret
def set(self, name, value):
db = self.getDb()
if db is not None:
with db.connect() as conn:
db.state.thdSetState(conn, self.objectid, name, value)
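# Editor's note: typical master.cfg usage of DbConfig (sketch; key names
# and values are illustrative):
#
#     from buildbot.db.dbconfig import DbConfig
#
#     c = BuildmasterConfig = {}
#     c['db_url'] = 'sqlite:///state.sqlite'
#     dbConfig = DbConfig(BuildmasterConfig, basedir)
#     workers = dbConfig.get('workers', default=[])
#     dbConfig.set('workers', workers)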
| 3,249 | Python | .py | 81 | 32.296296 | 91 | 0.668043 | buildbot/buildbot | 5,232 | 1,616 | 728 | GPL-2.0 | 9/5/2024, 5:09:21 PM (Europe/Amsterdam) |
4,762 | workers.py | buildbot_buildbot/master/buildbot/db/workers.py | # This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
from __future__ import annotations
from dataclasses import dataclass
from dataclasses import field
from typing import TYPE_CHECKING
import sqlalchemy as sa
from twisted.internet import defer
from buildbot.db import base
from buildbot.util import identifiers
from buildbot.warnings import warn_deprecated
if TYPE_CHECKING:
from typing import Any
@dataclass
class BuilderMasterModel:
builderid: int
masterid: int
# For backward compatibility
def __getitem__(self, key: str):
warn_deprecated(
'4.1.0',
(
'WorkersConnectorComponent '
'getWorker, and getWorkers '
                'no longer return Worker as dictionaries. '
'Usage of [] accessor is deprecated: please access the member directly'
),
)
if hasattr(self, key):
return getattr(self, key)
raise KeyError(key)
@dataclass
class WorkerModel:
id: int
name: str
workerinfo: dict[str, Any]
paused: bool = False
pause_reason: str | None = None
graceful: bool = False
configured_on: list[BuilderMasterModel] = field(default_factory=list)
connected_to: list[int] = field(default_factory=list)
# For backward compatibility
def __getitem__(self, key: str):
warn_deprecated(
'4.1.0',
(
'WorkersConnectorComponent '
'getWorker, and getWorkers '
                'no longer return Worker as dictionaries. '
'Usage of [] accessor is deprecated: please access the member directly'
),
)
if hasattr(self, key):
return getattr(self, key)
raise KeyError(key)
class WorkersConnectorComponent(base.DBConnectorComponent):
def findWorkerId(self, name):
tbl = self.db.model.workers
# callers should verify this and give good user error messages
assert identifiers.isIdentifier(50, name)
return self.findSomethingId(
tbl=tbl,
whereclause=(tbl.c.name == name),
insert_values={
"name": name,
"info": {},
"paused": 0,
"pause_reason": None,
"graceful": 0,
},
)
def _deleteFromConfiguredWorkers_thd(self, conn, buildermasterids, workerid=None):
cfg_tbl = self.db.model.configured_workers
        # batch the deletes to avoid using too many SQL variables
for batch in self.doBatch(buildermasterids, 100):
q = cfg_tbl.delete()
q = q.where(cfg_tbl.c.buildermasterid.in_(batch))
if workerid:
q = q.where(cfg_tbl.c.workerid == workerid)
conn.execute(q).close()
# returns a Deferred which returns None
def deconfigureAllWorkersForMaster(self, masterid):
def thd(conn):
# first remove the old configured buildermasterids for this master and worker
# as sqlalchemy does not support delete with join, we need to do
# that in 2 queries
cfg_tbl = self.db.model.configured_workers
bm_tbl = self.db.model.builder_masters
j = cfg_tbl
j = j.outerjoin(bm_tbl)
q = sa.select(cfg_tbl.c.buildermasterid).select_from(j).distinct()
q = q.where(bm_tbl.c.masterid == masterid)
res = conn.execute(q)
buildermasterids = [row.buildermasterid for row in res]
res.close()
self._deleteFromConfiguredWorkers_thd(conn, buildermasterids)
return self.db.pool.do_with_transaction(thd)
# returns a Deferred that returns None
def workerConfigured(self, workerid, masterid, builderids):
def thd(conn):
cfg_tbl = self.db.model.configured_workers
bm_tbl = self.db.model.builder_masters
# get the buildermasterids that are configured
if builderids:
q = sa.select(bm_tbl.c.id).select_from(bm_tbl)
q = q.where(bm_tbl.c.masterid == masterid)
q = q.where(bm_tbl.c.builderid.in_(builderids))
res = conn.execute(q)
buildermasterids = {row.id for row in res}
res.close()
else:
buildermasterids = set([])
j = cfg_tbl
j = j.outerjoin(bm_tbl)
q = sa.select(cfg_tbl.c.buildermasterid).select_from(j).distinct()
q = q.where(bm_tbl.c.masterid == masterid)
q = q.where(cfg_tbl.c.workerid == workerid)
res = conn.execute(q)
oldbuildermasterids = {row.buildermasterid for row in res}
res.close()
todeletebuildermasterids = oldbuildermasterids - buildermasterids
toinsertbuildermasterids = buildermasterids - oldbuildermasterids
self._deleteFromConfiguredWorkers_thd(conn, todeletebuildermasterids, workerid)
# and insert the new ones
if toinsertbuildermasterids:
q = cfg_tbl.insert()
conn.execute(
q,
[
{'workerid': workerid, 'buildermasterid': buildermasterid}
for buildermasterid in toinsertbuildermasterids
],
).close()
return self.db.pool.do_with_transaction(thd)
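    # Editor's note (illustrative): the reconciliation above is plain set
    # arithmetic over buildermasterids, e.g.
    #   old = {10, 11, 12}   # currently configured
    #   new = {11, 12, 13}   # should now be configured
    #   old - new == {10}    # rows to delete
    #   new - old == {13}    # rows to insert
    # unchanged rows ({11, 12}) are never touched.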
@defer.inlineCallbacks
def getWorker(
self,
workerid: int | None = None,
name: str | None = None,
masterid: int | None = None,
builderid: int | None = None,
):
if workerid is None and name is None:
return None
workers = yield self.getWorkers(
_workerid=workerid, _name=name, masterid=masterid, builderid=builderid
)
if workers:
return workers[0]
return None
def getWorkers(
self,
_workerid: int | None = None,
_name: str | None = None,
masterid: int | None = None,
builderid: int | None = None,
paused: bool | None = None,
graceful: bool | None = None,
) -> defer.Deferred[list[WorkerModel]]:
def thd(conn) -> list[WorkerModel]:
workers_tbl = self.db.model.workers
conn_tbl = self.db.model.connected_workers
cfg_tbl = self.db.model.configured_workers
bm_tbl = self.db.model.builder_masters
# first, get the worker itself and the configured_on info
j = workers_tbl
j = j.outerjoin(cfg_tbl)
j = j.outerjoin(bm_tbl)
q = (
sa.select(
workers_tbl.c.id,
workers_tbl.c.name,
workers_tbl.c.info,
workers_tbl.c.paused,
workers_tbl.c.pause_reason,
workers_tbl.c.graceful,
bm_tbl.c.builderid,
bm_tbl.c.masterid,
)
.select_from(j)
.order_by(
workers_tbl.c.id,
)
)
if _workerid is not None:
q = q.where(workers_tbl.c.id == _workerid)
if _name is not None:
q = q.where(workers_tbl.c.name == _name)
if masterid is not None:
q = q.where(bm_tbl.c.masterid == masterid)
if builderid is not None:
q = q.where(bm_tbl.c.builderid == builderid)
if paused is not None:
q = q.where(workers_tbl.c.paused == int(paused))
if graceful is not None:
q = q.where(workers_tbl.c.graceful == int(graceful))
rv: dict[int, WorkerModel] = {}
res = None
lastId = None
for row in conn.execute(q):
if row.id != lastId:
lastId = row.id
res = self._model_from_row(row)
rv[lastId] = res
if row.builderid and row.masterid:
rv[lastId].configured_on.append(
BuilderMasterModel(builderid=row.builderid, masterid=row.masterid)
)
# now go back and get the connection info for the same set of
# workers
j = conn_tbl
if _name is not None:
# note this is not an outer join; if there are unconnected
# workers, they were captured in rv above
j = j.join(workers_tbl)
q = (
sa.select(
conn_tbl.c.workerid,
conn_tbl.c.masterid,
)
.select_from(j)
.order_by(conn_tbl.c.workerid)
.where(conn_tbl.c.workerid.in_(rv.keys()))
)
if _name is not None:
q = q.where(workers_tbl.c.name == _name)
if masterid is not None:
q = q.where(conn_tbl.c.masterid == masterid)
for row in conn.execute(q):
if row.workerid not in rv:
continue
rv[row.workerid].connected_to.append(row.masterid)
return list(rv.values())
return self.db.pool.do(thd)
# returns a Deferred that returns None
def workerConnected(self, workerid, masterid, workerinfo):
def thd(conn):
conn_tbl = self.db.model.connected_workers
q = conn_tbl.insert()
try:
conn.execute(q, {'workerid': workerid, 'masterid': masterid})
conn.commit()
except (sa.exc.IntegrityError, sa.exc.ProgrammingError):
# if the row is already present, silently fail..
conn.rollback()
bs_tbl = self.db.model.workers
q = bs_tbl.update().where(bs_tbl.c.id == workerid)
conn.execute(q.values(info=workerinfo))
conn.commit()
return self.db.pool.do(thd)
# returns a Deferred that returns None
def workerDisconnected(self, workerid, masterid):
def thd(conn):
tbl = self.db.model.connected_workers
q = tbl.delete().where(tbl.c.workerid == workerid, tbl.c.masterid == masterid)
conn.execute(q)
return self.db.pool.do_with_transaction(thd)
# returns a Deferred that returns None
def set_worker_paused(self, workerid, paused, pause_reason=None):
def thd(conn):
tbl = self.db.model.workers
q = tbl.update().where(tbl.c.id == workerid)
conn.execute(q.values(paused=int(paused), pause_reason=pause_reason))
return self.db.pool.do_with_transaction(thd)
# returns a Deferred that returns None
def set_worker_graceful(self, workerid, graceful):
def thd(conn):
tbl = self.db.model.workers
q = tbl.update().where(tbl.c.id == workerid)
conn.execute(q.values(graceful=int(graceful)))
return self.db.pool.do_with_transaction(thd)
def _model_from_row(self, row):
return WorkerModel(
id=row.id,
name=row.name,
workerinfo=row.info,
paused=bool(row.paused),
pause_reason=row.pause_reason,
graceful=bool(row.graceful),
)
| 12,088 | Python | .py | 292 | 29.660959 | 91 | 0.573411 | buildbot/buildbot | 5,232 | 1,616 | 728 | GPL-2.0 | 9/5/2024, 5:09:21 PM (Europe/Amsterdam) |
4,763 | schedulers.py | buildbot_buildbot/master/buildbot/db/schedulers.py | # This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
from __future__ import annotations
from dataclasses import dataclass
from typing import TYPE_CHECKING
import sqlalchemy as sa
from twisted.internet import defer
from buildbot.db import NULL
from buildbot.db import base
from buildbot.util.sautils import hash_columns
from buildbot.warnings import warn_deprecated
if TYPE_CHECKING:
from typing import Literal
class SchedulerAlreadyClaimedError(Exception):
pass
@dataclass
class SchedulerModel:
id: int
name: str
enabled: bool = True
masterid: int | None = None
# For backward compatibility
def __getitem__(self, key: str):
warn_deprecated(
'4.1.0',
(
'SchedulersConnectorComponent '
'getScheduler, and getSchedulers '
                'no longer return Scheduler as dictionaries. '
'Usage of [] accessor is deprecated: please access the member directly'
),
)
if hasattr(self, key):
return getattr(self, key)
raise KeyError(key)
class SchedulersConnectorComponent(base.DBConnectorComponent):
def enable(self, schedulerid: int, v: bool) -> defer.Deferred[None]:
def thd(conn) -> None:
tbl = self.db.model.schedulers
q = tbl.update().where(tbl.c.id == schedulerid)
conn.execute(q.values(enabled=int(v)))
return self.db.pool.do_with_transaction(thd)
def classifyChanges(
self, schedulerid: int, classifications: dict[int, bool]
) -> defer.Deferred[None]:
def thd(conn) -> None:
tbl = self.db.model.scheduler_changes
for changeid, important in classifications.items():
# convert the 'important' value into an integer, since that
# is the column type
imp_int = int(bool(important))
self.db.upsert(
conn,
tbl,
where_values=(
(tbl.c.schedulerid, schedulerid),
(tbl.c.changeid, changeid),
),
update_values=((tbl.c.important, imp_int),),
_race_hook=None,
)
conn.commit()
return self.db.pool.do(thd)
def flushChangeClassifications(
self, schedulerid: int, less_than: int | None = None
) -> defer.Deferred[None]:
def thd(conn) -> None:
sch_ch_tbl = self.db.model.scheduler_changes
wc = sch_ch_tbl.c.schedulerid == schedulerid
if less_than is not None:
wc = wc & (sch_ch_tbl.c.changeid < less_than)
q = sch_ch_tbl.delete().where(wc)
conn.execute(q).close()
return self.db.pool.do_with_transaction(thd)
def getChangeClassifications(
self,
schedulerid: int,
branch: str | None | Literal[-1] = -1,
repository: str | None | Literal[-1] = -1,
project: str | None | Literal[-1] = -1,
codebase: str | None | Literal[-1] = -1,
) -> defer.Deferred[dict[int, bool]]:
# -1 here stands for "argument not given", since None has meaning
# as a branch
def thd(conn) -> dict[int, bool]:
sch_ch_tbl = self.db.model.scheduler_changes
ch_tbl = self.db.model.changes
wc = sch_ch_tbl.c.schedulerid == schedulerid
# may need to filter further based on branch, etc
extra_wheres = []
if branch != -1:
extra_wheres.append(ch_tbl.c.branch == branch)
if repository != -1:
extra_wheres.append(ch_tbl.c.repository == repository)
if project != -1:
extra_wheres.append(ch_tbl.c.project == project)
if codebase != -1:
extra_wheres.append(ch_tbl.c.codebase == codebase)
# if we need to filter further append those, as well as a join
# on changeid (but just once for that one)
if extra_wheres:
wc &= sch_ch_tbl.c.changeid == ch_tbl.c.changeid
for w in extra_wheres:
wc &= w
q = sa.select(sch_ch_tbl.c.changeid, sch_ch_tbl.c.important).where(wc)
return {r.changeid: bool(r.important) for r in conn.execute(q)}
return self.db.pool.do(thd)
def findSchedulerId(self, name: str) -> int:
tbl = self.db.model.schedulers
name_hash = hash_columns(name)
return self.findSomethingId(
tbl=tbl,
whereclause=(tbl.c.name_hash == name_hash),
insert_values={"name": name, "name_hash": name_hash},
)
def setSchedulerMaster(self, schedulerid: int, masterid: int | None) -> defer.Deferred[None]:
def thd(conn) -> None:
sch_mst_tbl = self.db.model.scheduler_masters
# handle the masterid=None case to get it out of the way
if masterid is None:
q = sch_mst_tbl.delete().where(sch_mst_tbl.c.schedulerid == schedulerid)
conn.execute(q).close()
conn.commit()
return None
# try a blind insert..
try:
q = sch_mst_tbl.insert()
conn.execute(q, {"schedulerid": schedulerid, "masterid": masterid}).close()
conn.commit()
except (sa.exc.IntegrityError, sa.exc.ProgrammingError) as e:
conn.rollback()
# someone already owns this scheduler, but who?
join = self.db.model.masters.outerjoin(
sch_mst_tbl, (self.db.model.masters.c.id == sch_mst_tbl.c.masterid)
)
q = (
sa.select(
self.db.model.masters.c.name,
sch_mst_tbl.c.masterid,
)
.select_from(join)
.where(sch_mst_tbl.c.schedulerid == schedulerid)
)
row = conn.execute(q).fetchone()
# ok, that was us, so we just do nothing
if row.masterid == masterid:
return None
raise SchedulerAlreadyClaimedError(f"already claimed by {row.name}") from e
return None
return self.db.pool.do(thd)
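    # Editor's illustrative sketch (hypothetical helper, not Buildbot API):
    # the blind-insert claim pattern above, reduced to plain sqlite3 -- rely
    # on the unique constraint and treat IntegrityError as "already claimed".
    @staticmethod
    def _example_blind_insert_claim(conn, schedulerid: int, masterid: int) -> bool:
        import sqlite3
        try:
            conn.execute('INSERT INTO claims VALUES (?, ?)', (schedulerid, masterid))
            conn.commit()
            return True
        except sqlite3.IntegrityError:
            conn.rollback()
            return False
    # usage sketch:
    #   conn = sqlite3.connect(':memory:')
    #   conn.execute('CREATE TABLE claims (schedulerid INTEGER PRIMARY KEY, masterid INTEGER)')
    #   _example_blind_insert_claim(conn, 1, 100)  # -> True  (first claim wins)
    #   _example_blind_insert_claim(conn, 1, 200)  # -> False (already claimed)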
def get_scheduler_master(self, schedulerid):
def thd(conn):
q = sa.select(self.db.model.scheduler_masters.c.masterid).where(
self.db.model.scheduler_masters.c.schedulerid == schedulerid
)
r = conn.execute(q)
row = r.fetchone()
conn.close()
if row:
return row.masterid
return None
return self.db.pool.do(thd)
@defer.inlineCallbacks
def getScheduler(self, schedulerid: int):
sch = yield self.getSchedulers(_schedulerid=schedulerid)
if sch:
return sch[0]
return None
def getSchedulers(
self,
active: bool | None = None,
masterid: int | None = None,
_schedulerid: int | None = None,
) -> defer.Deferred[list[SchedulerModel]]:
def thd(conn) -> list[SchedulerModel]:
sch_tbl = self.db.model.schedulers
sch_mst_tbl = self.db.model.scheduler_masters
# handle the trivial case of masterid=xx and active=False
if masterid is not None and active is not None and not active:
return []
join = sch_tbl.outerjoin(sch_mst_tbl, (sch_tbl.c.id == sch_mst_tbl.c.schedulerid))
# if we're given a _schedulerid, select only that row
wc = None
if _schedulerid:
wc = sch_tbl.c.id == _schedulerid
else:
# otherwise, filter with active, if necessary
if masterid is not None:
wc = sch_mst_tbl.c.masterid == masterid
elif active:
wc = sch_mst_tbl.c.masterid != NULL
elif active is not None:
wc = sch_mst_tbl.c.masterid == NULL
q = sa.select(
sch_tbl.c.id,
sch_tbl.c.name,
sch_tbl.c.enabled,
sch_mst_tbl.c.masterid,
).select_from(join)
if wc is not None:
q = q.where(wc)
return [self._model_from_row(row) for row in conn.execute(q).fetchall()]
return self.db.pool.do(thd)
def _model_from_row(self, row):
return SchedulerModel(
id=row.id,
name=row.name,
enabled=bool(row.enabled),
masterid=row.masterid,
)
| 9,452 | Python | .py | 222 | 30.81982 | 97 | 0.569454 | buildbot/buildbot | 5,232 | 1,616 | 728 | GPL-2.0 | 9/5/2024, 5:09:21 PM (Europe/Amsterdam) |
4,764 | logs.py | buildbot_buildbot/master/buildbot/db/logs.py | # This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
from __future__ import annotations
import dataclasses
import os
from typing import TYPE_CHECKING
import sqlalchemy as sa
from twisted.internet import defer
from twisted.internet import threads
from twisted.python import log
from twisted.python import threadpool
from buildbot.db import base
from buildbot.db.compression import BrotliCompressor
from buildbot.db.compression import BZipCompressor
from buildbot.db.compression import CompressorInterface
from buildbot.db.compression import GZipCompressor
from buildbot.db.compression import LZ4Compressor
from buildbot.db.compression import ZStdCompressor
from buildbot.db.compression.protocol import CompressObjInterface
from buildbot.util.twisted import async_to_deferred
from buildbot.warnings import warn_deprecated
if TYPE_CHECKING:
from typing import Literal
from sqlalchemy.engine import Connection as SAConnection
LogType = Literal['s', 't', 'h', 'd']
class LogSlugExistsError(KeyError):
pass
class LogCompressionFormatUnavailableError(LookupError):
pass
@dataclasses.dataclass
class LogModel:
id: int
name: str
slug: str
stepid: int
complete: bool
num_lines: int
type: LogType
# For backward compatibility
def __getitem__(self, key: str):
warn_deprecated(
'4.1.0',
(
'LogsConnectorComponent '
'getLog, getLogBySlug, and getLogs '
                'no longer return Log as dictionaries. '
'Usage of [] accessor is deprecated: please access the member directly'
),
)
if hasattr(self, key):
return getattr(self, key)
raise KeyError(key)
class RawCompressor(CompressorInterface):
name = "raw"
@staticmethod
def dumps(data: bytes) -> bytes:
return data
@staticmethod
def read(data: bytes) -> bytes:
return data
class CompressObj(CompressObjInterface):
def compress(self, data: bytes) -> bytes:
return data
def flush(self) -> bytes:
return b''
class LogsConnectorComponent(base.DBConnectorComponent):
# Postgres and MySQL will both allow bigger sizes than this. The limit
# for MySQL appears to be max_packet_size (default 1M).
# note that MAX_CHUNK_SIZE is equal to BUFFER_SIZE in buildbot_worker.runprocess
MAX_CHUNK_SIZE = 65536 # a chunk may not be bigger than this
MAX_CHUNK_LINES = 1000 # a chunk may not have more lines than this
NO_COMPRESSION_ID = 0
COMPRESSION_BYID: dict[int, type[CompressorInterface]] = {
NO_COMPRESSION_ID: RawCompressor,
1: GZipCompressor,
2: BZipCompressor,
3: LZ4Compressor,
4: ZStdCompressor,
5: BrotliCompressor,
}
COMPRESSION_MODE = {
compressor.name: (compressor_id, compressor)
for compressor_id, compressor in COMPRESSION_BYID.items()
}
def __init__(self, connector: base.DBConnector):
super().__init__(connector)
max_threads = 1
if cpu_count := os.cpu_count():
            # use at most half of the available CPUs to avoid oversubscribing
            # the master relative to its other processes
max_threads = max(int(cpu_count / 2), max_threads)
self._compression_pool = threadpool.ThreadPool(
minthreads=1,
maxthreads=max_threads,
name='DBLogCompression',
)
self._start_compression_pool()
def _start_compression_pool(self) -> None:
# keep a ref on the reactor used to start
# so we can schedule shutdown even if
# DBConnector was un-parented before
_reactor = self.master.reactor
def _start():
self._compression_pool.start()
_reactor.addSystemEventTrigger(
'during',
'shutdown',
self._compression_pool.stop,
)
_reactor.callWhenRunning(_start)
def _get_compressor(self, compressor_id: int) -> type[CompressorInterface]:
compressor = self.COMPRESSION_BYID.get(compressor_id)
if compressor is None:
msg = f"Unknown compression method ID {compressor_id}"
raise LogCompressionFormatUnavailableError(msg)
if not compressor.available:
msg = (
f"Log compression method {compressor.name} is not available. "
"You might be missing a dependency."
)
raise LogCompressionFormatUnavailableError(msg)
return compressor
def _getLog(self, whereclause) -> defer.Deferred[LogModel | None]:
def thd_getLog(conn) -> LogModel | None:
q = self.db.model.logs.select()
if whereclause is not None:
q = q.where(whereclause)
res = conn.execute(q).mappings()
row = res.fetchone()
rv = None
if row:
rv = self._model_from_row(row)
res.close()
return rv
return self.db.pool.do(thd_getLog)
def getLog(self, logid: int) -> defer.Deferred[LogModel | None]:
return self._getLog(self.db.model.logs.c.id == logid)
def getLogBySlug(self, stepid: int, slug: str) -> defer.Deferred[LogModel | None]:
tbl = self.db.model.logs
return self._getLog((tbl.c.slug == slug) & (tbl.c.stepid == stepid))
def getLogs(self, stepid: int | None = None) -> defer.Deferred[list[LogModel]]:
def thdGetLogs(conn) -> list[LogModel]:
tbl = self.db.model.logs
q = tbl.select()
if stepid is not None:
q = q.where(tbl.c.stepid == stepid)
q = q.order_by(tbl.c.id)
res = conn.execute(q).mappings()
return [self._model_from_row(row) for row in res.fetchall()]
return self.db.pool.do(thdGetLogs)
def getLogLines(self, logid: int, first_line: int, last_line: int) -> defer.Deferred[str]:
def thdGetLogLines(conn) -> str:
# get a set of chunks that completely cover the requested range
tbl = self.db.model.logchunks
q = sa.select(tbl.c.first_line, tbl.c.last_line, tbl.c.content, tbl.c.compressed)
q = q.where(tbl.c.logid == logid)
q = q.where(tbl.c.first_line <= last_line)
q = q.where(tbl.c.last_line >= first_line)
q = q.order_by(tbl.c.first_line)
rv = []
for row in conn.execute(q):
# Retrieve associated "reader" and extract the data
# Note that row.content is stored as bytes, and our caller expects unicode
data = self._get_compressor(row.compressed).read(row.content)
content = data.decode('utf-8')
if row.first_line < first_line:
idx = -1
count = first_line - row.first_line
for _ in range(count):
idx = content.index('\n', idx + 1)
content = content[idx + 1 :]
if row.last_line > last_line:
idx = len(content) + 1
count = row.last_line - last_line
for _ in range(count):
idx = content.rindex('\n', 0, idx)
content = content[:idx]
rv.append(content)
return '\n'.join(rv) + '\n' if rv else ''
return self.db.pool.do(thdGetLogLines)
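    # Editor's illustrative sketch (hypothetical helper): the chunk-trimming
    # logic above on concrete data -- a stored chunk covering lines 2..5 is
    # sliced down to the requested range 3..4 at '\n' boundaries.
    @staticmethod
    def _example_trim_chunk() -> str:
        content = 'line2\nline3\nline4\nline5'  # chunk first_line=2, last_line=5
        first_line, last_line = 3, 4
        idx = -1
        for _ in range(first_line - 2):  # drop lines before the requested range
            idx = content.index('\n', idx + 1)
        content = content[idx + 1 :]
        idx = len(content) + 1
        for _ in range(5 - last_line):  # drop lines after the requested range
            idx = content.rindex('\n', 0, idx)
        content = content[:idx]
        assert content == 'line3\nline4'
        return content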
def addLog(self, stepid: int, name: str, slug: str, type: LogType) -> defer.Deferred[int]:
assert type in 'tsh', "Log type must be one of t, s, or h"
def thdAddLog(conn) -> int:
try:
r = conn.execute(
self.db.model.logs.insert(),
{
"name": name,
"slug": slug,
"stepid": stepid,
"complete": 0,
"num_lines": 0,
"type": type,
},
)
conn.commit()
return r.inserted_primary_key[0]
except (sa.exc.IntegrityError, sa.exc.ProgrammingError) as e:
conn.rollback()
raise LogSlugExistsError(
f"log with slug '{slug!r}' already exists in this step"
) from e
return self.db.pool.do(thdAddLog)
def _get_configured_compressor(self) -> tuple[int, type[CompressorInterface]]:
compress_method: str = self.master.config.logCompressionMethod
return self.COMPRESSION_MODE.get(compress_method, (self.NO_COMPRESSION_ID, RawCompressor))
def thdCompressChunk(self, chunk: bytes) -> tuple[bytes, int]:
compressed_id, compressor = self._get_configured_compressor()
compressed_chunk = compressor.dumps(chunk)
# Is it useful to compress the chunk?
if len(chunk) <= len(compressed_chunk):
return chunk, self.NO_COMPRESSION_ID
return compressed_chunk, compressed_id
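    # Editor's note (illustrative, using stdlib gzip as a stand-in for the
    # configured compressor): tiny or incompressible chunks are stored raw,
    # because the compressed form would be larger than the original:
    #
    #     import gzip
    #     chunk = b'x'                              # 1 byte of payload
    #     assert len(chunk) <= len(gzip.compress(chunk))
    #     # -> store (chunk, NO_COMPRESSION_ID) instead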
def thdSplitAndAppendChunk(
self, conn, logid: int, content: bytes, first_line: int
) -> tuple[int, int]:
# Break the content up into chunks. This takes advantage of the
# fact that no character but u'\n' maps to b'\n' in UTF-8.
remaining: bytes | None = content
chunk_first_line = last_line = first_line
while remaining:
chunk, remaining = self._splitBigChunk(remaining, logid)
last_line = chunk_first_line + chunk.count(b'\n')
chunk, compressed_id = self.thdCompressChunk(chunk)
res = conn.execute(
self.db.model.logchunks.insert(),
{
"logid": logid,
"first_line": chunk_first_line,
"last_line": last_line,
"content": chunk,
"compressed": compressed_id,
},
)
conn.commit()
res.close()
chunk_first_line = last_line + 1
res = conn.execute(
self.db.model.logs.update()
.where(self.db.model.logs.c.id == logid)
.values(num_lines=last_line + 1)
)
conn.commit()
res.close()
return first_line, last_line
def thdAppendLog(self, conn, logid: int, content: str) -> tuple[int, int] | None:
# check for trailing newline and strip it for storage -- chunks omit
# the trailing newline
assert content[-1] == '\n'
# Note that row.content is stored as bytes, and our caller is sending unicode
content_bytes = content[:-1].encode('utf-8')
q = sa.select(self.db.model.logs.c.num_lines)
q = q.where(self.db.model.logs.c.id == logid)
res = conn.execute(q)
num_lines = res.fetchone()
res.close()
if not num_lines:
return None # ignore a missing log
return self.thdSplitAndAppendChunk(
conn=conn, logid=logid, content=content_bytes, first_line=num_lines[0]
)
def appendLog(self, logid, content) -> defer.Deferred[tuple[int, int] | None]:
def thdappendLog(conn) -> tuple[int, int] | None:
return self.thdAppendLog(conn, logid, content)
return self.db.pool.do(thdappendLog)
def _splitBigChunk(self, content: bytes, logid: int) -> tuple[bytes, bytes | None]:
"""
Split CONTENT on a line boundary into a prefix smaller than 64k and
a suffix containing the remainder, omitting the splitting newline.
"""
# if it's small enough, just return it
if len(content) < self.MAX_CHUNK_SIZE:
return content, None
# find the last newline before the limit
i = content.rfind(b'\n', 0, self.MAX_CHUNK_SIZE)
if i != -1:
return content[:i], content[i + 1 :]
log.msg(f'truncating long line for log {logid}')
# first, truncate this down to something that decodes correctly
truncline = content[: self.MAX_CHUNK_SIZE]
while truncline:
try:
truncline.decode('utf-8')
break
except UnicodeDecodeError:
truncline = truncline[:-1]
# then find the beginning of the next line
i = content.find(b'\n', self.MAX_CHUNK_SIZE)
if i == -1:
return truncline, None
return truncline, content[i + 1 :]
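    # Editor's illustrative sketch (hypothetical helper): the UTF-8-safe
    # truncation loop above on concrete data -- cutting a byte string in the
    # middle of a multi-byte codepoint fails to decode, so bytes are dropped
    # from the end until the prefix decodes cleanly.
    @staticmethod
    def _example_utf8_safe_truncate() -> bytes:
        data = 'a\u00e9'.encode('utf-8')  # b'a\xc3\xa9': 3 bytes, 2 characters
        truncline = data[:2]  # b'a\xc3' splits the 2-byte codepoint
        while truncline:
            try:
                truncline.decode('utf-8')
                break
            except UnicodeDecodeError:
                truncline = truncline[:-1]
        assert truncline == b'a'
        return truncline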
def finishLog(self, logid: int) -> defer.Deferred[None]:
def thdfinishLog(conn) -> None:
tbl = self.db.model.logs
q = tbl.update().where(tbl.c.id == logid)
conn.execute(q.values(complete=1))
return self.db.pool.do_with_transaction(thdfinishLog)
@async_to_deferred
async def compressLog(self, logid: int, force: bool = False) -> int:
"""
returns the size (in bytes) saved.
"""
tbl = self.db.model.logchunks
def _thd_gather_chunks_to_process(conn: SAConnection) -> list[tuple[int, int]]:
"""
returns the total size of chunks and a list of chunks to group.
chunks list is empty if not force, and no chunks would be grouped.
"""
q = (
sa.select(
tbl.c.first_line,
tbl.c.last_line,
sa.func.length(tbl.c.content),
)
.where(tbl.c.logid == logid)
.order_by(tbl.c.first_line)
)
rows = conn.execute(q)
            # get the first chunk to seed the grouped_chunks list
first_chunk = next(rows, None)
if first_chunk is None:
# no chunks in log, early out
return []
grouped_chunks: list[tuple[int, int]] = [
(first_chunk.first_line, first_chunk.last_line)
]
            # keep track of how many chunks exist now, to compare with the
            # grouped chunks and see whether any work is needed;
            # start at 1 since we already fetched one above
current_chunk_count = 1
current_group_new_size = first_chunk.length_1
            # first pass: fetch the full list of chunks (without content) and
            # find the chunk groups that could use some gathering.
for row in rows:
current_chunk_count += 1
chunk_first_line: int = row.first_line
chunk_last_line: int = row.last_line
chunk_size: int = row.length_1
group_first_line, _group_last_line = grouped_chunks[-1]
can_merge_chunks = (
                    # note that we count the compressed size, for efficiency
                    # reasons, unlike the on-the-fly chunk splitter
current_group_new_size + chunk_size <= self.MAX_CHUNK_SIZE
and (chunk_last_line - group_first_line) <= self.MAX_CHUNK_LINES
)
if can_merge_chunks:
                    # merge the chunks; since the query is ordered by
                    # 'first_line' and chunks are assumed contiguous, this is easy
grouped_chunks[-1] = (group_first_line, chunk_last_line)
current_group_new_size += chunk_size
else:
grouped_chunks.append((chunk_first_line, chunk_last_line))
current_group_new_size = chunk_size
rows.close()
if not force and current_chunk_count <= len(grouped_chunks):
return []
return grouped_chunks
def _thd_get_chunks_content(
conn: SAConnection,
first_line: int,
last_line: int,
) -> list[tuple[int, bytes]]:
q = (
sa.select(tbl.c.content, tbl.c.compressed)
.where(tbl.c.logid == logid)
.where(tbl.c.first_line >= first_line)
.where(tbl.c.last_line <= last_line)
.order_by(tbl.c.first_line)
)
rows = conn.execute(q)
content = [(row.compressed, row.content) for row in rows]
rows.close()
return content
def _thd_replace_chunks_by_new_grouped_chunk(
conn: SAConnection,
first_line: int,
last_line: int,
new_compressed_id: int,
new_content: bytes,
) -> None:
# Transaction is necessary so that readers don't see disappeared chunks
with conn.begin():
# we remove the chunks that we are compressing
deletion_query = (
tbl.delete()
.where(tbl.c.logid == logid)
.where(tbl.c.first_line >= first_line)
.where(tbl.c.last_line <= last_line)
)
conn.execute(deletion_query).close()
# and we recompress them in one big chunk
conn.execute(
tbl.insert(),
{
"logid": logid,
"first_line": first_line,
"last_line": last_line,
"content": new_content,
"compressed": new_compressed_id,
},
).close()
conn.commit()
def _thd_recompress_chunks(
compressed_chunks: list[tuple[int, bytes]],
compress_obj: CompressObjInterface,
) -> tuple[bytes, int]:
"""This has to run in the compression thread pool"""
            # decompress this group of chunks. Note that the content is binary bytes;
            # no need to decode anything, as we are going to put it back stored as bytes anyway
chunks: list[bytes] = []
bytes_saved = 0
for idx, (chunk_compress_id, chunk_content) in enumerate(compressed_chunks):
bytes_saved += len(chunk_content)
                # the trailing line-ending is stripped from each chunk;
                # re-insert it between chunks (i.e. before every chunk but the first)
if idx != 0:
chunks.append(compress_obj.compress(b'\n'))
uncompressed_content = self._get_compressor(chunk_compress_id).read(chunk_content)
chunks.append(compress_obj.compress(uncompressed_content))
chunks.append(compress_obj.flush())
new_content = b''.join(chunks)
bytes_saved -= len(new_content)
return new_content, bytes_saved
chunk_groups = await self.db.pool.do(_thd_gather_chunks_to_process)
if not chunk_groups:
return 0
total_bytes_saved: int = 0
compressed_id, compressor = self._get_configured_compressor()
compress_obj = compressor.CompressObj()
for group_first_line, group_last_line in chunk_groups:
compressed_chunks = await self.db.pool.do(
_thd_get_chunks_content,
first_line=group_first_line,
last_line=group_last_line,
)
new_content, bytes_saved = await threads.deferToThreadPool(
self.master.reactor,
self._compression_pool,
_thd_recompress_chunks,
compressed_chunks=compressed_chunks,
compress_obj=compress_obj,
)
total_bytes_saved += bytes_saved
await self.db.pool.do(
_thd_replace_chunks_by_new_grouped_chunk,
first_line=group_first_line,
last_line=group_last_line,
new_compressed_id=compressed_id,
new_content=new_content,
)
return total_bytes_saved
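    # Editor's illustrative sketch (hypothetical helper): the grouping
    # criterion of _thd_gather_chunks_to_process as pure logic -- contiguous
    # chunks are merged while the accumulated stored size stays under
    # MAX_CHUNK_SIZE and the line span stays under MAX_CHUNK_LINES.
    @staticmethod
    def _example_group_chunks(chunks, max_size=65536, max_lines=1000):
        # chunks: list of (first_line, last_line, size), ordered by first_line
        groups: list[tuple[int, int]] = []
        group_size = 0
        for first, last, size in chunks:
            if groups and group_size + size <= max_size and last - groups[-1][0] <= max_lines:
                groups[-1] = (groups[-1][0], last)
                group_size += size
            else:
                groups.append((first, last))
                group_size = size
        return groups
    # e.g. _example_group_chunks([(0, 10, 100), (11, 20, 200)]) == [(0, 20)]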
def deleteOldLogChunks(self, older_than_timestamp: int) -> defer.Deferred[int]:
def thddeleteOldLogs(conn) -> int:
model = self.db.model
res = conn.execute(sa.select(sa.func.count(model.logchunks.c.logid)))
count1 = res.fetchone()[0]
res.close()
            # update the type of logs older than the timestamp;
            # we do this first to avoid UI discrepancies
# N.B.: we utilize the fact that steps.id is auto-increment, thus steps.started_at
# times are effectively sorted and we only need to find the steps.id at the upper
# bound of steps to update.
# SELECT steps.id from steps WHERE steps.started_at < older_than_timestamp ORDER BY
# steps.id DESC LIMIT 1;
res = conn.execute(
sa.select(model.steps.c.id)
.where(model.steps.c.started_at < older_than_timestamp)
.order_by(model.steps.c.id.desc())
.limit(1)
)
res_list = res.fetchone()
stepid_max = None
if res_list:
stepid_max = res_list[0]
res.close()
# UPDATE logs SET logs.type = 'd' WHERE logs.stepid <= stepid_max AND type != 'd';
if stepid_max:
res = conn.execute(
model.logs.update()
.where(sa.and_(model.logs.c.stepid <= stepid_max, model.logs.c.type != 'd'))
.values(type='d')
)
conn.commit()
res.close()
# query all logs with type 'd' and delete their chunks.
if self.db._engine.dialect.name == 'sqlite':
# sqlite does not support delete with a join, so for this case we use a subquery,
# which is much slower
q = sa.select(model.logs.c.id)
q = q.select_from(model.logs)
q = q.where(model.logs.c.type == 'd')
# delete their logchunks
q = model.logchunks.delete().where(model.logchunks.c.logid.in_(q))
else:
q = model.logchunks.delete()
q = q.where(model.logs.c.id == model.logchunks.c.logid)
q = q.where(model.logs.c.type == 'd')
res = conn.execute(q)
conn.commit()
res.close()
res = conn.execute(sa.select(sa.func.count(model.logchunks.c.logid)))
count2 = res.fetchone()[0]
res.close()
return count1 - count2
return self.db.pool.do(thddeleteOldLogs)
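    # Editor's note (illustrative SQL): the two deletion strategies above
    # correspond roughly to
    #
    #   -- sqlite (no DELETE with JOIN): subquery form, slower
    #   DELETE FROM logchunks
    #    WHERE logid IN (SELECT id FROM logs WHERE type = 'd');
    #
    #   -- mysql / postgres: multi-table delete via the implicit join
    #   DELETE FROM logchunks ...
    #    WHERE logs.id = logchunks.logid AND logs.type = 'd';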
def _model_from_row(self, row):
return LogModel(
id=row.id,
name=row.name,
slug=row.slug,
stepid=row.stepid,
complete=bool(row.complete),
num_lines=row.num_lines,
type=row.type,
)
| 23,552 | Python | .py | 528 | 32.318182 | 98 | 0.570494 | buildbot/buildbot | 5,232 | 1,616 | 728 | GPL-2.0 | 9/5/2024, 5:09:21 PM (Europe/Amsterdam) |
4,765 | projects.py | buildbot_buildbot/master/buildbot/db/projects.py | # This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
from __future__ import annotations
from dataclasses import dataclass
from twisted.internet import defer
from buildbot.db import base
from buildbot.util.sautils import hash_columns
from buildbot.warnings import warn_deprecated
@dataclass
class ProjectModel:
id: int
name: str
slug: str
description: str | None
description_format: str | None
description_html: str | None
# For backward compatibility
def __getitem__(self, key: str):
warn_deprecated(
'4.1.0',
(
'ProjectsConnectorComponent '
'get_project, get_projects, and get_active_projects '
                'no longer return Project as dictionaries. '
'Usage of [] accessor is deprecated: please access the member directly'
),
)
if hasattr(self, key):
return getattr(self, key)
raise KeyError(key)
class ProjectsConnectorComponent(base.DBConnectorComponent):
def find_project_id(self, name: str, auto_create: bool = True) -> defer.Deferred[int | None]:
name_hash = hash_columns(name)
return self.findSomethingId(
tbl=self.db.model.projects,
whereclause=(self.db.model.projects.c.name_hash == name_hash),
insert_values={
"name": name,
"slug": name,
"name_hash": name_hash,
},
autoCreate=auto_create,
)
def get_project(self, projectid: int) -> defer.Deferred[ProjectModel | None]:
def thd(conn) -> ProjectModel | None:
q = self.db.model.projects.select().where(
self.db.model.projects.c.id == projectid,
)
res = conn.execute(q)
row = res.fetchone()
rv = None
if row:
rv = self._model_from_row(row)
res.close()
return rv
return self.db.pool.do(thd)
def get_projects(self) -> defer.Deferred[list[ProjectModel]]:
def thd(conn) -> list[ProjectModel]:
tbl = self.db.model.projects
q = tbl.select()
q = q.order_by(tbl.c.name)
res = conn.execute(q)
return [self._model_from_row(row) for row in res.fetchall()]
return self.db.pool.do(thd)
def get_active_projects(self) -> defer.Deferred[list[ProjectModel]]:
def thd(conn) -> list[ProjectModel]:
projects_tbl = self.db.model.projects
builders_tbl = self.db.model.builders
bm_tbl = self.db.model.builder_masters
q = projects_tbl.select().join(builders_tbl).join(bm_tbl).order_by(projects_tbl.c.name)
res = conn.execute(q)
return [self._model_from_row(row) for row in res.fetchall()]
return self.db.pool.do(thd)
# returns a Deferred that returns a value
def update_project_info(
self,
projectid: int,
slug: str,
description: str | None,
description_format: str | None,
description_html: str | None,
) -> defer.Deferred[None]:
def thd(conn) -> None:
q = self.db.model.projects.update().where(self.db.model.projects.c.id == projectid)
conn.execute(
q.values(
slug=slug,
description=description,
description_format=description_format,
description_html=description_html,
)
).close()
return self.db.pool.do_with_transaction(thd)
def _model_from_row(self, row):
return ProjectModel(
id=row.id,
name=row.name,
slug=row.slug,
description=row.description,
description_format=row.description_format,
description_html=row.description_html,
)
| 4,580 | Python | .py | 114 | 30.666667 | 99 | 0.613861 | buildbot/buildbot | 5,232 | 1,616 | 728 | GPL-2.0 | 9/5/2024, 5:09:21 PM (Europe/Amsterdam) |
4,766 | masters.py | buildbot_buildbot/master/buildbot/db/masters.py | # This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
from __future__ import annotations
import dataclasses
from typing import TYPE_CHECKING
import sqlalchemy as sa
from twisted.python import deprecate
from twisted.python import versions
from buildbot.db import base
from buildbot.util import epoch2datetime
from buildbot.util.sautils import hash_columns
from buildbot.warnings import warn_deprecated
if TYPE_CHECKING:
import datetime
from twisted.internet import defer
@dataclasses.dataclass
class MasterModel:
id: int
name: str
active: bool
last_active: datetime.datetime
# For backward compatibility
def __getitem__(self, key: str):
warn_deprecated(
'4.1.0',
(
'MastersConnectorComponent '
'getMaster, and getMasters '
                'no longer return Master as dictionaries. '
'Usage of [] accessor is deprecated: please access the member directly'
),
)
if hasattr(self, key):
return getattr(self, key)
raise KeyError(key)
@deprecate.deprecated(versions.Version("buildbot", 4, 1, 0), MasterModel)
class MasterDict(dict):
pass
class MastersConnectorComponent(base.DBConnectorComponent):
data2db = {"masterid": "id", "link": "id"}
def findMasterId(self, name: str) -> defer.Deferred[int]:
tbl = self.db.model.masters
name_hash = hash_columns(name)
return self.findSomethingId(
tbl=tbl,
whereclause=(tbl.c.name_hash == name_hash),
insert_values={
"name": name,
"name_hash": name_hash,
"active": 0, # initially inactive
"last_active": int(self.master.reactor.seconds()),
},
)
def setMasterState(self, masterid: int, active: bool) -> defer.Deferred[bool]:
def thd(conn) -> bool:
tbl = self.db.model.masters
whereclause = tbl.c.id == masterid
# get the old state
r = conn.execute(sa.select(tbl.c.active).where(whereclause))
rows = r.fetchall()
r.close()
if not rows:
return False # can't change a row that doesn't exist..
was_active = bool(rows[0].active)
if not active:
# if we're marking inactive, then delete any links to this
# master
sch_mst_tbl = self.db.model.scheduler_masters
q = sch_mst_tbl.delete().where(sch_mst_tbl.c.masterid == masterid)
conn.execute(q)
conn.commit()
# set the state (unconditionally, just to be safe)
q = tbl.update().where(whereclause)
q = q.values(active=1 if active else 0)
if active:
q = q.values(last_active=int(self.master.reactor.seconds()))
conn.execute(q)
conn.commit()
# return True if there was a change in state
return was_active != bool(active)
return self.db.pool.do(thd)
def getMaster(self, masterid: int) -> defer.Deferred[MasterModel | None]:
def thd(conn) -> MasterModel | None:
tbl = self.db.model.masters
res = conn.execute(tbl.select().where(tbl.c.id == masterid))
row = res.fetchone()
rv = None
if row:
rv = self._model_from_row(row)
res.close()
return rv
return self.db.pool.do(thd)
def getMasters(self) -> defer.Deferred[list[MasterModel]]:
def thd(conn) -> list[MasterModel]:
tbl = self.db.model.masters
return [self._model_from_row(row) for row in conn.execute(tbl.select()).fetchall()]
return self.db.pool.do(thd)
def setAllMastersActiveLongTimeAgo(self) -> defer.Deferred[None]:
def thd(conn) -> None:
tbl = self.db.model.masters
q = tbl.update().values(active=1, last_active=0)
conn.execute(q)
return self.db.pool.do(thd)
def _model_from_row(self, row):
return MasterModel(
id=row.id,
name=row.name,
active=bool(row.active),
last_active=epoch2datetime(row.last_active),
)
| 4,972 | Python | .py | 122 | 31.540984 | 95 | 0.61971 | buildbot/buildbot | 5,232 | 1,616 | 728 | GPL-2.0 | 9/5/2024, 5:09:21 PM (Europe/Amsterdam) |
4,767 | __init__.py | buildbot_buildbot/master/buildbot/db/__init__.py | # This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
# a NULL constant to use in sqlalchemy whereclauses e.g. (tbl.c.results == NULL)
# so that pep8 is happy
NULL: None = None
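# Usage sketch: writing (tbl.c.results == NULL) instead of "== None" keeps
# linters quiet while SQLAlchemy still renders the comparison as
# "results IS NULL".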
| 829 | Python | .py | 17 | 47.705882 | 80 | 0.780518 | buildbot/buildbot | 5,232 | 1,616 | 728 | GPL-2.0 | 9/5/2024, 5:09:21 PM (Europe/Amsterdam) |
4,768 | builders.py | buildbot_buildbot/master/buildbot/db/builders.py | # This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
from __future__ import annotations
from collections import defaultdict
from dataclasses import dataclass
from dataclasses import field
import sqlalchemy as sa
from twisted.internet import defer
from buildbot.db import base
from buildbot.util.sautils import hash_columns
from buildbot.warnings import warn_deprecated
@dataclass
class BuilderModel:
id: int
name: str
description: str | None = None
description_format: str | None = None
description_html: str | None = None
projectid: int | None = None
tags: list[str] = field(default_factory=list)
masterids: list[int] = field(default_factory=list)
# For backward compatibility
def __getitem__(self, key: str):
warn_deprecated(
'4.1.0',
(
'BuildersConnectorComponent getBuilder and getBuilders '
                'no longer return Builder as dictionaries. '
'Usage of [] accessor is deprecated: please access the member directly'
),
)
if hasattr(self, key):
return getattr(self, key)
raise KeyError(key)
class BuildersConnectorComponent(base.DBConnectorComponent):
def findBuilderId(self, name, autoCreate=True):
tbl = self.db.model.builders
name_hash = hash_columns(name)
return self.findSomethingId(
tbl=tbl,
whereclause=(tbl.c.name_hash == name_hash),
insert_values={"name": name, "name_hash": name_hash},
autoCreate=autoCreate,
)
@defer.inlineCallbacks
def updateBuilderInfo(
self, builderid, description, description_format, description_html, projectid, tags
):
# convert to tag IDs first, as necessary
def toTagid(tag):
if isinstance(tag, int):
return defer.succeed(tag)
ssConnector = self.master.db.tags
return ssConnector.findTagId(tag)
tagsids = [
r[1]
for r in (
yield defer.DeferredList(
[toTagid(tag) for tag in tags], fireOnOneErrback=True, consumeErrors=True
)
)
]
def thd(conn):
builders_tbl = self.db.model.builders
builders_tags_tbl = self.db.model.builders_tags
transaction = conn.begin()
q = builders_tbl.update().where(builders_tbl.c.id == builderid)
conn.execute(
q.values(
description=description,
description_format=description_format,
description_html=description_html,
projectid=projectid,
)
).close()
# remove previous builders_tags
conn.execute(
builders_tags_tbl.delete().where(builders_tags_tbl.c.builderid == builderid)
).close()
# add tag ids
if tagsids:
conn.execute(
builders_tags_tbl.insert(),
[{"builderid": builderid, "tagid": tagid} for tagid in tagsids],
).close()
transaction.commit()
return (yield self.db.pool.do(thd))
@defer.inlineCallbacks
def getBuilder(self, builderid: int):
bldrs: list[BuilderModel] = yield self.getBuilders(_builderid=builderid)
if bldrs:
return bldrs[0]
return None
# returns a Deferred that returns None
def addBuilderMaster(self, builderid=None, masterid=None):
def thd(conn, no_recurse=False):
try:
tbl = self.db.model.builder_masters
q = tbl.insert()
conn.execute(q.values(builderid=builderid, masterid=masterid))
conn.commit()
except (sa.exc.IntegrityError, sa.exc.ProgrammingError):
conn.rollback()
return self.db.pool.do(thd)
# returns a Deferred that returns None
def removeBuilderMaster(self, builderid=None, masterid=None):
def thd(conn, no_recurse=False):
tbl = self.db.model.builder_masters
conn.execute(
tbl.delete().where(tbl.c.builderid == builderid, tbl.c.masterid == masterid)
)
return self.db.pool.do_with_transaction(thd)
def getBuilders(
self,
masterid: int | None = None,
projectid: int | None = None,
workerid: int | None = None,
_builderid: int | None = None,
) -> defer.Deferred[list[BuilderModel]]:
def thd(conn) -> list[BuilderModel]:
bldr_tbl = self.db.model.builders
bm_tbl = self.db.model.builder_masters
builders_tags_tbl = self.db.model.builders_tags
tags_tbl = self.db.model.tags
configured_workers_tbl = self.db.model.configured_workers
j = bldr_tbl.outerjoin(bm_tbl)
# if we want to filter by masterid, we must join to builder_masters
# again, so we can still get the full set of masters for each
# builder
if masterid is not None:
limiting_bm_tbl = bm_tbl.alias('limiting_bm')
j = j.join(limiting_bm_tbl, onclause=bldr_tbl.c.id == limiting_bm_tbl.c.builderid)
if workerid is not None:
j = j.join(configured_workers_tbl)
q = (
sa.select(
bldr_tbl.c.id,
bldr_tbl.c.name,
bldr_tbl.c.description,
bldr_tbl.c.description_format,
bldr_tbl.c.description_html,
bldr_tbl.c.projectid,
bm_tbl.c.masterid,
)
.select_from(j)
.order_by(bldr_tbl.c.id, bm_tbl.c.masterid)
)
if masterid is not None:
# filter the masterid from the limiting table
q = q.where(limiting_bm_tbl.c.masterid == masterid)
if projectid is not None:
q = q.where(bldr_tbl.c.projectid == projectid)
if workerid is not None:
q = q.where(configured_workers_tbl.c.workerid == workerid)
if _builderid is not None:
q = q.where(bldr_tbl.c.id == _builderid)
            # build up an intermediate builder id -> tag names map (fixes performance issue #3396)
bldr_id_to_tags = defaultdict(list)
bldr_q = sa.select(builders_tags_tbl.c.builderid, tags_tbl.c.name)
bldr_q = bldr_q.select_from(tags_tbl.join(builders_tags_tbl))
for bldr_id, tag in conn.execute(bldr_q).fetchall():
bldr_id_to_tags[bldr_id].append(tag)
# now group those by builderid, aggregating by masterid
rv: list[BuilderModel] = []
last: BuilderModel | None = None
for row in conn.execute(q).fetchall():
if not last or row.id != last.id:
last = BuilderModel(
id=row.id,
name=row.name,
description=row.description,
description_format=row.description_format,
description_html=row.description_html,
projectid=row.projectid,
tags=bldr_id_to_tags[row.id],
)
rv.append(last)
if row.masterid:
last.masterids.append(row.masterid)
return rv
return self.db.pool.do(thd)
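    # Usage sketch (hypothetical ids): the masterid/projectid/workerid filters
    # above are independent and may be combined:
    #
    #     bldrs = yield self.master.db.builders.getBuilders(masterid=1, projectid=2)
    #
    # Because of the second join against builder_masters, each returned
    # BuilderModel still carries its complete masterids list even when
    # filtering by a single master.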
| 8,293 | Python | .py | 193 | 31.103627 | 98 | 0.584934 | buildbot/buildbot | 5,232 | 1,616 | 728 | GPL-2.0 | 9/5/2024, 5:09:21 PM (Europe/Amsterdam) |
4,769 | base.py | buildbot_buildbot/master/buildbot/db/base.py | # This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
from __future__ import annotations
import hashlib
import itertools
from typing import TYPE_CHECKING
import sqlalchemy as sa
from buildbot.util import unicode2bytes
from buildbot.util.sautils import hash_columns
if TYPE_CHECKING:
from buildbot.db.connector import DBConnector
class DBConnectorComponent:
# A fixed component of the DBConnector, handling one particular aspect of
# the database. Instances of subclasses are assigned to attributes of the
# DBConnector object, so that they are available at e.g.,
# C{master.db.model} or C{master.db.changes}. This parent class takes care
# of the necessary backlinks and other housekeeping.
connector: DBConnector | None = None
data2db: dict[str, str] = {}
def __init__(self, connector: DBConnector):
self.db = connector
# set up caches
for method in dir(self.__class__):
o = getattr(self, method)
if isinstance(o, CachedMethod):
setattr(self, method, o.get_cached_method(self))
@property
def master(self):
return self.db.master
_isCheckLengthNecessary: bool | None = None
def checkLength(self, col, value):
if not self._isCheckLengthNecessary:
if self.db.pool.engine.dialect.name == 'mysql':
self._isCheckLengthNecessary = True
else:
# not necessary, so just stub out the method
self.checkLength = lambda col, value: None
return
assert col.type.length, f"column {col} does not have a length"
if value and len(value) > col.type.length:
raise RuntimeError(
f"value for column {col} is greater than max of {col.type.length} "
f"characters: {value}"
)
def ensureLength(self, col, value):
assert col.type.length, f"column {col} does not have a length"
if value and len(value) > col.type.length:
value = (
value[: col.type.length // 2]
+ hashlib.sha1(unicode2bytes(value)).hexdigest()[: col.type.length // 2]
)
return value
# returns a Deferred that returns a value
def findSomethingId(self, tbl, whereclause, insert_values, _race_hook=None, autoCreate=True):
d = self.findOrCreateSomethingId(tbl, whereclause, insert_values, _race_hook, autoCreate)
d.addCallback(lambda pair: pair[0])
return d
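    # For a typical caller of this get-or-create helper, see
    # MastersConnectorComponent.findMasterId in masters.py, which passes the
    # masters table, a name_hash whereclause, and the initial insert values.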
def findOrCreateSomethingId(
self, tbl, whereclause, insert_values, _race_hook=None, autoCreate=True
):
"""
Find a matching row and if one cannot be found optionally create it.
Returns a deferred which resolves to the pair (id, found) where
id is the primary key of the matching row and `found` is True if
a match was found. `found` will be false if a new row was created.
"""
def thd(conn, no_recurse=False):
            # try to find an existing row
q = sa.select(tbl.c.id)
if whereclause is not None:
q = q.where(whereclause)
r = conn.execute(q)
row = r.fetchone()
r.close()
# found it!
if row:
return row.id, True
if not autoCreate:
return None, False
if _race_hook is not None:
_race_hook(conn)
try:
r = conn.execute(tbl.insert(), [insert_values])
conn.commit()
return r.inserted_primary_key[0], False
except (sa.exc.IntegrityError, sa.exc.ProgrammingError):
conn.rollback()
# try it all over again, in case there was an overlapping,
# identical call, but only retry once.
if no_recurse:
raise
return thd(conn, no_recurse=True)
return self.db.pool.do(thd)
def hashColumns(self, *args):
return hash_columns(*args)
def doBatch(self, batch, batch_n=500):
iterator = iter(batch)
while True:
batch = list(itertools.islice(iterator, batch_n))
if not batch:
break
yield batch
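    # Usage sketch (hypothetical rows iterable): doBatch is a generator that
    # yields successive slices of at most batch_n items, so a large bulk
    # insert can be split into several smaller statements:
    #
    #     for chunk in self.doBatch(rows, batch_n=500):
    #         conn.execute(tbl.insert(), chunk)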
class CachedMethod:
def __init__(self, cache_name, method):
self.cache_name = cache_name
self.method = method
def get_cached_method(self, component):
meth = self.method
meth_name = meth.__name__
cache = component.db.master.caches.get_cache(
self.cache_name, lambda key: meth(component, key)
)
def wrap(key, no_cache=0):
if no_cache:
return meth(component, key)
return cache.get(key)
wrap.__name__ = meth_name + " (wrapped)"
wrap.__module__ = meth.__module__
wrap.__doc__ = meth.__doc__
wrap.cache = cache
return wrap
def cached(cache_name):
return lambda method: CachedMethod(cache_name, method)
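# Usage sketch (hypothetical component, assuming the master's cache manager is
# configured): @cached turns a one-argument lookup method into a cached one;
# callers may pass no_cache=1 to bypass the cache for a single call:
#
#     class ThingsConnectorComponent(DBConnectorComponent):
#         @cached("things")
#         def getThing(self, thingid):
#             ...  # body runs only on a cache miss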
| 5,717 | Python | .py | 135 | 33.044444 | 97 | 0.624076 | buildbot/buildbot | 5,232 | 1,616 | 728 | GPL-2.0 | 9/5/2024, 5:09:21 PM (Europe/Amsterdam) |
4,770 | exceptions.py | buildbot_buildbot/master/buildbot/db/exceptions.py | # This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
class DatabaseNotReadyError(Exception):
pass
| 756 | Python | .py | 16 | 45.875 | 79 | 0.791328 | buildbot/buildbot | 5,232 | 1,616 | 728 | GPL-2.0 | 9/5/2024, 5:09:21 PM (Europe/Amsterdam) |
4,771 | test_result_sets.py | buildbot_buildbot/master/buildbot/db/test_result_sets.py | # This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
from __future__ import annotations
from dataclasses import dataclass
from twisted.internet import defer
from twisted.python import deprecate
from twisted.python import versions
from buildbot.db import base
from buildbot.warnings import warn_deprecated
@dataclass
class TestResultSetModel:
id: int
builderid: int
buildid: int
stepid: int
description: str | None
category: str
value_unit: str
tests_passed: int | None
tests_failed: int | None
complete: bool = False
# For backward compatibility
def __getitem__(self, key: str):
warn_deprecated(
'4.1.0',
(
'TestResultSetsConnectorComponent '
                'getTestResultSet and getTestResultSets '
                'no longer return TestResultSet as dictionaries. '
'Usage of [] accessor is deprecated: please access the member directly'
),
)
if hasattr(self, key):
return getattr(self, key)
raise KeyError(key)
@deprecate.deprecated(versions.Version("buildbot", 4, 1, 0), TestResultSetModel)
class TestResultSetDict(TestResultSetModel):
pass
class TestResultSetAlreadyCompleted(Exception):
pass
class TestResultSetsConnectorComponent(base.DBConnectorComponent):
def addTestResultSet(
self, builderid, buildid, stepid, description, category, value_unit
) -> defer.Deferred[int]:
# Returns the id of the new result set
def thd(conn) -> int:
sets_table = self.db.model.test_result_sets
insert_values = {
'builderid': builderid,
'buildid': buildid,
'stepid': stepid,
'description': description,
'category': category,
'value_unit': value_unit,
'complete': 0,
}
q = sets_table.insert().values(insert_values)
r = conn.execute(q)
conn.commit()
return r.inserted_primary_key[0]
return self.db.pool.do(thd)
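    # Usage sketch (hypothetical ids and values, e.g. from a step that parses
    # test output):
    #
    #     setid = yield self.master.db.test_result_sets.addTestResultSet(
    #         builderid, buildid, stepid,
    #         description='unit test results', category='pass_fail',
    #         value_unit='boolean')
    #
    # The new set starts with complete == 0 and is finalized later with
    # completeTestResultSet().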
def getTestResultSet(self, test_result_setid: int) -> defer.Deferred[TestResultSetModel | None]:
def thd(conn) -> TestResultSetModel | None:
sets_table = self.db.model.test_result_sets
q = sets_table.select().where(sets_table.c.id == test_result_setid)
res = conn.execute(q)
row = res.fetchone()
if not row:
return None
return self._model_from_row(row)
return self.db.pool.do(thd)
def getTestResultSets(
self,
builderid: int,
buildid: int | None = None,
stepid: int | None = None,
complete: bool | None = None,
result_spec=None,
) -> defer.Deferred[list[TestResultSetModel]]:
def thd(conn) -> list[TestResultSetModel]:
sets_table = self.db.model.test_result_sets
q = sets_table.select().where(sets_table.c.builderid == builderid)
if buildid is not None:
q = q.where(sets_table.c.buildid == buildid)
if stepid is not None:
q = q.where(sets_table.c.stepid == stepid)
if complete is not None:
q = q.where(sets_table.c.complete == (1 if complete else 0))
if result_spec is not None:
return result_spec.thd_execute(conn, q, self._model_from_row)
res = conn.execute(q)
return [self._model_from_row(row) for row in res.fetchall()]
return self.db.pool.do(thd)
def completeTestResultSet(
self, test_result_setid, tests_passed=None, tests_failed=None
) -> defer.Deferred[None]:
def thd(conn) -> None:
sets_table = self.db.model.test_result_sets
values = {'complete': 1}
if tests_passed is not None:
values['tests_passed'] = tests_passed
if tests_failed is not None:
values['tests_failed'] = tests_failed
q = sets_table.update().values(values)
q = q.where((sets_table.c.id == test_result_setid) & (sets_table.c.complete == 0))
res = conn.execute(q)
conn.commit()
if res.rowcount == 0:
raise TestResultSetAlreadyCompleted(
f'Test result set {test_result_setid} '
f'is already completed or does not exist'
)
return self.db.pool.do(thd)
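    # Note: the UPDATE above filters on complete == 0, so completing a set
    # twice (or completing a nonexistent id) matches zero rows and raises
    # TestResultSetAlreadyCompleted rather than silently succeeding.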
def _model_from_row(self, row):
return TestResultSetModel(
id=row.id,
builderid=row.builderid,
buildid=row.buildid,
stepid=row.stepid,
description=row.description,
category=row.category,
value_unit=row.value_unit,
tests_passed=row.tests_passed,
tests_failed=row.tests_failed,
complete=bool(row.complete),
)
| 5,640 | Python | .py | 138 | 31.26087 | 100 | 0.617464 | buildbot/buildbot | 5,232 | 1,616 | 728 | GPL-2.0 | 9/5/2024, 5:09:21 PM (Europe/Amsterdam) |
4,772 | build_data.py | buildbot_buildbot/master/buildbot/db/build_data.py | # This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
from __future__ import annotations
from dataclasses import dataclass
import sqlalchemy as sa
from twisted.internet import defer
from twisted.python import deprecate
from twisted.python import versions
from buildbot.db import NULL
from buildbot.db import base
from buildbot.warnings import warn_deprecated
@dataclass
class BuildDataModel:
buildid: int
name: str
length: int
source: str
value: bytes | None
# For backward compatibility
def __getitem__(self, key: str):
warn_deprecated(
'4.1.0',
(
'BuildDataConnectorComponent getBuildData, getBuildDataNoValue, and getAllBuildDataNoValues '
                'no longer return BuildData as dictionaries. '
'Usage of [] accessor is deprecated: please access the member directly'
),
)
if hasattr(self, key):
return getattr(self, key)
raise KeyError(key)
@deprecate.deprecated(versions.Version("buildbot", 4, 1, 0), BuildDataModel)
class BuildDataDict(BuildDataModel):
pass
class BuildDataConnectorComponent(base.DBConnectorComponent):
def _insert_race_hook(self, conn):
# called so tests can simulate a race condition during insertion
pass
def setBuildData(
self, buildid: int, name: str, value: bytes, source: str
) -> defer.Deferred[None]:
def thd(conn) -> None:
build_data_table = self.db.model.build_data
            # attempt the upsert; a competing concurrent insert violates the
            # unique (buildid, name) index and raises IntegrityError, in which
            # case we retry once before giving up
            retry = True
            while True:
                try:
                    self.db.upsert(
                        conn,
                        build_data_table,
                        where_values=(
                            (build_data_table.c.buildid, buildid),
                            (build_data_table.c.name, name),
                        ),
                        update_values=(
                            (build_data_table.c.value, value),
                            (build_data_table.c.length, len(value)),
                            (build_data_table.c.source, source),
                        ),
                        _race_hook=self._insert_race_hook,
                    )
                    conn.commit()
                    break
                except (sa.exc.IntegrityError, sa.exc.ProgrammingError):
                    # there's been a competing insert; retry once, then re-raise
                    conn.rollback()
                    if not retry:
                        raise
                    retry = False
return self.db.pool.do(thd)
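    # Usage sketch (hypothetical payload): build data is keyed by the unique
    # (buildid, name) index, so writing the same name twice overwrites the
    # previous value:
    #
    #     yield self.master.db.build_data.setBuildData(
    #         buildid, name='report.json', value=payload_bytes, source='MyStep')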
def getBuildData(self, buildid: int, name: str) -> defer.Deferred[BuildDataModel | None]:
def thd(conn) -> BuildDataModel | None:
build_data_table = self.db.model.build_data
q = build_data_table.select().where(
(build_data_table.c.buildid == buildid) & (build_data_table.c.name == name)
)
res = conn.execute(q)
row = res.fetchone()
if not row:
return None
return self._model_from_row(row, value=row.value)
return self.db.pool.do(thd)
def getBuildDataNoValue(self, buildid: int, name: str) -> defer.Deferred[BuildDataModel | None]:
def thd(conn) -> BuildDataModel | None:
build_data_table = self.db.model.build_data
q = sa.select(
build_data_table.c.buildid,
build_data_table.c.name,
build_data_table.c.length,
build_data_table.c.source,
)
q = q.where((build_data_table.c.buildid == buildid) & (build_data_table.c.name == name))
res = conn.execute(q)
row = res.fetchone()
if not row:
return None
return self._model_from_row(row, value=None)
return self.db.pool.do(thd)
def getAllBuildDataNoValues(self, buildid: int) -> defer.Deferred[list[BuildDataModel]]:
def thd(conn) -> list[BuildDataModel]:
build_data_table = self.db.model.build_data
q = sa.select(
build_data_table.c.buildid,
build_data_table.c.name,
build_data_table.c.length,
build_data_table.c.source,
)
q = q.where(build_data_table.c.buildid == buildid)
return [self._model_from_row(row, value=None) for row in conn.execute(q).fetchall()]
return self.db.pool.do(thd)
def deleteOldBuildData(self, older_than_timestamp: int) -> defer.Deferred[int]:
build_data = self.db.model.build_data
builds = self.db.model.builds
def count_build_datum(conn) -> int:
res = conn.execute(sa.select(sa.func.count(build_data.c.id)))
count = res.fetchone()[0]
res.close()
return count
def thd(conn) -> int:
count_before = count_build_datum(conn)
if self.db._engine.dialect.name == 'sqlite':
# sqlite does not support delete with a join, so for this case we use a subquery,
# which is much slower
q = sa.select(builds.c.id)
q = q.where(
(builds.c.complete_at >= older_than_timestamp) | (builds.c.complete_at == NULL)
)
q = build_data.delete().where(build_data.c.buildid.notin_(q))
            else:
                # joined delete: mirrors the subquery above by deleting the
                # data of builds that completed before the cutoff, keeping
                # data of incomplete builds and of builds completed after it
                q = build_data.delete()
                q = q.where(builds.c.id == build_data.c.buildid)
                q = q.where(
                    (builds.c.complete_at < older_than_timestamp)
                    & (builds.c.complete_at != NULL)
                )
res = conn.execute(q)
conn.commit()
res.close()
count_after = count_build_datum(conn)
return count_before - count_after
return self.db.pool.do(thd)
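    # Both branches above implement the same policy - dropping build_data of
    # builds that completed before older_than_timestamp - either via a NOT IN
    # subquery (sqlite) or a joined delete (other dialects); the returned
    # count is simply the difference in row counts before and after.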
def _model_from_row(self, row, value: bytes | None):
return BuildDataModel(
buildid=row.buildid,
name=row.name,
length=row.length,
source=row.source,
value=value,
)
| 6,826 | Python | .py | 159 | 30.943396 | 109 | 0.573583 | buildbot/buildbot | 5,232 | 1,616 | 728 | GPL-2.0 | 9/5/2024, 5:09:21 PM (Europe/Amsterdam) |
4,773 | model.py | buildbot_buildbot/master/buildbot/db/model.py | # This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
from __future__ import annotations
from typing import TYPE_CHECKING
import alembic
import alembic.config
import sqlalchemy as sa
from twisted.internet import defer
from twisted.python import log
from twisted.python import util
from buildbot.db import base
from buildbot.db.migrate_utils import test_unicode
from buildbot.db.types.json import JsonObject
from buildbot.util import sautils
if TYPE_CHECKING:
from sqlalchemy.engine import Connection as SQLAConnection
from sqlalchemy.engine.reflection import Inspector
class UpgradeFromBefore0p9Error(Exception):
def __init__(self):
message = """You are trying to upgrade a buildbot 0.8.x master to buildbot 0.9.x or newer.
This is not supported. Please start from a clean database
http://docs.buildbot.net/latest/manual/upgrading/0.9-upgrade.html"""
# Call the base class constructor with the parameters it needs
super().__init__(message)
class UpgradeFromBefore3p0Error(Exception):
def __init__(self):
message = """You are trying to upgrade to Buildbot 3.0 or newer from Buildbot 2.x or older.
This is only supported via an intermediate upgrade to newest Buildbot 2.10.x that is
available. Please first upgrade to 2.10.x and then try to upgrade to this version.
http://docs.buildbot.net/latest/manual/upgrading/3.0-upgrade.html"""
super().__init__(message)
class Model(base.DBConnectorComponent):
property_name_length = 256
property_source_length = 256
hash_length = 40
#
# schema
#
metadata = sa.MetaData()
# NOTES
# * server_defaults here are included to match those added by the migration
# scripts, but they should not be depended on - all code accessing these
# tables should supply default values as necessary. The defaults are
# required during migration when adding non-nullable columns to existing
# tables.
#
# * dates are stored as unix timestamps (UTC-ish epoch time)
#
# * sqlalchemy does not handle sa.Boolean very well on MySQL or Postgres;
# use sa.SmallInteger instead
#
# * BuildRequest.canBeCollapsed() depends on buildrequest.id being auto-incremented which is
# sqlalchemy default.
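    # As an illustrative example of the timestamp convention (arithmetic
    # only): a when_timestamp of 1704067200 corresponds to
    # 2024-01-01 00:00:00 UTC.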
# Tables related to build requests
# --------------------------------
# A BuildRequest is a request for a particular build to be performed. Each
# BuildRequest is a part of a Buildset. BuildRequests are claimed by
# masters, to avoid multiple masters running the same build.
buildrequests = sautils.Table(
'buildrequests',
metadata,
sa.Column('id', sa.Integer, primary_key=True),
sa.Column(
'buildsetid',
sa.Integer,
sa.ForeignKey('buildsets.id', ondelete='CASCADE'),
nullable=False,
),
sa.Column(
'builderid',
sa.Integer,
sa.ForeignKey('builders.id', ondelete='CASCADE'),
nullable=False,
),
sa.Column('priority', sa.Integer, nullable=False, server_default=sa.DefaultClause("0")),
# if this is zero, then the build is still pending
sa.Column('complete', sa.Integer, server_default=sa.DefaultClause("0")),
# results is only valid when complete == 1; 0 = SUCCESS, 1 = WARNINGS,
# etc - see master/buildbot/status/builder.py
sa.Column('results', sa.SmallInteger),
# time the buildrequest was created
sa.Column('submitted_at', sa.Integer, nullable=False),
# time the buildrequest was completed, or NULL
sa.Column('complete_at', sa.Integer),
# boolean indicating whether there is a step blocking, waiting for this
# request to complete
sa.Column('waited_for', sa.SmallInteger, server_default=sa.DefaultClause("0")),
)
# Each row in this table represents a claimed build request, where the
# claim is made by the master referenced by masterid.
buildrequest_claims = sautils.Table(
'buildrequest_claims',
metadata,
sa.Column(
'brid',
sa.Integer,
sa.ForeignKey('buildrequests.id', ondelete='CASCADE'),
nullable=False,
),
sa.Column(
'masterid',
sa.Integer,
sa.ForeignKey('masters.id', ondelete='CASCADE'),
index=True,
nullable=False,
),
sa.Column('claimed_at', sa.Integer, nullable=False),
)
# Tables related to builds
# ------------------------
# This table contains the build properties
build_properties = sautils.Table(
'build_properties',
metadata,
sa.Column(
'buildid', sa.Integer, sa.ForeignKey('builds.id', ondelete='CASCADE'), nullable=False
),
sa.Column('name', sa.String(property_name_length), nullable=False),
# JSON encoded value
sa.Column('value', sa.Text, nullable=False),
sa.Column('source', sa.String(property_source_length), nullable=False),
)
# This table contains transient build state.
build_data = sautils.Table(
'build_data',
metadata,
sa.Column('id', sa.Integer, primary_key=True),
sa.Column(
'buildid', sa.Integer, sa.ForeignKey('builds.id', ondelete='CASCADE'), nullable=False
),
sa.Column('name', sa.String(256), nullable=False),
sa.Column(
'value',
sa.LargeBinary().with_variant(sa.dialects.mysql.LONGBLOB, "mysql"),
nullable=False,
),
sa.Column('length', sa.Integer, nullable=False),
sa.Column('source', sa.String(256), nullable=False),
)
# This table contains basic information about each build.
builds = sautils.Table(
'builds',
metadata,
sa.Column('id', sa.Integer, primary_key=True),
sa.Column('number', sa.Integer, nullable=False),
sa.Column(
'builderid',
sa.Integer,
sa.ForeignKey('builders.id', ondelete='CASCADE'),
nullable=False,
),
        # note that there is a 1:N relationship here.
        # In case of worker loss, the build gets results RETRY
        # and the buildrequest is unclaimed.
# We use use_alter to prevent circular reference
# (buildrequests -> buildsets -> builds).
sa.Column(
'buildrequestid',
sa.Integer,
sa.ForeignKey(
'buildrequests.id', use_alter=True, name='buildrequestid', ondelete='CASCADE'
),
nullable=False,
),
# worker which performed this build
# keep nullable to support worker-free builds
sa.Column(
'workerid', sa.Integer, sa.ForeignKey('workers.id', ondelete='SET NULL'), nullable=True
),
# master which controlled this build
sa.Column(
'masterid', sa.Integer, sa.ForeignKey('masters.id', ondelete='CASCADE'), nullable=False
),
# start/complete times
sa.Column('started_at', sa.Integer, nullable=False),
sa.Column('complete_at', sa.Integer),
# Contains total duration that completed steps spent waiting for locks. Currently running
# step is not included.
sa.Column("locks_duration_s", sa.Integer, nullable=False),
sa.Column('state_string', sa.Text, nullable=False),
sa.Column('results', sa.Integer),
)
# Tables related to steps
# -----------------------
steps = sautils.Table(
'steps',
metadata,
sa.Column('id', sa.Integer, primary_key=True),
sa.Column('number', sa.Integer, nullable=False),
sa.Column('name', sa.String(50), nullable=False),
sa.Column(
'buildid', sa.Integer, sa.ForeignKey('builds.id', ondelete='CASCADE'), nullable=False
),
sa.Column('started_at', sa.Integer),
sa.Column("locks_acquired_at", sa.Integer),
sa.Column('complete_at', sa.Integer),
sa.Column('state_string', sa.Text, nullable=False),
sa.Column('results', sa.Integer),
sa.Column('urls_json', sa.Text, nullable=False),
sa.Column('hidden', sa.SmallInteger, nullable=False, server_default='0'),
)
# Tables related to logs
# ----------------------
logs = sautils.Table(
'logs',
metadata,
sa.Column('id', sa.Integer, primary_key=True),
sa.Column('name', sa.Text, nullable=False),
sa.Column('slug', sa.String(50), nullable=False),
sa.Column(
'stepid', sa.Integer, sa.ForeignKey('steps.id', ondelete='CASCADE'), nullable=False
),
sa.Column('complete', sa.SmallInteger, nullable=False),
sa.Column('num_lines', sa.Integer, nullable=False),
# 's' = stdio, 't' = text, 'h' = html, 'd' = deleted
sa.Column('type', sa.String(1), nullable=False),
)
logchunks = sautils.Table(
'logchunks',
metadata,
sa.Column(
'logid', sa.Integer, sa.ForeignKey('logs.id', ondelete='CASCADE'), nullable=False
),
# 0-based line number range in this chunk (inclusive); note that for
# HTML logs, this counts lines of HTML, not lines of rendered output
sa.Column('first_line', sa.Integer, nullable=False),
sa.Column('last_line', sa.Integer, nullable=False),
# log contents, including a terminating newline, encoded in utf-8 or,
# if 'compressed' is not 0, compressed with gzip, bzip2, lz4, br or zstd
sa.Column('content', sa.LargeBinary(65536)),
sa.Column('compressed', sa.SmallInteger, nullable=False),
)
# Tables related to buildsets
# ---------------------------
# This table contains input properties for buildsets
buildset_properties = sautils.Table(
'buildset_properties',
metadata,
sa.Column(
'buildsetid',
sa.Integer,
sa.ForeignKey('buildsets.id', ondelete='CASCADE'),
nullable=False,
),
sa.Column('property_name', sa.String(property_name_length), nullable=False),
# JSON-encoded tuple of (value, source)
sa.Column('property_value', sa.Text, nullable=False),
)
# This table represents Buildsets - sets of BuildRequests that share the
# same original cause and source information.
buildsets = sautils.Table(
'buildsets',
metadata,
sa.Column('id', sa.Integer, primary_key=True),
# a simple external identifier to track down this buildset later, e.g.,
# for try requests
sa.Column('external_idstring', sa.String(256)),
# a short string giving the reason the buildset was created
sa.Column('reason', sa.String(256)),
sa.Column('submitted_at', sa.Integer, nullable=False),
# if this is zero, then the build set is still pending
sa.Column(
'complete', sa.SmallInteger, nullable=False, server_default=sa.DefaultClause("0")
),
sa.Column('complete_at', sa.Integer),
# results is only valid when complete == 1; 0 = SUCCESS, 1 = WARNINGS,
# etc - see master/buildbot/status/builder.py
sa.Column('results', sa.SmallInteger),
# optional parent build, we use use_alter to prevent circular reference
# http://docs.sqlalchemy.org/en/latest/orm/relationships.html#rows-that-point-to-themselves-mutually-dependent-rows
sa.Column(
'parent_buildid',
sa.Integer,
sa.ForeignKey('builds.id', use_alter=True, name='parent_buildid', ondelete='SET NULL'),
nullable=True,
),
# text describing what is the relationship with the build
# could be 'triggered from', 'rebuilt from', 'inherited from'
sa.Column('parent_relationship', sa.Text),
# optional rebuilt build id
sa.Column(
'rebuilt_buildid',
sa.Integer,
sa.ForeignKey('builds.id', use_alter=True, name='rebuilt_buildid', ondelete='SET NULL'),
nullable=True,
),
)
# Tables related to change sources
# --------------------------------
# The changesources table gives a unique identifier to each ChangeSource. It
# also links to other tables used to ensure only one master runs each
# changesource
changesources = sautils.Table(
'changesources',
metadata,
sa.Column("id", sa.Integer, primary_key=True),
# name for this changesource, as given in the configuration, plus a hash
# of that name used for a unique index
sa.Column('name', sa.Text, nullable=False),
sa.Column('name_hash', sa.String(hash_length), nullable=False),
)
# This links changesources to the master where they are running. A changesource
# linked to a master that is inactive can be unlinked by any master. This
# is a separate table so that we can "claim" changesources on a master by
# inserting; this has better support in database servers for ensuring that
# exactly one claim succeeds.
changesource_masters = sautils.Table(
'changesource_masters',
metadata,
sa.Column(
'changesourceid',
sa.Integer,
sa.ForeignKey('changesources.id', ondelete='CASCADE'),
nullable=False,
primary_key=True,
),
sa.Column(
'masterid', sa.Integer, sa.ForeignKey('masters.id', ondelete='CASCADE'), nullable=False
),
)
# Tables related to workers
# -------------------------
workers = sautils.Table(
"workers",
metadata,
sa.Column("id", sa.Integer, primary_key=True),
sa.Column("name", sa.String(50), nullable=False),
sa.Column("info", JsonObject, nullable=False),
sa.Column("paused", sa.SmallInteger, nullable=False, server_default="0"),
sa.Column("pause_reason", sa.Text, nullable=True),
sa.Column("graceful", sa.SmallInteger, nullable=False, server_default="0"),
)
# link workers to all builder/master pairs for which they are
# configured
configured_workers = sautils.Table(
'configured_workers',
metadata,
sa.Column('id', sa.Integer, primary_key=True, nullable=False),
sa.Column(
'buildermasterid',
sa.Integer,
sa.ForeignKey('builder_masters.id', ondelete='CASCADE'),
nullable=False,
),
sa.Column(
'workerid', sa.Integer, sa.ForeignKey('workers.id', ondelete='CASCADE'), nullable=False
),
)
# link workers to the masters they are currently connected to
connected_workers = sautils.Table(
'connected_workers',
metadata,
sa.Column('id', sa.Integer, primary_key=True, nullable=False),
sa.Column(
'masterid', sa.Integer, sa.ForeignKey('masters.id', ondelete='CASCADE'), nullable=False
),
sa.Column(
'workerid', sa.Integer, sa.ForeignKey('workers.id', ondelete='CASCADE'), nullable=False
),
)
# Tables related to changes
# ----------------------------
# Files touched in changes
change_files = sautils.Table(
'change_files',
metadata,
sa.Column(
'changeid',
sa.Integer,
sa.ForeignKey('changes.changeid', ondelete='CASCADE'),
nullable=False,
),
sa.Column('filename', sa.String(1024), nullable=False),
)
# Properties for changes
change_properties = sautils.Table(
'change_properties',
metadata,
sa.Column(
'changeid',
sa.Integer,
sa.ForeignKey('changes.changeid', ondelete='CASCADE'),
nullable=False,
),
sa.Column('property_name', sa.String(property_name_length), nullable=False),
# JSON-encoded tuple of (value, source)
sa.Column('property_value', sa.Text, nullable=False),
)
# users associated with this change; this allows multiple users for
# situations where a version-control system can represent both an author
# and committer, for example.
change_users = sautils.Table(
"change_users",
metadata,
sa.Column(
'changeid',
sa.Integer,
sa.ForeignKey('changes.changeid', ondelete='CASCADE'),
nullable=False,
),
# uid for the author of the change with the given changeid
sa.Column(
'uid', sa.Integer, sa.ForeignKey('users.uid', ondelete='CASCADE'), nullable=False
),
)
# Changes to the source code, produced by ChangeSources
changes = sautils.Table(
'changes',
metadata,
# changeid also serves as 'change number'
sa.Column('changeid', sa.Integer, primary_key=True),
# author's name (usually an email address)
sa.Column('author', sa.String(255), nullable=False),
# committer's name
sa.Column('committer', sa.String(255), nullable=True),
# commit comment
sa.Column('comments', sa.Text, nullable=False),
# The branch where this change occurred. When branch is NULL, that
# means the main branch (trunk, master, etc.)
sa.Column('branch', sa.String(255)),
# revision identifier for this change
sa.Column('revision', sa.String(255)), # CVS uses NULL
sa.Column('revlink', sa.String(256)),
# this is the timestamp of the change - it is usually copied from the
# version-control system, and may be long in the past or even in the
# future!
sa.Column('when_timestamp', sa.Integer, nullable=False),
# an arbitrary string used for filtering changes
sa.Column('category', sa.String(255)),
# repository specifies, along with revision and branch, the
# source tree in which this change was detected.
sa.Column('repository', sa.String(length=512), nullable=False, server_default=''),
# codebase is a logical name to specify what is in the repository
sa.Column('codebase', sa.String(256), nullable=False, server_default=sa.DefaultClause("")),
# project names the project this source code represents. It is used
# later to filter changes
sa.Column('project', sa.String(length=512), nullable=False, server_default=''),
# the sourcestamp this change brought the codebase to
sa.Column(
'sourcestampid',
sa.Integer,
sa.ForeignKey('sourcestamps.id', ondelete='CASCADE'),
nullable=False,
),
# The parent of the change
        # Even though for the moment a change has only one parent, we use the
        # plural here because someday a change may have multiple parents; this
        # way we won't need to change the API.
sa.Column(
'parent_changeids',
sa.Integer,
sa.ForeignKey('changes.changeid', ondelete='SET NULL'),
nullable=True,
),
)
# Tables related to sourcestamps
# ------------------------------
# Patches for SourceStamps that were generated through the try mechanism
patches = sautils.Table(
'patches',
metadata,
sa.Column('id', sa.Integer, primary_key=True),
# number of directory levels to strip off (patch -pN)
sa.Column('patchlevel', sa.Integer, nullable=False),
# base64-encoded version of the patch file
sa.Column('patch_base64', sa.Text, nullable=False),
# patch author, if known
sa.Column('patch_author', sa.Text, nullable=False),
# patch comment
sa.Column('patch_comment', sa.Text, nullable=False),
# subdirectory in which the patch should be applied; NULL for top-level
sa.Column('subdir', sa.Text),
)
# A sourcestamp identifies a particular instance of the source code.
# Ideally, this would always be absolute, but in practice source stamps can
# also mean "latest" (when revision is NULL), which is of course a
# time-dependent definition.
sourcestamps = sautils.Table(
'sourcestamps',
metadata,
sa.Column('id', sa.Integer, primary_key=True),
# hash of the branch, revision, patchid, repository, codebase, and
# project, using hash_columns.
sa.Column('ss_hash', sa.String(hash_length), nullable=False),
# the branch to check out. When branch is NULL, that means
# the main branch (trunk, master, etc.)
sa.Column('branch', sa.String(256)),
# the revision to check out, or the latest if NULL
sa.Column('revision', sa.String(256)),
# the patch to apply to generate this source code
sa.Column(
'patchid', sa.Integer, sa.ForeignKey('patches.id', ondelete='CASCADE'), nullable=True
),
# the repository from which this source should be checked out
sa.Column('repository', sa.String(length=512), nullable=False, server_default=''),
# codebase is a logical name to specify what is in the repository
sa.Column('codebase', sa.String(256), nullable=False, server_default=sa.DefaultClause("")),
# the project this source code represents
sa.Column('project', sa.String(length=512), nullable=False, server_default=''),
        # the time this sourcestamp was first seen (the first time it was added)
sa.Column('created_at', sa.Integer, nullable=False),
)
    # a many-to-many relationship between buildsets and sourcestamps
buildset_sourcestamps = sautils.Table(
'buildset_sourcestamps',
metadata,
sa.Column('id', sa.Integer, primary_key=True),
sa.Column(
'buildsetid',
sa.Integer,
sa.ForeignKey('buildsets.id', ondelete='CASCADE'),
nullable=False,
),
sa.Column(
'sourcestampid',
sa.Integer,
sa.ForeignKey('sourcestamps.id', ondelete='CASCADE'),
nullable=False,
),
)
# Tables related to schedulers
# ----------------------------
# The schedulers table gives a unique identifier to each scheduler. It
# also links to other tables used to ensure only one master runs each
# scheduler, and to track changes that a scheduler may trigger a build for
# later.
schedulers = sautils.Table(
'schedulers',
metadata,
sa.Column("id", sa.Integer, primary_key=True),
# name for this scheduler, as given in the configuration, plus a hash
# of that name used for a unique index
sa.Column('name', sa.Text, nullable=False),
sa.Column('name_hash', sa.String(hash_length), nullable=False),
sa.Column('enabled', sa.SmallInteger, server_default=sa.DefaultClause("1")),
)
# This links schedulers to the master where they are running. A scheduler
# linked to a master that is inactive can be unlinked by any master. This
# is a separate table so that we can "claim" schedulers on a master by
# inserting; this has better support in database servers for ensuring that
# exactly one claim succeeds. The ID column is present for external users;
# see bug #1053.
scheduler_masters = sautils.Table(
'scheduler_masters',
metadata,
sa.Column(
'schedulerid',
sa.Integer,
sa.ForeignKey('schedulers.id', ondelete='CASCADE'),
nullable=False,
primary_key=True,
),
sa.Column(
'masterid', sa.Integer, sa.ForeignKey('masters.id', ondelete='CASCADE'), nullable=False
),
)
# This table references "classified" changes that have not yet been
# "processed". That is, the scheduler has looked at these changes and
# determined that something should be done, but that hasn't happened yet.
# Rows are deleted from this table as soon as the scheduler is done with
# the change.
scheduler_changes = sautils.Table(
'scheduler_changes',
metadata,
sa.Column(
'schedulerid',
sa.Integer,
sa.ForeignKey('schedulers.id', ondelete='CASCADE'),
nullable=False,
),
sa.Column(
'changeid',
sa.Integer,
sa.ForeignKey('changes.changeid', ondelete='CASCADE'),
nullable=False,
),
# true (nonzero) if this change is important to this scheduler
sa.Column('important', sa.Integer),
)
# Tables related to projects
# --------------------------
projects = sautils.Table(
'projects',
metadata,
sa.Column('id', sa.Integer, primary_key=True),
# project name
sa.Column('name', sa.Text, nullable=False),
# sha1 of name; used for a unique index
sa.Column('name_hash', sa.String(hash_length), nullable=False),
# project slug, potentially shown in the URLs
sa.Column('slug', sa.String(50), nullable=False),
# project description
sa.Column('description', sa.Text, nullable=True),
# the format of project description
sa.Column('description_format', sa.Text, nullable=True),
# project description rendered as html if description_format is not NULL
sa.Column('description_html', sa.Text, nullable=True),
)
# Tables related to builders
# --------------------------
builders = sautils.Table(
'builders',
metadata,
sa.Column('id', sa.Integer, primary_key=True),
# builder's name
sa.Column('name', sa.Text, nullable=False),
# builder's description
sa.Column('description', sa.Text, nullable=True),
# the format of builder description
sa.Column('description_format', sa.Text, nullable=True),
# builder description rendered as html if description_format is not NULL
sa.Column('description_html', sa.Text, nullable=True),
# builder's project
sa.Column(
'projectid',
sa.Integer,
sa.ForeignKey('projects.id', name="fk_builders_projectid", ondelete='SET NULL'),
nullable=True,
),
# sha1 of name; used for a unique index
sa.Column('name_hash', sa.String(hash_length), nullable=False),
)
# This links builders to the master where they are running. A builder
# linked to a master that is inactive can be unlinked by any master. Note
# that builders can run on multiple masters at the same time.
builder_masters = sautils.Table(
'builder_masters',
metadata,
sa.Column('id', sa.Integer, primary_key=True, nullable=False),
sa.Column(
'builderid',
sa.Integer,
sa.ForeignKey('builders.id', ondelete='CASCADE'),
nullable=False,
),
sa.Column(
'masterid', sa.Integer, sa.ForeignKey('masters.id', ondelete='CASCADE'), nullable=False
),
)
# Tables related to tags
# ----------------------
tags = sautils.Table(
'tags',
metadata,
sa.Column('id', sa.Integer, primary_key=True),
# tag's name
sa.Column('name', sa.Text, nullable=False),
# sha1 of name; used for a unique index
sa.Column('name_hash', sa.String(hash_length), nullable=False),
)
    # a many-to-many relationship between builders and tags
builders_tags = sautils.Table(
'builders_tags',
metadata,
sa.Column('id', sa.Integer, primary_key=True),
sa.Column(
'builderid',
sa.Integer,
sa.ForeignKey('builders.id', ondelete='CASCADE'),
nullable=False,
),
sa.Column(
'tagid', sa.Integer, sa.ForeignKey('tags.id', ondelete='CASCADE'), nullable=False
),
)
# Tables related to test results
# ------------------------------
    # Represents a single test result set. A step can have any number of test result sets,
# each of which may contain any number of test results.
test_result_sets = sautils.Table(
'test_result_sets',
metadata,
sa.Column('id', sa.Integer, primary_key=True),
        # In the future we will want to rearrange the underlying data in the
        # database according to the (builderid, buildid) tuple, so that a huge
        # number of entries in the table does not reduce the efficiency of
        # retrieving the data for a particular build.
sa.Column(
'builderid',
sa.Integer,
sa.ForeignKey('builders.id', ondelete='CASCADE'),
nullable=False,
),
sa.Column(
'buildid', sa.Integer, sa.ForeignKey('builds.id', ondelete='CASCADE'), nullable=False
),
sa.Column(
'stepid', sa.Integer, sa.ForeignKey('steps.id', ondelete='CASCADE'), nullable=False
),
# The free-form description of the source of the test data that represent the test result
# set.
sa.Column('description', sa.Text, nullable=True),
sa.Column('category', sa.Text, nullable=False),
sa.Column('value_unit', sa.Text, nullable=False),
# The number of passed tests in cases when the pass or fail criteria depends only on how
# that single test runs.
sa.Column('tests_passed', sa.Integer, nullable=True),
# The number of failed tests in cases when the pass or fail criteria depends only on how
# that single test runs.
sa.Column('tests_failed', sa.Integer, nullable=True),
        # true when all test results associated with the test result set have been generated.
sa.Column('complete', sa.SmallInteger, nullable=False),
)
# Represents a test result. A single test result set will represent thousands of test results
# in any significant codebase that's tested.
#
    # A common table is used for all test results regardless of what data they
    # carry. Most serious database engines will be able to optimize nullable
    # fields out, so extra columns are almost free when not used in such cases.
test_results = sautils.Table(
'test_results',
metadata,
sa.Column('id', sa.Integer, primary_key=True),
# The builder ID of the test result set that the test result belongs to.
# This is included for future partitioning support.
sa.Column(
'builderid',
sa.Integer,
sa.ForeignKey('builders.id', ondelete='CASCADE'),
nullable=False,
),
sa.Column(
'test_result_setid',
sa.Integer,
sa.ForeignKey('test_result_sets.id', ondelete='CASCADE'),
nullable=False,
),
sa.Column(
'test_nameid',
sa.Integer,
sa.ForeignKey('test_names.id', ondelete='CASCADE'),
nullable=True,
),
sa.Column(
'test_code_pathid',
sa.Integer,
sa.ForeignKey('test_code_paths.id', ondelete='CASCADE'),
nullable=True,
),
# The code line that the test originated from
sa.Column('line', sa.Integer, nullable=True),
# The duration of the test execution itself
sa.Column('duration_ns', sa.Integer, nullable=True),
# The result of the test converted to a string.
sa.Column('value', sa.Text, nullable=False),
)
# Represents the test names of test results.
test_names = sautils.Table(
'test_names',
metadata,
sa.Column('id', sa.Integer, primary_key=True),
# The builder ID of the test result set that the test result belongs to.
# This is included for future partitioning support and also for querying all test names
# for a builder.
sa.Column(
'builderid',
sa.Integer,
sa.ForeignKey('builders.id', ondelete='CASCADE'),
nullable=False,
),
sa.Column('name', sa.Text, nullable=False),
)
# Represents the file paths of test results.
test_code_paths = sautils.Table(
'test_code_paths',
metadata,
sa.Column('id', sa.Integer, primary_key=True),
# The builder ID of the test result set that the test result belongs to.
# This is included for future partitioning support
sa.Column(
'builderid',
sa.Integer,
sa.ForeignKey('builders.id', ondelete='CASCADE'),
nullable=False,
),
sa.Column('path', sa.Text, nullable=False),
)
# Tables related to objects
# -------------------------
# This table uniquely identifies objects that need to maintain state across
# invocations.
objects = sautils.Table(
"objects",
metadata,
# unique ID for this object
sa.Column("id", sa.Integer, primary_key=True),
# object's user-given name
sa.Column('name', sa.String(128), nullable=False),
# object's class name, basically representing a "type" for the state
sa.Column('class_name', sa.String(128), nullable=False),
)
# This table stores key/value pairs for objects, where the key is a string
# and the value is a JSON string.
object_state = sautils.Table(
"object_state",
metadata,
# object for which this value is set
sa.Column(
'objectid', sa.Integer, sa.ForeignKey('objects.id', ondelete='CASCADE'), nullable=False
),
# name for this value (local to the object)
sa.Column("name", sa.String(length=255), nullable=False),
# value, as a JSON string
sa.Column("value_json", sa.Text, nullable=False),
)
# Tables related to users
# -----------------------
# This table identifies individual users, and contains buildbot-specific
# information about those users.
users = sautils.Table(
"users",
metadata,
# unique user id number
sa.Column("uid", sa.Integer, primary_key=True),
# identifier (nickname) for this user; used for display
sa.Column("identifier", sa.String(255), nullable=False),
# username portion of user credentials for authentication
sa.Column("bb_username", sa.String(128)),
# password portion of user credentials for authentication
sa.Column("bb_password", sa.String(128)),
)
# This table stores information identifying a user that's related to a
# particular interface - a version-control system, status plugin, etc.
users_info = sautils.Table(
"users_info",
metadata,
# unique user id number
sa.Column(
'uid', sa.Integer, sa.ForeignKey('users.uid', ondelete='CASCADE'), nullable=False
),
# type of user attribute, such as 'git'
sa.Column("attr_type", sa.String(128), nullable=False),
# data for given user attribute, such as a commit string or password
sa.Column("attr_data", sa.String(128), nullable=False),
)
# Tables related to masters
# -------------------------
masters = sautils.Table(
"masters",
metadata,
# unique id per master
sa.Column('id', sa.Integer, primary_key=True),
# master's name (generally in the form hostname:basedir)
sa.Column('name', sa.Text, nullable=False),
# sha1 of name; used for a unique index
sa.Column('name_hash', sa.String(hash_length), nullable=False),
# true if this master is running
sa.Column('active', sa.Integer, nullable=False),
# updated periodically by a running master, so silently failed masters
# can be detected by other masters
sa.Column('last_active', sa.Integer, nullable=False),
)
# Indexes
# -------
sa.Index('buildrequests_buildsetid', buildrequests.c.buildsetid)
sa.Index('buildrequests_builderid', buildrequests.c.builderid)
sa.Index('buildrequests_complete', buildrequests.c.complete)
sa.Index('build_properties_buildid', build_properties.c.buildid)
sa.Index('build_data_buildid_name', build_data.c.buildid, build_data.c.name, unique=True)
sa.Index('builds_buildrequestid', builds.c.buildrequestid)
sa.Index('buildsets_complete', buildsets.c.complete)
sa.Index('buildsets_submitted_at', buildsets.c.submitted_at)
sa.Index('buildset_properties_buildsetid', buildset_properties.c.buildsetid)
sa.Index('workers_name', workers.c.name, unique=True)
sa.Index('changes_branch', changes.c.branch)
sa.Index('changes_revision', changes.c.revision)
sa.Index('changes_author', changes.c.author)
sa.Index('changes_category', changes.c.category)
sa.Index('changes_when_timestamp', changes.c.when_timestamp)
sa.Index('change_files_changeid', change_files.c.changeid)
sa.Index('change_properties_changeid', change_properties.c.changeid)
sa.Index('changes_sourcestampid', changes.c.sourcestampid)
sa.Index('changesource_name_hash', changesources.c.name_hash, unique=True)
sa.Index('scheduler_name_hash', schedulers.c.name_hash, unique=True)
sa.Index('scheduler_changes_schedulerid', scheduler_changes.c.schedulerid)
sa.Index('scheduler_changes_changeid', scheduler_changes.c.changeid)
sa.Index(
'scheduler_changes_unique',
scheduler_changes.c.schedulerid,
scheduler_changes.c.changeid,
unique=True,
)
sa.Index('projects_name_hash', projects.c.name_hash, unique=True)
sa.Index('builder_name_hash', builders.c.name_hash, unique=True)
sa.Index('builders_projectid', builders.c.projectid)
sa.Index('builder_masters_builderid', builder_masters.c.builderid)
sa.Index('builder_masters_masterid', builder_masters.c.masterid)
sa.Index(
'builder_masters_identity',
builder_masters.c.builderid,
builder_masters.c.masterid,
unique=True,
)
sa.Index('tag_name_hash', tags.c.name_hash, unique=True)
sa.Index('builders_tags_builderid', builders_tags.c.builderid)
sa.Index('builders_tags_unique', builders_tags.c.builderid, builders_tags.c.tagid, unique=True)
sa.Index('configured_workers_buildmasterid', configured_workers.c.buildermasterid)
sa.Index('configured_workers_workers', configured_workers.c.workerid)
sa.Index(
'configured_workers_identity',
configured_workers.c.buildermasterid,
configured_workers.c.workerid,
unique=True,
)
sa.Index('connected_workers_masterid', connected_workers.c.masterid)
sa.Index('connected_workers_workers', connected_workers.c.workerid)
sa.Index(
'connected_workers_identity',
connected_workers.c.masterid,
connected_workers.c.workerid,
unique=True,
)
sa.Index('users_identifier', users.c.identifier, unique=True)
sa.Index('users_info_uid', users_info.c.uid)
sa.Index('users_info_uid_attr_type', users_info.c.uid, users_info.c.attr_type, unique=True)
sa.Index('users_info_attrs', users_info.c.attr_type, users_info.c.attr_data, unique=True)
sa.Index('change_users_changeid', change_users.c.changeid)
sa.Index('users_bb_user', users.c.bb_username, unique=True)
sa.Index('object_identity', objects.c.name, objects.c.class_name, unique=True)
sa.Index('name_per_object', object_state.c.objectid, object_state.c.name, unique=True)
sa.Index('master_name_hashes', masters.c.name_hash, unique=True)
sa.Index('buildrequest_claims_brids', buildrequest_claims.c.brid, unique=True)
sa.Index('sourcestamps_ss_hash_key', sourcestamps.c.ss_hash, unique=True)
sa.Index('buildset_sourcestamps_buildsetid', buildset_sourcestamps.c.buildsetid)
sa.Index(
'buildset_sourcestamps_unique',
buildset_sourcestamps.c.buildsetid,
buildset_sourcestamps.c.sourcestampid,
unique=True,
)
sa.Index('builds_number', builds.c.builderid, builds.c.number, unique=True)
sa.Index('builds_workerid', builds.c.workerid)
sa.Index('builds_masterid', builds.c.masterid)
sa.Index('steps_number', steps.c.buildid, steps.c.number, unique=True)
sa.Index('steps_name', steps.c.buildid, steps.c.name, unique=True)
sa.Index('steps_started_at', steps.c.started_at)
sa.Index('logs_slug', logs.c.stepid, logs.c.slug, unique=True)
sa.Index('logchunks_firstline', logchunks.c.logid, logchunks.c.first_line)
sa.Index('logchunks_lastline', logchunks.c.logid, logchunks.c.last_line)
sa.Index(
'test_names_name', test_names.c.builderid, test_names.c.name, mysql_length={'name': 255}
)
sa.Index(
'test_code_paths_path',
test_code_paths.c.builderid,
test_code_paths.c.path,
mysql_length={'path': 255},
)
    # MySQL creates indexes for foreign keys, and these appear in the
    # reflection. This is a list of (table, index) pairs that should be
    # expected on this platform.
implied_indexes = [
('change_users', {"unique": False, "column_names": ['uid'], "name": 'uid'}),
('sourcestamps', {"unique": False, "column_names": ['patchid'], "name": 'patchid'}),
('scheduler_masters', {"unique": False, "column_names": ['masterid'], "name": 'masterid'}),
(
'changesource_masters',
{"unique": False, "column_names": ['masterid'], "name": 'masterid'},
),
(
'buildset_sourcestamps',
{"unique": False, "column_names": ['sourcestampid'], "name": 'sourcestampid'},
),
(
'buildsets',
{"unique": False, "column_names": ['parent_buildid'], "name": 'parent_buildid'},
),
(
'buildsets',
{"unique": False, "column_names": ['rebuilt_buildid'], "name": 'rebuilt_buildid'},
),
('builders_tags', {"unique": False, "column_names": ['tagid'], "name": 'tagid'}),
(
'changes',
{"unique": False, "column_names": ['parent_changeids'], "name": 'parent_changeids'},
),
(
'test_result_sets',
{
'name': 'builderid',
'column_names': ['builderid'],
'unique': False,
},
),
(
'test_result_sets',
{
'name': 'buildid',
'column_names': ['buildid'],
'unique': False,
},
),
(
'test_result_sets',
{
'name': 'stepid',
'column_names': ['stepid'],
'unique': False,
},
),
(
'test_results',
{
'name': 'test_result_setid',
'column_names': ['test_result_setid'],
'unique': False,
},
),
(
'test_results',
{
'name': 'test_code_pathid',
'column_names': ['test_code_pathid'],
'unique': False,
},
),
(
'test_results',
{
'name': 'builderid',
'column_names': ['builderid'],
'unique': False,
},
),
(
'test_results',
{
'name': 'test_nameid',
'column_names': ['test_nameid'],
'unique': False,
},
),
]
# Migration support
# -----------------
    # Buildbot has historically used three database migration systems:
    # - a homegrown system that used a "version" table to track versions
    # - SQLAlchemy-migrate, which used a "migrate_version" table to track versions
    # - Alembic, which uses an "alembic_version" table to track versions (current)
    # We need to detect each case and tell the user how to upgrade.
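    # Illustrative sketch (not part of the original module): the generations
    # can be told apart with a plain SQLAlchemy inspector, assuming `conn`
    # is an open connection:
    #
    #   insp = sa.inspect(conn.engine)
    #   if insp.has_table("alembic_version"):
    #       pass  # current scheme, handled by Alembic below
    #   elif insp.has_table("migrate_version"):
    #       pass  # sqlalchemy-migrate era; must be at its final version (58)
    #   elif insp.has_table("version"):
    #       pass  # pre-0.9 homegrown scheme; raises UpgradeFromBefore0p9Error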
config_path = util.sibpath(__file__, "migrations/alembic.ini")
def table_exists(self, conn: SQLAConnection, table: str):
inspector: Inspector = sa.inspect(conn.engine)
return inspector.has_table(table)
def migrate_get_version(self, conn):
r = conn.execute(sa.text("select version from migrate_version limit 1"))
version = r.scalar()
r.close()
return version
def alembic_get_scripts(self):
alembic_config = alembic.config.Config(self.config_path)
return alembic.script.ScriptDirectory.from_config(alembic_config)
def alembic_stamp(self, conn, alembic_scripts, revision):
context = alembic.runtime.migration.MigrationContext.configure(conn)
context.stamp(alembic_scripts, revision)
conn.commit()
@defer.inlineCallbacks
def is_current(self):
def thd(conn):
if not self.table_exists(conn, 'alembic_version'):
return False
alembic_scripts = self.alembic_get_scripts()
current_script_rev_head = alembic_scripts.get_current_head()
context = alembic.runtime.migration.MigrationContext.configure(conn)
current_rev = context.get_current_revision()
return current_rev == current_script_rev_head
ret = yield self.db.pool.do(thd)
return ret
# returns a Deferred that returns None
def create(self):
# this is nice and simple, but used only for tests
def thd(engine):
self.metadata.create_all(bind=engine)
return self.db.pool.do_with_engine(thd)
@defer.inlineCallbacks
def upgrade(self):
# the upgrade process must run in a db thread
def thd(conn):
alembic_scripts = self.alembic_get_scripts()
current_script_rev_head = alembic_scripts.get_current_head()
if self.table_exists(conn, 'version'):
raise UpgradeFromBefore0p9Error()
if self.table_exists(conn, 'migrate_version'):
version = self.migrate_get_version(conn)
if version < 40:
raise UpgradeFromBefore0p9Error()
last_sqlalchemy_migrate_version = 58
if version != last_sqlalchemy_migrate_version:
raise UpgradeFromBefore3p0Error()
self.alembic_stamp(conn, alembic_scripts, alembic_scripts.get_base())
conn.execute(sa.text('drop table migrate_version'))
conn.commit()
if not self.table_exists(conn, 'alembic_version'):
log.msg("Initializing empty database")
# Do some tests first
test_unicode(conn)
Model.metadata.create_all(conn)
conn.commit()
self.alembic_stamp(conn, alembic_scripts, current_script_rev_head)
return
def upgrade(rev, context):
log.msg(f'Upgrading from {rev} to {current_script_rev_head}')
return alembic_scripts._upgrade_revs(current_script_rev_head, rev)
context = alembic.runtime.migration.MigrationContext.configure(
conn, opts={'fn': upgrade}
)
current_rev = context.get_current_revision()
if current_rev == current_script_rev_head:
log.msg('Upgrading database: the current database schema is already the newest')
return
log.msg('Upgrading database')
with sautils.withoutSqliteForeignKeys(conn):
with alembic.operations.Operations.context(context):
with context.begin_transaction():
context.run_migrations()
log.msg('Upgrading database: done')
yield self.db.pool.do(thd)
| 48,612 | Python | .py | 1,128 | 34.20922 | 123 | 0.620832 | buildbot/buildbot | 5,232 | 1,616 | 728 | GPL-2.0 | 9/5/2024, 5:09:21 PM (Europe/Amsterdam) |
4,774 | builds.py | buildbot_buildbot/master/buildbot/db/builds.py | # This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
from __future__ import annotations
import json
from dataclasses import dataclass
from typing import TYPE_CHECKING
import sqlalchemy as sa
from twisted.internet import defer
from buildbot.db import NULL
from buildbot.db import base
from buildbot.util import epoch2datetime
from buildbot.warnings import warn_deprecated
if TYPE_CHECKING:
import datetime
from typing import Sequence
from buildbot.data.resultspec import ResultSpec
from buildbot.db.sourcestamps import SourceStampModel
@dataclass
class BuildModel:
id: int
number: int
builderid: int
buildrequestid: int
workerid: int | None
masterid: int
started_at: datetime.datetime
complete_at: datetime.datetime | None
locks_duration_s: int | None
state_string: str
results: int | None
# For backward compatibility
def __getitem__(self, key: str):
warn_deprecated(
'4.1.0',
(
'BuildsConnectorComponent getBuild, '
'getBuildByNumber, getPrevSuccessfulBuild, '
'getBuildsForChange, getBuilds, '
'_getRecentBuilds, and _getBuild '
                'no longer return Build as dictionaries. '
'Usage of [] accessor is deprecated: please access the member directly'
),
)
if hasattr(self, key):
return getattr(self, key)
raise KeyError(key)
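    # Usage sketch (hypothetical names, not part of the original class):
    # attribute access is the supported style; the subscript form exists only
    # for backward compatibility and triggers the deprecation warning above.
    #
    #   build = yield master.db.builds.getBuild(buildid)
    #   build.state_string      # preferred
    #   build['state_string']   # deprecated, same value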
class BuildsConnectorComponent(base.DBConnectorComponent):
def _getBuild(self, whereclause) -> defer.Deferred[BuildModel | None]:
def thd(conn) -> BuildModel | None:
q = self.db.model.builds.select()
if whereclause is not None:
q = q.where(whereclause)
res = conn.execute(q)
row = res.fetchone()
rv = None
if row:
rv = self._model_from_row(row)
res.close()
return rv
return self.db.pool.do(thd)
def getBuild(self, buildid: int) -> defer.Deferred[BuildModel | None]:
return self._getBuild(self.db.model.builds.c.id == buildid)
def getBuildByNumber(self, builderid: int, number: int) -> defer.Deferred[BuildModel | None]:
return self._getBuild(
(self.db.model.builds.c.builderid == builderid)
& (self.db.model.builds.c.number == number)
)
def _getRecentBuilds(self, whereclause, offset=0, limit=1) -> defer.Deferred[list[BuildModel]]:
def thd(conn) -> list[BuildModel]:
tbl = self.db.model.builds
q = tbl.select()
if whereclause is not None:
q = q.where(
whereclause,
)
q = (
q.order_by(
sa.desc(tbl.c.complete_at),
)
.offset(offset)
.limit(limit)
)
res = conn.execute(q)
return list(self._model_from_row(row) for row in res.fetchall())
return self.db.pool.do(thd)
@defer.inlineCallbacks
def getPrevSuccessfulBuild(
self, builderid: int, number: int, ssBuild: Sequence[SourceStampModel]
):
gssfb = self.master.db.sourcestamps.getSourceStampsForBuild
rv = None
tbl = self.db.model.builds
offset = 0
increment = 1000
matchssBuild = {(ss.repository, ss.branch, ss.codebase) for ss in ssBuild}
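        # Paged scan (comment added for clarity): walk successful builds on
        # this builder newest-first in chunks of `increment`, stopping at the
        # first one whose (repository, branch, codebase) set matches this
        # build's sourcestamps.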
while rv is None:
# Get some recent successful builds on the same builder
prevBuilds = yield self._getRecentBuilds(
whereclause=(
(tbl.c.builderid == builderid) & (tbl.c.number < number) & (tbl.c.results == 0)
),
offset=offset,
limit=increment,
)
if not prevBuilds:
break
for prevBuild in prevBuilds:
prevssBuild = {
(ss.repository, ss.branch, ss.codebase) for ss in (yield gssfb(prevBuild.id))
}
if prevssBuild == matchssBuild:
# A successful build with the same
                    # repository/branch/codebase was found!
rv = prevBuild
break
offset += increment
return rv
def getBuildsForChange(self, changeid: int) -> defer.Deferred[list[BuildModel]]:
assert changeid > 0
def thd(conn) -> list[BuildModel]:
# Get builds for the change
changes_tbl = self.db.model.changes
bsets_tbl = self.db.model.buildsets
bsss_tbl = self.db.model.buildset_sourcestamps
reqs_tbl = self.db.model.buildrequests
builds_tbl = self.db.model.builds
from_clause = changes_tbl.join(
bsss_tbl, changes_tbl.c.sourcestampid == bsss_tbl.c.sourcestampid
)
from_clause = from_clause.join(bsets_tbl, bsss_tbl.c.buildsetid == bsets_tbl.c.id)
from_clause = from_clause.join(reqs_tbl, bsets_tbl.c.id == reqs_tbl.c.buildsetid)
from_clause = from_clause.join(builds_tbl, reqs_tbl.c.id == builds_tbl.c.buildrequestid)
q = (
sa.select(builds_tbl)
.select_from(from_clause)
.where(changes_tbl.c.changeid == changeid)
)
res = conn.execute(q)
return [self._model_from_row(row) for row in res.fetchall()]
return self.db.pool.do(thd)
def getBuilds(
self,
builderid: int | None = None,
buildrequestid: int | None = None,
workerid: int | None = None,
complete: bool | None = None,
resultSpec: ResultSpec | None = None,
) -> defer.Deferred[list[BuildModel]]:
def thd(conn) -> list[BuildModel]:
tbl = self.db.model.builds
q = tbl.select()
if builderid is not None:
q = q.where(tbl.c.builderid == builderid)
if buildrequestid is not None:
q = q.where(tbl.c.buildrequestid == buildrequestid)
if workerid is not None:
q = q.where(tbl.c.workerid == workerid)
if complete is not None:
if complete:
q = q.where(tbl.c.complete_at != NULL)
else:
q = q.where(tbl.c.complete_at == NULL)
if resultSpec is not None:
return resultSpec.thd_execute(conn, q, self._model_from_row)
res = conn.execute(q)
return [self._model_from_row(row) for row in res.fetchall()]
return self.db.pool.do(thd)
# returns a Deferred that returns a value
def addBuild(
self, builderid, buildrequestid, workerid, masterid, state_string, _race_hook=None
):
started_at = int(self.master.reactor.seconds())
def thd(conn):
tbl = self.db.model.builds
# get the highest current number
r = conn.execute(
sa.select(sa.func.max(tbl.c.number)).where(tbl.c.builderid == builderid)
)
number = r.scalar()
new_number = 1 if number is None else number + 1
            # insert, retrying until we succeed
while True:
if _race_hook:
_race_hook(conn)
try:
r = conn.execute(
self.db.model.builds.insert(),
{
"number": new_number,
"builderid": builderid,
"buildrequestid": buildrequestid,
"workerid": workerid,
"masterid": masterid,
"started_at": started_at,
"complete_at": None,
"locks_duration_s": 0,
"state_string": state_string,
},
)
conn.commit()
except (sa.exc.IntegrityError, sa.exc.ProgrammingError) as e:
conn.rollback()
                    # PostgreSQL 9.5 can raise this duplicate-key error on the
                    # primary key itself; in that case retry with the same
                    # number, since bumping it would skip build numbers
if 'duplicate key value violates unique constraint "builds_pkey"' not in str(e):
new_number += 1
continue
return r.inserted_primary_key[0], new_number
return self.db.pool.do(thd)
# returns a Deferred that returns None
def setBuildStateString(self, buildid, state_string):
def thd(conn):
tbl = self.db.model.builds
q = tbl.update().where(tbl.c.id == buildid)
conn.execute(q.values(state_string=state_string))
return self.db.pool.do_with_transaction(thd)
# returns a Deferred that returns None
def finishBuild(self, buildid, results):
def thd(conn):
tbl = self.db.model.builds
q = tbl.update().where(tbl.c.id == buildid)
conn.execute(q.values(complete_at=int(self.master.reactor.seconds()), results=results))
return self.db.pool.do_with_transaction(thd)
# returns a Deferred that returns a value
def getBuildProperties(self, bid, resultSpec=None):
def thd(conn):
bp_tbl = self.db.model.build_properties
q = sa.select(
bp_tbl.c.name,
bp_tbl.c.value,
bp_tbl.c.source,
).where(bp_tbl.c.buildid == bid)
props = []
if resultSpec is not None:
data = resultSpec.thd_execute(conn, q, lambda x: x)
else:
data = conn.execute(q)
for row in data:
prop = (json.loads(row.value), row.source)
props.append((row.name, prop))
return dict(props)
return self.db.pool.do(thd)
@defer.inlineCallbacks
def setBuildProperty(self, bid, name, value, source):
"""A kind of create_or_update, that's between one or two queries per
call"""
def thd(conn):
bp_tbl = self.db.model.build_properties
self.checkLength(bp_tbl.c.name, name)
self.checkLength(bp_tbl.c.source, source)
whereclause = sa.and_(bp_tbl.c.buildid == bid, bp_tbl.c.name == name)
q = sa.select(bp_tbl.c.value, bp_tbl.c.source).where(whereclause)
prop = conn.execute(q).fetchone()
value_js = json.dumps(value)
if prop is None:
conn.execute(
bp_tbl.insert(),
{"buildid": bid, "name": name, "value": value_js, "source": source},
)
elif (prop.value != value_js) or (prop.source != source):
conn.execute(
bp_tbl.update().where(whereclause), {"value": value_js, "source": source}
)
yield self.db.pool.do_with_transaction(thd)
@defer.inlineCallbacks
def add_build_locks_duration(self, buildid, duration_s):
def thd(conn):
builds_tbl = self.db.model.builds
conn.execute(
builds_tbl.update()
.where(builds_tbl.c.id == buildid)
.values(locks_duration_s=builds_tbl.c.locks_duration_s + duration_s)
)
yield self.db.pool.do_with_transaction(thd)
def _model_from_row(self, row):
return BuildModel(
id=row.id,
number=row.number,
builderid=row.builderid,
buildrequestid=row.buildrequestid,
workerid=row.workerid,
masterid=row.masterid,
started_at=epoch2datetime(row.started_at),
complete_at=epoch2datetime(row.complete_at),
locks_duration_s=row.locks_duration_s,
state_string=row.state_string,
results=row.results,
)
| 12,721 | Python | .py | 300 | 30.196667 | 100 | 0.566729 | buildbot/buildbot | 5,232 | 1,616 | 728 | GPL-2.0 | 9/5/2024, 5:09:21 PM (Europe/Amsterdam) |
4,775 | sourcestamps.py | buildbot_buildbot/master/buildbot/db/sourcestamps.py | # This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
from __future__ import annotations
import base64
from dataclasses import dataclass
from typing import TYPE_CHECKING
import sqlalchemy as sa
from twisted.internet import defer
from twisted.python import deprecate
from twisted.python import log
from twisted.python import versions
from buildbot.db import base
from buildbot.util import bytes2unicode
from buildbot.util import epoch2datetime
from buildbot.util import unicode2bytes
from buildbot.util.sautils import hash_columns
from buildbot.warnings import warn_deprecated
if TYPE_CHECKING:
import datetime
@dataclass
class PatchModel:
patchid: int
body: bytes
level: int
author: str
comment: str
subdir: str | None = None
@dataclass
class SourceStampModel:
ssid: int
branch: str | None
revision: str | None
repository: str
created_at: datetime.datetime
codebase: str = ''
project: str = ''
patch: PatchModel | None = None
# For backward compatibility from when SsDict inherited from Dict
def __getitem__(self, key: str):
warn_deprecated(
'4.1.0',
(
'SourceStampsConnectorComponent '
'getSourceStamp, get_sourcestamps_for_buildset, '
                'getSourceStampsForBuild, and getSourceStamps '
                'no longer return SourceStamp as dictionaries. '
'Usage of [] accessor is deprecated: please access the member directly'
),
)
if hasattr(self, key):
return getattr(self, key)
# moved to PatchModel object
patch_key = {
'patchid': 'patchid',
'patch_body': 'body',
'patch_level': 'level',
'patch_author': 'author',
'patch_comment': 'comment',
'patch_subdir': 'subdir',
}.get(key)
if patch_key is not None:
if self.patch is None:
return None
return getattr(self.patch, patch_key)
raise KeyError(key)
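    # Usage sketch (hypothetical): the old flat patch_* keys now live on the
    # nested PatchModel, e.g. for a SourceStampModel `ss`:
    #
    #   ss.patch.body if ss.patch else None   # replaces ss['patch_body']
    #   ss.patch.level if ss.patch else None  # replaces ss['patch_level']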
@deprecate.deprecated(versions.Version("buildbot", 4, 1, 0), SourceStampModel)
class SsDict(SourceStampModel):
pass
class SourceStampsConnectorComponent(base.DBConnectorComponent):
@defer.inlineCallbacks
def findSourceStampId(
self,
branch=None,
revision=None,
repository=None,
project=None,
codebase=None,
patch_body=None,
patch_level=None,
patch_author=None,
patch_comment=None,
patch_subdir=None,
):
sourcestampid, _ = yield self.findOrCreateId(
branch,
revision,
repository,
project,
codebase,
patch_body,
patch_level,
patch_author,
patch_comment,
patch_subdir,
)
return sourcestampid
@defer.inlineCallbacks
def findOrCreateId(
self,
branch=None,
revision=None,
repository=None,
project=None,
codebase=None,
patch_body=None,
patch_level=None,
patch_author=None,
patch_comment=None,
patch_subdir=None,
):
tbl = self.db.model.sourcestamps
assert codebase is not None, "codebase cannot be None"
assert project is not None, "project cannot be None"
assert repository is not None, "repository cannot be None"
self.checkLength(tbl.c.branch, branch)
self.checkLength(tbl.c.revision, revision)
self.checkLength(tbl.c.repository, repository)
self.checkLength(tbl.c.project, project)
# get a patchid, if we have a patch
def thd(conn):
patchid = None
if patch_body:
patch_body_bytes = unicode2bytes(patch_body)
patch_base64_bytes = base64.b64encode(patch_body_bytes)
ins = self.db.model.patches.insert()
r = conn.execute(
ins,
{
"patchlevel": patch_level,
"patch_base64": bytes2unicode(patch_base64_bytes),
"patch_author": patch_author,
"patch_comment": patch_comment,
"subdir": patch_subdir,
},
)
conn.commit()
patchid = r.inserted_primary_key[0]
return patchid
patchid = yield self.db.pool.do(thd)
ss_hash = hash_columns(branch, revision, repository, project, codebase, patchid)
sourcestampid, found = yield self.findOrCreateSomethingId(
tbl=tbl,
whereclause=tbl.c.ss_hash == ss_hash,
insert_values={
'branch': branch,
'revision': revision,
'repository': repository,
'codebase': codebase,
'project': project,
'patchid': patchid,
'ss_hash': ss_hash,
'created_at': int(self.master.reactor.seconds()),
},
)
return sourcestampid, found
# returns a Deferred that returns a value
@base.cached("ssdicts")
def getSourceStamp(self, ssid) -> defer.Deferred[SourceStampModel | None]:
def thd(conn) -> SourceStampModel | None:
tbl = self.db.model.sourcestamps
q = tbl.select().where(tbl.c.id == ssid)
res = conn.execute(q)
row = res.fetchone()
if not row:
return None
model = self._rowToModel_thd(conn, row)
res.close()
return model
return self.db.pool.do(thd)
# returns a Deferred that returns a value
def get_sourcestamps_for_buildset(self, buildsetid) -> defer.Deferred[list[SourceStampModel]]:
def thd(conn) -> list[SourceStampModel]:
bsets_tbl = self.db.model.buildsets
bsss_tbl = self.db.model.buildset_sourcestamps
sstamps_tbl = self.db.model.sourcestamps
from_clause = bsets_tbl.join(bsss_tbl, bsets_tbl.c.id == bsss_tbl.c.buildsetid).join(
sstamps_tbl, bsss_tbl.c.sourcestampid == sstamps_tbl.c.id
)
q = sa.select(sstamps_tbl).select_from(from_clause).where(bsets_tbl.c.id == buildsetid)
res = conn.execute(q)
return [self._rowToModel_thd(conn, row) for row in res.fetchall()]
return self.db.pool.do(thd)
# returns a Deferred that returns a value
def getSourceStampsForBuild(self, buildid) -> defer.Deferred[list[SourceStampModel]]:
assert buildid > 0
def thd(conn) -> list[SourceStampModel]:
# Get SourceStamps for the build
builds_tbl = self.db.model.builds
reqs_tbl = self.db.model.buildrequests
bsets_tbl = self.db.model.buildsets
bsss_tbl = self.db.model.buildset_sourcestamps
sstamps_tbl = self.db.model.sourcestamps
from_clause = builds_tbl.join(reqs_tbl, builds_tbl.c.buildrequestid == reqs_tbl.c.id)
from_clause = from_clause.join(bsets_tbl, reqs_tbl.c.buildsetid == bsets_tbl.c.id)
from_clause = from_clause.join(bsss_tbl, bsets_tbl.c.id == bsss_tbl.c.buildsetid)
from_clause = from_clause.join(
sstamps_tbl, bsss_tbl.c.sourcestampid == sstamps_tbl.c.id
)
q = sa.select(sstamps_tbl).select_from(from_clause).where(builds_tbl.c.id == buildid)
res = conn.execute(q)
return [self._rowToModel_thd(conn, row) for row in res.fetchall()]
return self.db.pool.do(thd)
# returns a Deferred that returns a value
def getSourceStamps(self) -> defer.Deferred[list[SourceStampModel]]:
def thd(conn) -> list[SourceStampModel]:
tbl = self.db.model.sourcestamps
q = tbl.select()
res = conn.execute(q)
return [self._rowToModel_thd(conn, row) for row in res.fetchall()]
return self.db.pool.do(thd)
def _rowToModel_thd(self, conn, row) -> SourceStampModel:
ssid = row.id
model = SourceStampModel(
ssid=ssid,
branch=row.branch,
revision=row.revision,
repository=row.repository,
codebase=row.codebase,
project=row.project,
created_at=epoch2datetime(row.created_at),
)
patchid = row.patchid
# fetch the patch, if necessary
if patchid is not None:
tbl = self.db.model.patches
q = tbl.select().where(tbl.c.id == patchid)
res = conn.execute(q)
row = res.fetchone()
if row:
model.patch = PatchModel(
patchid=patchid,
body=base64.b64decode(row.patch_base64),
level=row.patchlevel,
author=row.patch_author,
comment=row.patch_comment,
subdir=row.subdir,
)
else:
log.msg(f'patchid {patchid}, referenced from ssid {ssid}, not found')
res.close()
return model
| 9,904 | Python | .py | 253 | 28.881423 | 99 | 0.602539 | buildbot/buildbot | 5,232 | 1,616 | 728 | GPL-2.0 | 9/5/2024, 5:09:21 PM (Europe/Amsterdam) |
4,776 | changes.py | buildbot_buildbot/master/buildbot/db/changes.py | # This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
"""
Support for changes in the database
"""
from __future__ import annotations
import json
from dataclasses import dataclass
from dataclasses import field
from typing import TYPE_CHECKING
import sqlalchemy as sa
from twisted.internet import defer
from twisted.python import deprecate
from twisted.python import log
from twisted.python import versions
from buildbot.db import base
from buildbot.util import datetime2epoch
from buildbot.util import epoch2datetime
from buildbot.warnings import warn_deprecated
if TYPE_CHECKING:
import datetime
from typing import Any
from typing import Iterable
from typing import Literal
@dataclass
class ChangeModel:
changeid: int
author: str
committer: str | None
comments: str
branch: str | None
revision: str | None
revlink: str | None
when_timestamp: datetime.datetime
category: str | None
sourcestampid: int
parent_changeids: list[int] = field(default_factory=list)
repository: str = ''
codebase: str = ''
project: str = ''
files: list[str] = field(default_factory=list)
properties: dict[str, tuple[Any, Literal["Change"]]] = field(default_factory=dict)
# For backward compatibility
def __getitem__(self, key: str):
warn_deprecated(
'4.1.0',
(
'ChangesConnectorComponent '
'getChange, getChangesForBuild, getChangeFromSSid, and getChanges '
                'no longer return Change as dictionaries. '
'Usage of [] accessor is deprecated: please access the member directly'
),
)
if hasattr(self, key):
return getattr(self, key)
raise KeyError(key)
@deprecate.deprecated(versions.Version("buildbot", 4, 1, 0), ChangeModel)
class ChDict(ChangeModel):
pass
class ChangesConnectorComponent(base.DBConnectorComponent):
def getParentChangeIds(
self, branch: str | None, repository: str, project: str, codebase: str
) -> defer.Deferred[list[int]]:
def thd(conn) -> list[int]:
changes_tbl = self.db.model.changes
q = (
sa.select(
changes_tbl.c.changeid,
)
.where(
changes_tbl.c.branch == branch,
changes_tbl.c.repository == repository,
changes_tbl.c.project == project,
changes_tbl.c.codebase == codebase,
)
.order_by(
sa.desc(changes_tbl.c.changeid),
)
.limit(1)
)
parent_id = conn.scalar(q)
return [parent_id] if parent_id else []
return self.db.pool.do(thd)
@defer.inlineCallbacks
def addChange(
self,
author: str | None = None,
committer: str | None = None,
files: list[str] | None = None,
comments: str | None = None,
is_dir: None = None,
revision: str | None = None,
when_timestamp: datetime.datetime | None = None,
branch: str | None = None,
category: str | None = None,
revlink: str | None = '',
properties: dict[str, tuple[Any, Literal['Change']]] | None = None,
repository: str = '',
codebase: str = '',
project: str = '',
uid: int | None = None,
_test_changeid: int | None = None,
):
assert project is not None, "project must be a string, not None"
assert repository is not None, "repository must be a string, not None"
if is_dir is not None:
log.msg("WARNING: change source is providing deprecated value is_dir (ignored)")
if when_timestamp is None:
when_timestamp = epoch2datetime(self.master.reactor.seconds())
if properties is None:
properties = {}
# verify that source is 'Change' for each property
for pv in properties.values():
assert pv[1] == 'Change', "properties must be qualified with source 'Change'"
ch_tbl = self.db.model.changes
self.checkLength(ch_tbl.c.author, author)
self.checkLength(ch_tbl.c.committer, committer)
self.checkLength(ch_tbl.c.branch, branch)
self.checkLength(ch_tbl.c.revision, revision)
self.checkLength(ch_tbl.c.revlink, revlink)
self.checkLength(ch_tbl.c.category, category)
self.checkLength(ch_tbl.c.repository, repository)
self.checkLength(ch_tbl.c.project, project)
# calculate the sourcestamp first, before adding it
ssid = yield self.db.sourcestamps.findSourceStampId(
revision=revision,
branch=branch,
repository=repository,
codebase=codebase,
project=project,
)
parent_changeids = yield self.getParentChangeIds(branch, repository, project, codebase)
# Someday, changes will have multiple parents.
# But for the moment, a Change can only have 1 parent
parent_changeid = parent_changeids[0] if parent_changeids else None
def thd(conn) -> int:
# note that in a read-uncommitted database like SQLite this
# transaction does not buy atomicity - other database users may
# still come across a change without its files, properties,
# etc. That's OK, since we don't announce the change until it's
# all in the database, but beware.
transaction = conn.begin()
insert_value = {
"author": author,
"committer": committer,
"comments": comments,
"branch": branch,
"revision": revision,
"revlink": revlink,
"when_timestamp": datetime2epoch(when_timestamp),
"category": category,
"repository": repository,
"codebase": codebase,
"project": project,
"sourcestampid": ssid,
"parent_changeids": parent_changeid,
}
if _test_changeid is not None:
insert_value['changeid'] = _test_changeid
r = conn.execute(ch_tbl.insert(), [insert_value])
changeid = r.inserted_primary_key[0]
if files:
tbl = self.db.model.change_files
for f in files:
self.checkLength(tbl.c.filename, f)
conn.execute(tbl.insert(), [{"changeid": changeid, "filename": f} for f in files])
if properties:
tbl = self.db.model.change_properties
inserts = [
{"changeid": changeid, "property_name": k, "property_value": json.dumps(v)}
for k, v in properties.items()
]
for i in inserts:
self.checkLength(tbl.c.property_name, i['property_name'])
conn.execute(tbl.insert(), inserts)
if uid:
ins = self.db.model.change_users.insert()
conn.execute(ins, {"changeid": changeid, "uid": uid})
transaction.commit()
return changeid
return (yield self.db.pool.do(thd))
@base.cached("chdicts")
def getChange(self, changeid: int) -> defer.Deferred[ChangeModel | None]:
assert changeid >= 0
def thd(conn) -> ChangeModel | None:
# get the row from the 'changes' table
changes_tbl = self.db.model.changes
q = changes_tbl.select().where(changes_tbl.c.changeid == changeid)
rp = conn.execute(q)
row = rp.fetchone()
if not row:
return None
# and fetch the ancillary data (files, properties)
return self._thd_model_from_row(conn, row)
return self.db.pool.do(thd)
@defer.inlineCallbacks
def getChangesForBuild(self, buildid: int):
assert buildid > 0
gssfb = self.master.db.sourcestamps.getSourceStampsForBuild
changes: list[ChangeModel] = []
currentBuild = yield self.master.db.builds.getBuild(buildid)
fromChanges: dict[str, ChangeModel | None] = {}
toChanges: dict[str, ChangeModel] = {}
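        # Strategy (comment added for clarity): compute the newest change per
        # codebase in this build (fromChanges) and in the previous successful
        # build (toChanges), then walk each parent chain backwards until the
        # two meet.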
ssBuild = yield gssfb(buildid)
for ss in ssBuild:
fromChanges[ss.codebase] = yield self.getChangeFromSSid(ss.ssid)
# Get the last successful build on the same builder
previousBuild = yield self.master.db.builds.getPrevSuccessfulBuild(
currentBuild.builderid, currentBuild.number, ssBuild
)
if previousBuild:
for ss in (yield gssfb(previousBuild.id)):
ss_change = yield self.getChangeFromSSid(ss.ssid)
if ss_change:
toChanges[ss.codebase] = ss_change
# For each codebase, append changes until we match the parent
for cb, change in fromChanges.items():
if not change:
continue
to_cb_change = toChanges.get(cb)
to_cb_changeid = to_cb_change.changeid if to_cb_change is not None else None
if to_cb_changeid is not None and to_cb_changeid == change.changeid:
continue
changes.append(change)
while change.parent_changeids and to_cb_changeid not in change.parent_changeids:
                # For the moment, a Change can only have one parent.
change = yield self.master.db.changes.getChange(change.parent_changeids[0])
                # per http://trac.buildbot.net/ticket/3461,
                # parent_changeids can sometimes be corrupted
if change is None:
break
changes.append(change)
return changes
def getChangeFromSSid(self, sourcestampid: int) -> defer.Deferred[ChangeModel | None]:
assert sourcestampid >= 0
def thd(conn) -> ChangeModel | None:
# get the row from the 'changes' table
changes_tbl = self.db.model.changes
q = changes_tbl.select().where(changes_tbl.c.sourcestampid == sourcestampid)
# if there are multiple changes for this ssid, get the most recent one
q = q.order_by(changes_tbl.c.changeid.desc())
q = q.limit(1)
rp = conn.execute(q)
row = rp.fetchone()
if not row:
return None
# and fetch the ancillary data (files, properties)
return self._thd_model_from_row(conn, row)
return self.db.pool.do(thd)
def getChangeUids(self, changeid: int) -> defer.Deferred[list[int]]:
assert changeid >= 0
def thd(conn) -> list[int]:
cu_tbl = self.db.model.change_users
q = cu_tbl.select().where(cu_tbl.c.changeid == changeid)
res = conn.execute(q)
rows = res.fetchall()
row_uids = [row.uid for row in rows]
return row_uids
return self.db.pool.do(thd)
def _getDataFromRow(self, row):
return row.changeid
def getChanges(self, resultSpec=None) -> defer.Deferred[Iterable[int]]:
def thd(conn) -> Iterable[int]:
# get the changeids from the 'changes' table
changes_tbl = self.db.model.changes
if resultSpec is not None:
q = changes_tbl.select()
return reversed(resultSpec.thd_execute(conn, q, self._getDataFromRow))
q = sa.select(changes_tbl.c.changeid)
rp = conn.execute(q)
changeids = [self._getDataFromRow(row) for row in rp]
rp.close()
return list(changeids)
d = self.db.pool.do(thd)
# then turn those into changes, using the cache
@d.addCallback
def get_changes(changeids):
return defer.gatherResults([self.getChange(changeid) for changeid in changeids])
return d
def getChangesCount(self) -> defer.Deferred[int]:
def thd(conn) -> int:
changes_tbl = self.db.model.changes
q = sa.select(sa.func.count()).select_from(changes_tbl)
rp = conn.execute(q)
r = 0
for row in rp:
r = row[0]
rp.close()
return int(r)
return self.db.pool.do(thd)
def getLatestChangeid(self) -> defer.Deferred[int | None]:
def thd(conn) -> int:
changes_tbl = self.db.model.changes
q = (
sa.select(
changes_tbl.c.changeid,
)
.order_by(
sa.desc(changes_tbl.c.changeid),
)
.limit(1)
)
return conn.scalar(q)
return self.db.pool.do(thd)
# utility methods
@defer.inlineCallbacks
def pruneChanges(self, changeHorizon: int):
"""
Called periodically by DBConnector, this method deletes changes older
than C{changeHorizon}.
"""
if not changeHorizon:
return
def thd(conn) -> None:
changes_tbl = self.db.model.changes
# First, get the list of changes to delete. This could be written
# as a subquery but then that subquery would be run for every
# table, which is very inefficient; also, MySQL's subquery support
# leaves much to be desired, and doesn't support this particular
# form.
q = (
sa.select(
changes_tbl.c.changeid,
)
.order_by(
sa.desc(changes_tbl.c.changeid),
)
.offset(changeHorizon)
)
res = conn.execute(q)
ids_to_delete = [r.changeid for r in res]
# and delete from all relevant tables, in dependency order
for table_name in (
'scheduler_changes',
'change_files',
'change_properties',
'changes',
'change_users',
):
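                # Delete in batches of 100 (comment added for clarity) so the
                # IN clause stays small; some engines, notably SQLite, cap the
                # number of bound parameters per statement.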
remaining = ids_to_delete[:]
while remaining:
batch = remaining[:100]
remaining = remaining[100:]
table = self.db.model.metadata.tables[table_name]
conn.execute(table.delete().where(table.c.changeid.in_(batch)))
yield self.db.pool.do_with_transaction(thd)
def _thd_model_from_row(self, conn, ch_row) -> ChangeModel:
# This method must be run in a db.pool thread
change_files_tbl = self.db.model.change_files
change_properties_tbl = self.db.model.change_properties
if ch_row.parent_changeids:
parent_changeids = [ch_row.parent_changeids]
else:
parent_changeids = []
chdict = ChangeModel(
changeid=ch_row.changeid,
parent_changeids=parent_changeids,
author=ch_row.author,
committer=ch_row.committer,
comments=ch_row.comments,
revision=ch_row.revision,
when_timestamp=epoch2datetime(ch_row.when_timestamp),
branch=ch_row.branch,
category=ch_row.category,
revlink=ch_row.revlink,
repository=ch_row.repository,
codebase=ch_row.codebase,
project=ch_row.project,
sourcestampid=int(ch_row.sourcestampid),
)
query = change_files_tbl.select().where(change_files_tbl.c.changeid == ch_row.changeid)
rows = conn.execute(query)
chdict.files.extend(r.filename for r in rows)
# and properties must be given without a source, so strip that, but
# be flexible in case users have used a development version where the
# change properties were recorded incorrectly
def split_vs(vs) -> tuple[Any, Literal["Change"]]:
try:
v, s = vs
if s != "Change":
v, s = vs, "Change"
except (ValueError, TypeError):
v, s = vs, "Change"
return v, s
query = change_properties_tbl.select().where(
change_properties_tbl.c.changeid == ch_row.changeid
)
rows = conn.execute(query)
for r in rows:
try:
v, s = split_vs(json.loads(r.property_value))
chdict.properties[r.property_name] = (v, s)
except ValueError:
pass
return chdict
| 17,304 | Python | .py | 407 | 31.199017 | 98 | 0.586851 | buildbot/buildbot | 5,232 | 1,616 | 728 | GPL-2.0 | 9/5/2024, 5:09:21 PM (Europe/Amsterdam) |
4,777 | enginestrategy.py | buildbot_buildbot/master/buildbot/db/enginestrategy.py | # This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
"""
A wrapper around `sqlalchemy.create_engine` that handles all of the
special cases that Buildbot needs. Those include:
- pool_recycle for MySQL
- %(basedir)s substitution
- optimal thread pool size calculation
"""
import os
import sqlalchemy as sa
from sqlalchemy.engine import url
from sqlalchemy.pool import NullPool
from twisted.python import log
# from http://www.mail-archive.com/[email protected]/msg15079.html
class ReconnectingListener:
def __init__(self):
self.retried = False
class Strategy:
def set_up(self, u, engine):
pass
def should_retry(self, operational_error):
try:
text = operational_error.args[0]
return 'Lost connection' in text or 'database is locked' in text
except Exception:
return False
class SqlLiteStrategy(Strategy):
def set_up(self, u, engine: sa.engine.base.Engine):
"""Special setup for sqlite engines"""
def connect_listener_enable_fk(connection, record):
# fk must be enabled for all connections
if not getattr(engine, "fk_disabled", False):
return # http://trac.buildbot.net/ticket/3490#ticket
# connection.execute('pragma foreign_keys=ON')
sa.event.listen(engine.pool, 'connect', connect_listener_enable_fk)
# try to enable WAL logging
if u.database:
def connect_listener(connection, record):
connection.execute("pragma checkpoint_fullfsync = off")
sa.event.listen(engine.pool, 'connect', connect_listener)
log.msg("setting database journal mode to 'wal'")
try:
with engine.connect() as conn:
conn.exec_driver_sql("pragma journal_mode = wal")
except Exception:
log.msg("failed to set journal mode - database may fail")
class MySQLStrategy(Strategy):
disconnect_error_codes = (2006, 2013, 2014, 2045, 2055)
deadlock_error_codes = (1213,)
def in_error_codes(self, args, error_codes):
if args:
return args[0] in error_codes
return False
def is_disconnect(self, args):
return self.in_error_codes(args, self.disconnect_error_codes)
def is_deadlock(self, args):
return self.in_error_codes(args, self.deadlock_error_codes)
def set_up(self, u, engine):
"""Special setup for mysql engines"""
# add the reconnecting PoolListener that will detect a
# disconnected connection and automatically start a new
# one. This provides a measure of additional safety over
# the pool_recycle parameter, and is useful when e.g., the
# mysql server goes away
def checkout_listener(dbapi_con, con_record, con_proxy):
try:
cursor = dbapi_con.cursor()
cursor.execute("SELECT 1")
except dbapi_con.OperationalError as ex:
if self.is_disconnect(ex.args):
# sqlalchemy will re-create the connection
log.msg('connection will be removed')
raise sa.exc.DisconnectionError() from ex
log.msg(f'exception happened {ex}')
raise
sa.event.listen(engine.pool, 'checkout', checkout_listener)
def should_retry(self, ex):
return any([
self.is_disconnect(ex.orig.args),
self.is_deadlock(ex.orig.args),
super().should_retry(ex),
])
def sa_url_set_attr(u, attr, value):
if hasattr(u, 'set'):
return u.set(**{attr: value})
setattr(u, attr, value)
return u
def special_case_sqlite(u, kwargs):
"""For sqlite, percent-substitute %(basedir)s and use a full
path to the basedir. If using a memory database, force the
pool size to be 1."""
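    # Example (illustrative): with basedir='/bb', 'sqlite:///state.sqlite'
    # resolves to 'sqlite:////bb/state.sqlite'; in both the file and the
    # in-memory case max_conns stays 1, since SQLite is accessed from a
    # single thread.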
max_conns = 1
# when given a database path, stick the basedir in there
if u.database:
# Use NullPool instead of the sqlalchemy-0.6.8-default
# SingletonThreadPool for sqlite to suppress the error in
# http://groups.google.com/group/sqlalchemy/msg/f8482e4721a89589,
# which also explains that NullPool is the new default in
# sqlalchemy 0.7 for non-memory SQLite databases.
kwargs.setdefault('poolclass', NullPool)
database = u.database
database = database % {"basedir": kwargs['basedir']}
if not os.path.isabs(database[0]):
database = os.path.join(kwargs['basedir'], database)
u = sa_url_set_attr(u, 'database', database)
else:
# For in-memory database SQLAlchemy will use SingletonThreadPool
# and we will run connection creation and all queries in the single
# thread.
# However connection destruction will be run from the main
# thread, which is safe in our case, but not safe in general,
# so SQLite will emit warning about it.
# Silence that warning.
kwargs.setdefault('connect_args', {})['check_same_thread'] = False
# ignore serializing access to the db
if 'serialize_access' in u.query:
query = dict(u.query)
query.pop('serialize_access')
u = sa_url_set_attr(u, 'query', query)
return u, kwargs, max_conns
def special_case_mysql(u, kwargs):
"""For mysql, take max_idle out of the query arguments, and
use its value for pool_recycle. Also, force use_unicode and
charset to be True and 'utf8', failing if they were set to
anything else."""
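    # Example (illustrative): 'mysql://host/bb?max_idle=300&storage_engine=InnoDB'
    # yields pool_recycle=300 and an init_command selecting InnoDB; omitted
    # arguments fall back to 3600 seconds and MyISAM.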
query = dict(u.query)
kwargs['pool_recycle'] = int(query.pop('max_idle', 3600))
# default to the MyISAM storage engine
storage_engine = query.pop('storage_engine', 'MyISAM')
kwargs['connect_args'] = {'init_command': f'SET default_storage_engine={storage_engine}'}
if 'use_unicode' in query:
if query['use_unicode'] != "True":
raise TypeError("Buildbot requires use_unicode=True " + "(and adds it automatically)")
else:
query['use_unicode'] = "True"
if 'charset' in query:
if query['charset'] != "utf8":
raise TypeError("Buildbot requires charset=utf8 " + "(and adds it automatically)")
else:
query['charset'] = 'utf8'
u = sa_url_set_attr(u, 'query', query)
return u, kwargs, None
def get_drivers_strategy(drivername):
if drivername.startswith('sqlite'):
return SqlLiteStrategy()
elif drivername.startswith('mysql'):
return MySQLStrategy()
return Strategy()
def create_engine(name_or_url, **kwargs):
if 'basedir' not in kwargs:
raise TypeError('no basedir supplied to create_engine')
max_conns = None
# apply special cases
u = url.make_url(name_or_url)
if u.drivername.startswith('sqlite'):
u, kwargs, max_conns = special_case_sqlite(u, kwargs)
elif u.drivername.startswith('mysql'):
u, kwargs, max_conns = special_case_mysql(u, kwargs)
# remove the basedir as it may confuse sqlalchemy
kwargs.pop('basedir')
# calculate the maximum number of connections from the pool parameters,
# if it hasn't already been specified
if max_conns is None:
max_conns = kwargs.get('pool_size', 5) + kwargs.get('max_overflow', 10)
driver_strategy = get_drivers_strategy(u.drivername)
engine = sa.create_engine(u, **kwargs, future=True)
driver_strategy.set_up(u, engine)
engine.should_retry = driver_strategy.should_retry
# annotate the engine with the optimal thread pool size; this is used
# by DBConnector to configure the surrounding thread pool
engine.optimal_thread_pool_size = max_conns
return engine
| 8,388 | Python | .py | 185 | 37.8 | 98 | 0.666667 | buildbot/buildbot | 5,232 | 1,616 | 728 | GPL-2.0 | 9/5/2024, 5:09:21 PM (Europe/Amsterdam) |
4,778 | script.py.mako | buildbot_buildbot/master/buildbot/db/migrations/script.py.mako | """${message}
Revision ID: ${up_revision}
Revises: ${down_revision | comma,n}
Create Date: ${create_date}
"""
from alembic import op
import sqlalchemy as sa
${imports if imports else ""}
# revision identifiers, used by Alembic.
revision = ${repr(up_revision)}
down_revision = ${repr(down_revision)}
branch_labels = ${repr(branch_labels)}
depends_on = ${repr(depends_on)}
def upgrade():
${upgrades if upgrades else "pass"}
def downgrade():
${downgrades if downgrades else "pass"}
| 494 | Python | .py | 17 | 27.176471 | 43 | 0.725532 | buildbot/buildbot | 5,232 | 1,616 | 728 | GPL-2.0 | 9/5/2024, 5:09:21 PM (Europe/Amsterdam) |
4,779 | env.py | buildbot_buildbot/master/buildbot/db/migrations/env.py | from logging.config import fileConfig
from alembic import context
from sqlalchemy import engine_from_config
from sqlalchemy import pool
from buildbot.db import model
# this is the Alembic Config object, which provides
# access to the values within the .ini file in use.
config = context.config
# Interpret the config file for Python logging.
# This line sets up loggers basically.
fileConfig(str(config.config_file_name))
target_metadata = model.Model.metadata
def run_migrations_offline():
"""Run migrations in 'offline' mode.
This configures the context with just a URL
and not an Engine, though an Engine is acceptable
here as well. By skipping the Engine creation
we don't even need a DBAPI to be available.
Calls to context.execute() here emit the given string to the
script output.
"""
url = config.get_main_option("sqlalchemy.url")
context.configure(
url=url,
target_metadata=target_metadata,
literal_binds=True,
dialect_opts={"paramstyle": "named"},
)
with context.begin_transaction():
context.run_migrations()
def run_migrations_online():
"""Run migrations in 'online' mode.
In this scenario we need to create an Engine
and associate a connection with the context.
"""
connectable = engine_from_config(
config.get_section(config.config_ini_section),
prefix="sqlalchemy.",
poolclass=pool.NullPool,
)
with connectable.connect() as connection:
context.configure(connection=connection, target_metadata=target_metadata)
with context.begin_transaction():
context.run_migrations()
if context.is_offline_mode():
run_migrations_offline()
else:
run_migrations_online()
| 1,764 | Python | .py | 48 | 31.6875 | 81 | 0.727166 | buildbot/buildbot | 5,232 | 1,616 | 728 | GPL-2.0 | 9/5/2024, 5:09:21 PM (Europe/Amsterdam) |
4,780 | 065_v4.0_add_buildsets_rebuilt_buildid.py | buildbot_buildbot/master/buildbot/db/migrations/versions/065_v4.0_add_buildsets_rebuilt_buildid.py | # This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
"""add rebuilt_buildid column to buildsets table
Revision ID: 065
Revises: 064
"""
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = '065'
down_revision = '064'
branch_labels = None
depends_on = None
def upgrade():
with op.batch_alter_table("buildsets") as batch_op:
batch_op.add_column(
sa.Column(
"rebuilt_buildid",
sa.Integer,
sa.ForeignKey(
"builds.id", use_alter=True, name="rebuilt_buildid", ondelete='SET NULL'
),
nullable=True,
),
)
def downgrade():
op.drop_column("buildsets", "rebuilt_buildid")
| 1,416 | Python | .py | 39 | 31.282051 | 92 | 0.696637 | buildbot/buildbot | 5,232 | 1,616 | 728 | GPL-2.0 | 9/5/2024, 5:09:21 PM (Europe/Amsterdam) |
4,781 | 060_v3.9_add_builder_projects.py | buildbot_buildbot/master/buildbot/db/migrations/versions/060_v3.9_add_builder_projects.py | # This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
"""add builder projects
Revision ID: 060
Revises: 059
"""
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = '060'
down_revision = '059'
branch_labels = None
depends_on = None
def upgrade():
hash_length = 40
op.create_table(
"projects",
sa.Column('id', sa.Integer, primary_key=True),
sa.Column('name', sa.Text, nullable=False),
sa.Column('name_hash', sa.String(hash_length), nullable=False),
sa.Column('slug', sa.String(50), nullable=False),
sa.Column('description', sa.Text, nullable=True),
mysql_DEFAULT_CHARSET='utf8',
)
with op.batch_alter_table("builders") as batch_op:
batch_op.add_column(
sa.Column(
'projectid',
sa.Integer,
sa.ForeignKey('projects.id', name="fk_builders_projectid", ondelete='SET NULL'),
nullable=True,
),
)
op.create_index('builders_projectid', "builders", ["projectid"])
op.create_index('projects_name_hash', "projects", ["name_hash"], unique=True)
def downgrade():
op.drop_index("builders_projectid")
op.drop_column("builders", "project")
op.drop_table("projects")
op.drop_index("projects_name_hash")
| 1,994 | Python | .py | 52 | 33.269231 | 96 | 0.688083 | buildbot/buildbot | 5,232 | 1,616 | 728 | GPL-2.0 | 9/5/2024, 5:09:21 PM (Europe/Amsterdam) |
4,782 | 066_v4.0_add_build_locks_duration_s.py | buildbot_buildbot/master/buildbot/db/migrations/versions/066_v4.0_add_build_locks_duration_s.py | # This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
"""add pause_reason column to workers table
Revision ID: 066
Revises: 065
"""
import sqlalchemy as sa
from alembic import op
from buildbot.util import sautils
# revision identifiers, used by Alembic.
revision = "066"
down_revision = "065"
branch_labels = None
depends_on = None
def upgrade():
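    # Three-step backfill (comment added for clarity): add the column as
    # nullable, backfill existing rows with 0, then tighten it to NOT NULL.
    # Adding a NOT NULL column directly would fail on engines that require a
    # default for already-existing rows.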
op.add_column("builds", sa.Column("locks_duration_s", sa.Integer, nullable=True))
metadata = sa.MetaData()
builds_tbl = sautils.Table(
"builds", metadata, sa.Column("locks_duration_s", sa.Integer, nullable=True)
)
op.execute(builds_tbl.update().values({builds_tbl.c.locks_duration_s: 0}))
with op.batch_alter_table("builds") as batch_op:
batch_op.alter_column("locks_duration_s", existing_type=sa.Integer, nullable=False)
def downgrade():
op.drop_column("builds", "locks_duration_s")
| 1,539 | Python | .py | 37 | 39.054054 | 91 | 0.754869 | buildbot/buildbot | 5,232 | 1,616 | 728 | GPL-2.0 | 9/5/2024, 5:09:21 PM (Europe/Amsterdam) |
4,783 | 061_v3.9_add_builder_description_format.py | buildbot_buildbot/master/buildbot/db/migrations/versions/061_v3.9_add_builder_description_format.py | # This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
"""add builder description format
Revision ID: 061
Revises: 060
"""
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = '061'
down_revision = '060'
branch_labels = None
depends_on = None
def upgrade():
with op.batch_alter_table("builders") as batch_op:
batch_op.add_column(
sa.Column('description_format', sa.Text, nullable=True),
)
batch_op.add_column(
sa.Column('description_html', sa.Text, nullable=True),
)
def downgrade():
op.drop_column("builders", "description_format")
op.drop_column("builders", "description_html")
| 1,353 | Python | .py | 36 | 34.444444 | 79 | 0.740826 | buildbot/buildbot | 5,232 | 1,616 | 728 | GPL-2.0 | 9/5/2024, 5:09:21 PM (Europe/Amsterdam) |
4,784 | 059_v3.4_alembic_initial.py | buildbot_buildbot/master/buildbot/db/migrations/versions/059_v3.4_alembic_initial.py | """initial
Revision ID: 059
Revises: (none)
Create Date: 2021-09-07 20:00:00.000000
This empty Alembic revision is used as a placeholder revision for upgrades from older versions
of the database.
"""
# revision identifiers, used by Alembic.
revision = '059'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
pass
def downgrade():
pass
| 375 | Python | .py | 16 | 21.5 | 94 | 0.772727 | buildbot/buildbot | 5,232 | 1,616 | 728 | GPL-2.0 | 9/5/2024, 5:09:21 PM (Europe/Amsterdam) |
4,785 | 062_v3.9_add_project_description_format.py | buildbot_buildbot/master/buildbot/db/migrations/versions/062_v3.9_add_project_description_format.py | # This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
"""add project description format
Revision ID: 062
Revises: 061
"""
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = '062'
down_revision = '061'
branch_labels = None
depends_on = None
def upgrade():
with op.batch_alter_table("projects") as batch_op:
batch_op.add_column(
sa.Column('description_format', sa.Text, nullable=True),
)
batch_op.add_column(
sa.Column('description_html', sa.Text, nullable=True),
)
def downgrade():
op.drop_column("projects", "description_format")
op.drop_column("projects", "description_html")
| 1,353 | Python | .py | 36 | 34.444444 | 79 | 0.740826 | buildbot/buildbot | 5,232 | 1,616 | 728 | GPL-2.0 | 9/5/2024, 5:09:21 PM (Europe/Amsterdam) |
4,786 | 063_v3.10_add_steps_locks_acquired_at.py | buildbot_buildbot/master/buildbot/db/migrations/versions/063_v3.10_add_steps_locks_acquired_at.py | # This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
"""add locks_acquired_at column to steps table
Revision ID: 063
Revises: 062
"""
import sqlalchemy as sa
from alembic import op
from buildbot.util import sautils
# revision identifiers, used by Alembic.
revision = '063'
down_revision = '062'
branch_labels = None
depends_on = None
def upgrade():
op.add_column("steps", sa.Column("locks_acquired_at", sa.Integer, nullable=True))
metadata = sa.MetaData()
steps_tbl = sautils.Table(
'steps',
metadata,
sa.Column("started_at", sa.Integer),
sa.Column("locks_acquired_at", sa.Integer),
)
op.execute(steps_tbl.update().values({steps_tbl.c.locks_acquired_at: steps_tbl.c.started_at}))
def downgrade():
op.drop_column("steps", "locks_acquired_at")
| 1,462 | Python | .py | 38 | 35.684211 | 98 | 0.745751 | buildbot/buildbot | 5,232 | 1,616 | 728 | GPL-2.0 | 9/5/2024, 5:09:21 PM (Europe/Amsterdam) |
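The upgrade above follows the add-column-then-backfill pattern: the nullable column is added first, then populated from started_at in a single UPDATE. A sketch of compiling that same statement standalone to inspect the SQL it emits, assuming the SQLite dialect:

import sqlalchemy as sa
from sqlalchemy.dialects import sqlite

metadata = sa.MetaData()
steps = sa.Table(
    "steps",
    metadata,
    sa.Column("started_at", sa.Integer),
    sa.Column("locks_acquired_at", sa.Integer),
)
stmt = steps.update().values({steps.c.locks_acquired_at: steps.c.started_at})
print(stmt.compile(dialect=sqlite.dialect()))
# roughly: UPDATE steps SET locks_acquired_at=started_at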
4,787 | 064_v3.10_add_worker_pause_reason.py | buildbot_buildbot/master/buildbot/db/migrations/versions/064_v3.10_add_worker_pause_reason.py | # This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
"""add pause_reason column to workers table
Revision ID: 064
Revises: 063
"""
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = '064'
down_revision = '063'
branch_labels = None
depends_on = None
def upgrade():
op.add_column("workers", sa.Column("pause_reason", sa.Text, nullable=True))
def downgrade():
op.drop_column("workers", "pause_reason")
| 1,116 | Python | .py | 29 | 36.896552 | 79 | 0.774583 | buildbot/buildbot | 5,232 | 1,616 | 728 | GPL-2.0 | 9/5/2024, 5:09:21 PM (Europe/Amsterdam) |
4,788 | json.py | buildbot_buildbot/master/buildbot/db/types/json.py | # This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
import json
from sqlalchemy.types import Text
from sqlalchemy.types import TypeDecorator
class JsonObject(TypeDecorator):
"""Represents an immutable json-encoded string."""
cache_ok = True
impl = Text
def process_bind_param(self, value, dialect):
if value is not None:
value = json.dumps(value)
return value
def process_result_value(self, value, dialect):
if value is not None:
value = json.loads(value)
else:
value = {}
return value
| 1,244 | Python | .py | 31 | 35.741935 | 79 | 0.730897 | buildbot/buildbot | 5,232 | 1,616 | 728 | GPL-2.0 | 9/5/2024, 5:09:21 PM (Europe/Amsterdam) |
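A round-trip sketch for JsonObject: values are serialized to TEXT on the way in and decoded on the way out, with NULL mapping to an empty dict. The table name and column are illustrative, not part of Buildbot's schema.

import sqlalchemy as sa

from buildbot.db.types.json import JsonObject

metadata = sa.MetaData()
tbl = sa.Table("example", metadata, sa.Column("attrs", JsonObject))

engine = sa.create_engine("sqlite://")
metadata.create_all(engine)
with engine.begin() as conn:
    conn.execute(tbl.insert().values(attrs={"retries": 3}))
    stored = conn.execute(sa.select(tbl.c.attrs)).scalar_one()
print(stored)  # {'retries': 3}, decoded from the JSON text in the column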
4,789 | protocol.py | buildbot_buildbot/master/buildbot/db/compression/protocol.py | # This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
from __future__ import annotations
from abc import abstractmethod
from typing import TYPE_CHECKING
from typing import Protocol
if TYPE_CHECKING:
from typing import ClassVar
class CompressObjInterface(Protocol):
def __init__(self) -> None:
pass
@abstractmethod
def compress(self, data: bytes) -> bytes:
raise NotImplementedError
@abstractmethod
def flush(self) -> bytes:
raise NotImplementedError
class CompressorInterface(Protocol):
name: ClassVar[str]
available: ClassVar[bool] = True
CompressObj: ClassVar[type[CompressObjInterface]]
@staticmethod
@abstractmethod
def dumps(data: bytes) -> bytes:
raise NotImplementedError
@staticmethod
@abstractmethod
def read(data: bytes) -> bytes:
raise NotImplementedError
| 1,530 | Python | .py | 41 | 33.585366 | 79 | 0.758971 | buildbot/buildbot | 5,232 | 1,616 | 728 | GPL-2.0 | 9/5/2024, 5:09:21 PM (Europe/Amsterdam) |
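A sketch of a trivial pass-through compressor that satisfies CompressorInterface; useful as a template when wiring in a new algorithm. It is not part of Buildbot itself, and `available` is inherited as True from the protocol.

from buildbot.db.compression.protocol import CompressObjInterface
from buildbot.db.compression.protocol import CompressorInterface

class IdentityCompressor(CompressorInterface):
    name = "identity"

    @staticmethod
    def dumps(data: bytes) -> bytes:
        return data

    @staticmethod
    def read(data: bytes) -> bytes:
        return data

    class CompressObj(CompressObjInterface):
        def compress(self, data: bytes) -> bytes:
            return data

        def flush(self) -> bytes:
            return b''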
4,790 | lz4.py | buildbot_buildbot/master/buildbot/db/compression/lz4.py | # This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
from __future__ import annotations
from buildbot.db.compression.protocol import CompressObjInterface
from buildbot.db.compression.protocol import CompressorInterface
try:
import lz4.block
HAS_LZ4 = True
except ImportError:
HAS_LZ4 = False
class LZ4Compressor(CompressorInterface):
name = "lz4"
available = HAS_LZ4
@staticmethod
def dumps(data: bytes) -> bytes:
return lz4.block.compress(data)
@staticmethod
def read(data: bytes) -> bytes:
return lz4.block.decompress(data)
    # lz4.block has no streaming compression object, so buffer every chunk
    # and compress them in one shot at flush() to keep the interface compatible
class CompressObj(CompressObjInterface):
def __init__(self) -> None:
self._buffer: list[bytes] = []
def compress(self, data: bytes) -> bytes:
self._buffer.append(data)
return b''
def flush(self) -> bytes:
compressed_buffer = LZ4Compressor.dumps(b''.join(self._buffer))
self._buffer = []
return compressed_buffer
| 1,756 | Python | .py | 43 | 35.860465 | 79 | 0.719154 | buildbot/buildbot | 5,232 | 1,616 | 728 | GPL-2.0 | 9/5/2024, 5:09:21 PM (Europe/Amsterdam) |
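A usage sketch, assuming the optional lz4 package is installed: the CompressObj buffers every chunk and only emits output at flush(), unlike the zlib/bz2 objects, which may emit data earlier.

from buildbot.db.compression.lz4 import LZ4Compressor

obj = LZ4Compressor.CompressObj()
assert obj.compress(b"hello ") == b''  # buffered, nothing emitted yet
assert obj.compress(b"world") == b''
blob = obj.flush()  # the whole buffer is compressed in one shot
assert LZ4Compressor.read(blob) == b"hello world"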
4,791 | zstd.py | buildbot_buildbot/master/buildbot/db/compression/zstd.py | # This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
from __future__ import annotations
from buildbot.db.compression.protocol import CompressObjInterface
from buildbot.db.compression.protocol import CompressorInterface
try:
import zstandard
HAS_ZSTD = True
except ImportError:
HAS_ZSTD = False
class ZStdCompressor(CompressorInterface):
name = "zstd"
available = HAS_ZSTD
COMPRESS_LEVEL = 9
@staticmethod
def dumps(data: bytes) -> bytes:
return zstandard.compress(data, level=ZStdCompressor.COMPRESS_LEVEL)
@staticmethod
def read(data: bytes) -> bytes:
        # Data compressed with the streaming APIs does not contain the
        # content size in its frame header, which ZstdDecompressor.decompress
        # expects; use a ZstdDecompressionObj instead.
# see: https://github.com/indygreg/python-zstandard/issues/150
decompress_obj = zstandard.ZstdDecompressor().decompressobj()
return decompress_obj.decompress(data) + decompress_obj.flush()
class CompressObj(CompressObjInterface):
def __init__(self) -> None:
            # A ZstdCompressor instance is safe to re-use across payloads,
            # but it is not thread-safe.
self._compressor = zstandard.ZstdCompressor(level=ZStdCompressor.COMPRESS_LEVEL)
self._create_compressobj()
def _create_compressobj(self) -> None:
self._compressobj = self._compressor.compressobj()
def compress(self, data: bytes) -> bytes:
return self._compressobj.compress(data)
def flush(self) -> bytes:
try:
return self._compressobj.flush(flush_mode=zstandard.COMPRESSOBJ_FLUSH_FINISH)
finally:
# recreate compressobj so this instance can be re-used
self._create_compressobj()
| 2,491 | Python | .py | 54 | 39.62963 | 93 | 0.713284 | buildbot/buildbot | 5,232 | 1,616 | 728 | GPL-2.0 | 9/5/2024, 5:09:21 PM (Europe/Amsterdam) |
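A usage sketch, assuming the zstandard package is installed: output from the streaming CompressObj lacks the content size in its frame header, which is exactly why read() goes through a decompressobj. The object is reusable after flush().

from buildbot.db.compression.zstd import ZStdCompressor

obj = ZStdCompressor.CompressObj()
blob = obj.compress(b"log line 1\n") + obj.flush()
assert ZStdCompressor.read(blob) == b"log line 1\n"

# flush() recreated the internal compressobj, so the instance can be reused
blob2 = obj.compress(b"log line 2\n") + obj.flush()
assert ZStdCompressor.read(blob2) == b"log line 2\n"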
4,792 | brotli.py | buildbot_buildbot/master/buildbot/db/compression/brotli.py | # This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
from __future__ import annotations
from buildbot.db.compression.protocol import CompressObjInterface
from buildbot.db.compression.protocol import CompressorInterface
try:
import brotli
MODE_TEXT = brotli.MODE_TEXT
HAS_BROTLI = True
except ImportError:
HAS_BROTLI = False
MODE_TEXT = None
class BrotliCompressor(CompressorInterface):
name = "br"
available = HAS_BROTLI
COMPRESS_QUALITY = 11
MODE = MODE_TEXT
@staticmethod
def dumps(data: bytes) -> bytes:
return brotli.compress(
data,
mode=BrotliCompressor.MODE,
quality=BrotliCompressor.COMPRESS_QUALITY,
)
@staticmethod
def read(data: bytes) -> bytes:
return brotli.decompress(data)
class CompressObj(CompressObjInterface):
def __init__(self) -> None:
self._create_compressobj()
def _create_compressobj(self) -> None:
self._compressobj = brotli.Compressor(
mode=BrotliCompressor.MODE,
quality=BrotliCompressor.COMPRESS_QUALITY,
)
def compress(self, data: bytes) -> bytes:
return self._compressobj.process(data)
def flush(self) -> bytes:
try:
return self._compressobj.finish()
finally:
# recreate compressobj so this instance can be re-used
self._create_compressobj()
| 2,137 | Python | .py | 55 | 32.145455 | 79 | 0.690039 | buildbot/buildbot | 5,232 | 1,616 | 728 | GPL-2.0 | 9/5/2024, 5:09:21 PM (Europe/Amsterdam) |
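A usage sketch, assuming the brotli package is installed: one-shot and streaming output both decode through the same read() call.

from buildbot.db.compression.brotli import BrotliCompressor

payload = b"<html>build log</html>"
one_shot = BrotliCompressor.dumps(payload)

obj = BrotliCompressor.CompressObj()
streamed = obj.compress(payload[:6]) + obj.compress(payload[6:]) + obj.flush()

assert BrotliCompressor.read(one_shot) == payload
assert BrotliCompressor.read(streamed) == payload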
4,793 | native.py | buildbot_buildbot/master/buildbot/db/compression/native.py | # This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
from __future__ import annotations
import bz2
import zlib
from buildbot.db.compression.protocol import CompressObjInterface
from buildbot.db.compression.protocol import CompressorInterface
class GZipCompressor(CompressorInterface):
name = "gz"
COMPRESS_LEVEL = zlib.Z_BEST_COMPRESSION
@staticmethod
def dumps(data: bytes) -> bytes:
return zlib.compress(data, level=GZipCompressor.COMPRESS_LEVEL)
@staticmethod
def read(data: bytes) -> bytes:
return zlib.decompress(data)
class CompressObj(CompressObjInterface):
def __init__(self) -> None:
self._create_compressobj()
def _create_compressobj(self) -> None:
self._compressobj = zlib.compressobj(level=GZipCompressor.COMPRESS_LEVEL)
def compress(self, data: bytes) -> bytes:
return self._compressobj.compress(data)
def flush(self) -> bytes:
try:
return self._compressobj.flush(zlib.Z_FINISH)
finally:
# recreate compressobj so this instance can be re-used
self._create_compressobj()
class BZipCompressor(CompressorInterface):
name = "bz2"
COMPRESS_LEVEL = 9
@staticmethod
def dumps(data: bytes) -> bytes:
return bz2.compress(data, BZipCompressor.COMPRESS_LEVEL)
@staticmethod
def read(data: bytes) -> bytes:
return bz2.decompress(data)
class CompressObj(CompressObjInterface):
def __init__(self) -> None:
self._create_compressobj()
def _create_compressobj(self) -> None:
self._compressobj = bz2.BZ2Compressor(BZipCompressor.COMPRESS_LEVEL)
def compress(self, data: bytes) -> bytes:
return self._compressobj.compress(data)
def flush(self) -> bytes:
try:
return self._compressobj.flush()
finally:
# recreate compressobj so this instance can be re-used
self._create_compressobj()
| 2,718 | Python | .py | 63 | 35.968254 | 85 | 0.690205 | buildbot/buildbot | 5,232 | 1,616 | 728 | GPL-2.0 | 9/5/2024, 5:09:21 PM (Europe/Amsterdam) |
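Both stdlib-backed compressors honour the same interface, so a round trip can be written generically; a minimal sketch:

from buildbot.db.compression.native import BZipCompressor
from buildbot.db.compression.native import GZipCompressor

for compressor in (GZipCompressor, BZipCompressor):
    obj = compressor.CompressObj()
    blob = obj.compress(b"chunk-a") + obj.compress(b"chunk-b") + obj.flush()
    assert compressor.read(blob) == b"chunk-achunk-b"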
4,794 | __init__.py | buildbot_buildbot/master/buildbot/db/compression/__init__.py | # This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
from buildbot.db.compression.brotli import BrotliCompressor
from buildbot.db.compression.lz4 import LZ4Compressor
from buildbot.db.compression.native import BZipCompressor
from buildbot.db.compression.native import GZipCompressor
from buildbot.db.compression.protocol import CompressorInterface
from buildbot.db.compression.zstd import ZStdCompressor
__all__ = [
'BrotliCompressor',
'BZipCompressor',
'CompressorInterface',
'GZipCompressor',
'LZ4Compressor',
'ZStdCompressor',
]
| 1,210 | Python | .py | 28 | 41.285714 | 79 | 0.802542 | buildbot/buildbot | 5,232 | 1,616 | 728 | GPL-2.0 | 9/5/2024, 5:09:21 PM (Europe/Amsterdam) |
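A sketch of resolving a compressor by its short name while skipping ones whose optional dependency is missing; the helper and its registry tuple are illustrative, not a Buildbot API.

from __future__ import annotations

from buildbot.db import compression

_CANDIDATES = (
    compression.ZStdCompressor,
    compression.BrotliCompressor,
    compression.LZ4Compressor,
    compression.GZipCompressor,
    compression.BZipCompressor,
)

def get_compressor(name: str) -> type[compression.CompressorInterface]:
    for candidate in _CANDIDATES:
        if candidate.name == name and candidate.available:
            return candidate
    raise KeyError(f"no available compressor named {name!r}")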
4,795 | errors.py | buildbot_buildbot/master/buildbot/config/errors.py | # This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
from __future__ import annotations
import contextlib
class ConfigErrors(Exception):
def __init__(self, errors=None):
if errors is None:
errors = []
self.errors = errors[:]
def __str__(self):
return "\n".join(self.errors)
def addError(self, msg):
self.errors.append(msg)
def merge(self, errors):
self.errors.extend(errors.errors)
def __bool__(self):
return bool(len(self.errors))
_errors: ConfigErrors | None = None
def error(error, always_raise=False):
if _errors is not None and not always_raise:
_errors.addError(error)
else:
raise ConfigErrors([error])
@contextlib.contextmanager
def capture_config_errors(raise_on_error=False):
global _errors
prev_errors = _errors
_errors = errors = ConfigErrors()
try:
yield errors
except ConfigErrors as e:
errors.merge(e)
finally:
_errors = prev_errors
if raise_on_error and errors:
raise errors
| 1,721 | Python | .py | 48 | 31.125 | 79 | 0.706876 | buildbot/buildbot | 5,232 | 1,616 | 728 | GPL-2.0 | 9/5/2024, 5:09:21 PM (Europe/Amsterdam) |
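A usage sketch: error() calls made while a capture context is active are collected into one ConfigErrors instead of raising immediately, so all of a config file's problems can be reported in a single pass.

from buildbot.config.errors import ConfigErrors
from buildbot.config.errors import capture_config_errors
from buildbot.config.errors import error

try:
    with capture_config_errors(raise_on_error=True):
        error("first problem")
        error("second problem")
except ConfigErrors as e:
    print(str(e))  # both messages, one per line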
4,796 | checks.py | buildbot_buildbot/master/buildbot/config/checks.py | # This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
from buildbot.config.errors import error
def check_param_length(value, name, max_length):
if isinstance(value, str) and len(value) > max_length:
error(f"{name} '{value}' exceeds maximum length of {max_length}")
qualified_name = f"{type(value).__module__}.{type(value).__name__}"
if qualified_name == 'buildbot.process.properties.Interpolate':
if value.args:
interpolations = tuple([''] * len(value.args))
else:
interpolations = {k: '' for k in value.interpolations}
shortest_value = value.fmtstring % interpolations
if len(shortest_value) > max_length:
error(
f"{name} '{value}' (shortest interpolation) exceeds maximum length of "
f"{max_length}"
)
def check_param_type(value, default_value, class_inst, name, types, types_msg):
if isinstance(value, types):
return value
error(f"{class_inst.__name__} argument {name} must be an instance of {types_msg}")
return default_value
def check_param_bool(value, class_inst, name):
return check_param_type(value, False, class_inst, name, (bool,), "bool")
def check_param_str(value, class_inst, name):
return check_param_type(value, "(unknown)", class_inst, name, (str,), "str")
def check_param_str_none(value, class_inst, name):
return check_param_type(value, "(unknown)", class_inst, name, (str, type(None)), "str or None")
def check_param_int(value, class_inst, name):
return check_param_type(value, 0, class_inst, name, (int,), "int")
def check_param_int_none(value, class_inst, name):
return check_param_type(value, None, class_inst, name, (int, type(None)), "int or None")
def check_param_number_none(value, class_inst, name):
return check_param_type(
value, 0, class_inst, name, (int, float, type(None)), "int or float or None"
)
def check_markdown_support(class_inst):
try:
import markdown # pylint: disable=import-outside-toplevel
_ = markdown
return True
except ImportError: # pragma: no cover
error(
f"{class_inst.__name__}: Markdown library is required in order to use "
"markdown format ('pip install Markdown')"
)
return False
| 2,977 | Python | .py | 60 | 43.8 | 99 | 0.684047 | buildbot/buildbot | 5,232 | 1,616 | 728 | GPL-2.0 | 9/5/2024, 5:09:21 PM (Europe/Amsterdam) |
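A behaviour sketch for the check_param_* helpers: a failed type check reports through error() and returns the supplied default, so config loading keeps going and gathers every problem. The FakeStep class is an illustrative stand-in.

from buildbot.config.checks import check_param_int_none
from buildbot.config.errors import capture_config_errors

class FakeStep:
    pass

with capture_config_errors() as errors:
    value = check_param_int_none("not-an-int", FakeStep, "timeout")

print(value)  # None, the fallback default
print(errors.errors)  # ['FakeStep argument timeout must be an instance of int or None']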
4,797 | builder.py | buildbot_buildbot/master/buildbot/config/builder.py | # This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
from buildbot.config.checks import check_markdown_support
from buildbot.config.checks import check_param_length
from buildbot.config.checks import check_param_str_none
from buildbot.config.errors import error
from buildbot.db.model import Model
from buildbot.util import bytes2unicode
from buildbot.util import config as util_config
from buildbot.util import safeTranslate
RESERVED_UNDERSCORE_NAMES = ["__Janitor"]
class BuilderConfig(util_config.ConfiguredMixin):
def __init__(
self,
name=None,
workername=None,
workernames=None,
builddir=None,
workerbuilddir=None,
factory=None,
tags=None,
nextWorker=None,
nextBuild=None,
locks=None,
env=None,
properties=None,
collapseRequests=None,
description=None,
description_format=None,
canStartBuild=None,
defaultProperties=None,
project=None,
):
# name is required, and can't start with '_'
if not name or type(name) not in (bytes, str):
error("builder's name is required")
name = '<unknown>'
elif name[0] == '_' and name not in RESERVED_UNDERSCORE_NAMES:
error(f"builder names must not start with an underscore: '{name}'")
try:
self.name = bytes2unicode(name, encoding="ascii")
except UnicodeDecodeError:
error("builder names must be unicode or ASCII")
if not isinstance(project, (type(None), str)):
error("builder project must be None or str")
project = None
self.project = project
# factory is required
if factory is None:
error(f"builder '{name}' has no factory")
from buildbot.process.factory import BuildFactory
if factory is not None and not isinstance(factory, BuildFactory):
error(f"builder '{name}'s factory is not a BuildFactory instance")
self.factory = factory
# workernames can be a single worker name or a list, and should also
# include workername, if given
if isinstance(workernames, str):
workernames = [workernames]
if workernames:
if not isinstance(workernames, list):
error(f"builder '{name}': workernames must be a list or a string")
else:
workernames = []
if workername:
if not isinstance(workername, str):
error(f"builder '{name}': workername must be a string but it is {workername!r}")
workernames = [*workernames, workername]
if not workernames:
error(f"builder '{name}': at least one workername is required")
self.workernames = workernames
# builddir defaults to name
if builddir is None:
builddir = safeTranslate(name)
builddir = bytes2unicode(builddir)
self.builddir = builddir
# workerbuilddir defaults to builddir
if workerbuilddir is None:
workerbuilddir = builddir
self.workerbuilddir = workerbuilddir
# remainder are optional
if tags:
if not isinstance(tags, list):
error(f"builder '{name}': tags must be a list")
bad_tags = any(tag for tag in tags if not isinstance(tag, str))
if bad_tags:
error(f"builder '{name}': tags list contains something that is not a string")
if len(tags) != len(set(tags)):
dupes = " ".join({x for x in tags if tags.count(x) > 1})
error(f"builder '{name}': tags list contains duplicate tags: {dupes}")
else:
tags = []
self.tags = tags
self.nextWorker = nextWorker
if nextWorker and not callable(nextWorker):
error('nextWorker must be a callable')
self.nextBuild = nextBuild
if nextBuild and not callable(nextBuild):
error('nextBuild must be a callable')
self.canStartBuild = canStartBuild
if canStartBuild and not callable(canStartBuild):
error('canStartBuild must be a callable')
self.locks = locks or []
self.env = env or {}
if not isinstance(self.env, dict):
error("builder's env must be a dictionary")
self.properties = properties or {}
for property_name in self.properties:
check_param_length(
property_name, f'Builder {self.name} property', Model.property_name_length
)
self.defaultProperties = defaultProperties or {}
for property_name in self.defaultProperties:
check_param_length(
property_name, f'Builder {self.name} default property', Model.property_name_length
)
self.collapseRequests = collapseRequests
self.description = check_param_str_none(description, self.__class__, "description")
self.description_format = check_param_str_none(
description_format, self.__class__, "description_format"
)
if self.description_format is None:
pass
elif self.description_format == "markdown":
if not check_markdown_support(self.__class__): # pragma: no cover
self.description_format = None
else:
error("builder description format must be None or \"markdown\"")
self.description_format = None
def getConfigDict(self):
# note: this method will disappear eventually - put your smarts in the
# constructor!
rv = {
'name': self.name,
'workernames': self.workernames,
'factory': self.factory,
'builddir': self.builddir,
'workerbuilddir': self.workerbuilddir,
}
if self.project:
rv['project'] = self.project
if self.tags:
rv['tags'] = self.tags
if self.nextWorker:
rv['nextWorker'] = self.nextWorker
if self.nextBuild:
rv['nextBuild'] = self.nextBuild
if self.locks:
rv['locks'] = self.locks
if self.env:
rv['env'] = self.env
if self.properties:
rv['properties'] = self.properties
if self.defaultProperties:
rv['defaultProperties'] = self.defaultProperties
if self.collapseRequests is not None:
rv['collapseRequests'] = self.collapseRequests
if self.description:
rv['description'] = self.description
if self.description_format:
rv['description_format'] = self.description_format
return rv
| 7,385 | Python | .py | 173 | 33.063584 | 98 | 0.629939 | buildbot/buildbot | 5,232 | 1,616 | 728 | GPL-2.0 | 9/5/2024, 5:09:21 PM (Europe/Amsterdam) |
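A sketch of the typical master.cfg fragment that instantiates BuilderConfig; the worker name and command are illustrative.

from buildbot.config import BuilderConfig
from buildbot.plugins import steps, util

c = BuildmasterConfig = {}

factory = util.BuildFactory()
factory.addStep(steps.ShellCommand(command=["make", "test"]))

c['builders'] = [
    BuilderConfig(
        name="runtests",
        workernames=["worker-1"],
        factory=factory,
        tags=["ci"],
        description="runs the test suite",
    ),
]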
4,798 | __init__.py | buildbot_buildbot/master/buildbot/config/__init__.py | # This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
from .builder import BuilderConfig # noqa: F401
from .errors import ConfigErrors # noqa: F401
from .errors import error # noqa: F401
| 843 | Python | .py | 17 | 48.470588 | 79 | 0.786408 | buildbot/buildbot | 5,232 | 1,616 | 728 | GPL-2.0 | 9/5/2024, 5:09:21 PM (Europe/Amsterdam) |
4,799 | master.py | buildbot_buildbot/master/buildbot/config/master.py | # This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
import datetime
import os
import re
import sys
import traceback
import warnings
from typing import ClassVar
from typing import Sequence
from twisted.python import failure
from twisted.python import log
from twisted.python.compat import execfile
from zope.interface import implementer
from buildbot import interfaces
from buildbot import locks
from buildbot import util
from buildbot.config.builder import BuilderConfig
from buildbot.config.errors import ConfigErrors
from buildbot.config.errors import capture_config_errors
from buildbot.config.errors import error
from buildbot.db.compression import ZStdCompressor
from buildbot.interfaces import IRenderable
from buildbot.process.project import Project
from buildbot.revlinks import default_revlink_matcher
from buildbot.util import ComparableMixin
from buildbot.util import identifiers as util_identifiers
from buildbot.util import service as util_service
from buildbot.warnings import ConfigWarning
from buildbot.www import auth
from buildbot.www import avatar
from buildbot.www.authz import authz
DEFAULT_DB_URL = 'sqlite:///state.sqlite'
_in_unit_tests = False
def set_is_in_unit_tests(in_tests):
global _in_unit_tests
_in_unit_tests = in_tests
def get_is_in_unit_tests():
return _in_unit_tests
def _default_log_compression_method():
if ZStdCompressor.available:
return ZStdCompressor.name
return 'gz'
def loadConfigDict(basedir, configFileName):
if not os.path.isdir(basedir):
raise ConfigErrors([f"basedir '{basedir}' does not exist"])
filename = os.path.join(basedir, configFileName)
if not os.path.exists(filename):
raise ConfigErrors([f"configuration file '{filename}' does not exist"])
try:
with open(filename, encoding='utf-8'):
pass
except OSError as e:
raise ConfigErrors([f"unable to open configuration file {filename!r}: {e}"]) from e
log.msg(f"Loading configuration from {filename!r}")
# execute the config file
localDict = {
'basedir': os.path.expanduser(basedir),
'__file__': os.path.abspath(filename),
}
old_sys_path = sys.path[:]
sys.path.append(basedir)
try:
try:
execfile(filename, localDict)
except ConfigErrors:
raise
except SyntaxError:
error(
f"encountered a SyntaxError while parsing config file:\n{traceback.format_exc()} ",
always_raise=True,
)
except Exception:
log.err(failure.Failure(), 'error while parsing config file:')
error(
f"error while parsing config file: {sys.exc_info()[1]} (traceback in logfile)",
always_raise=True,
)
finally:
sys.path[:] = old_sys_path
if 'BuildmasterConfig' not in localDict:
error(
f"Configuration file {filename!r} does not define 'BuildmasterConfig'",
always_raise=True,
)
return filename, localDict['BuildmasterConfig']
@implementer(interfaces.IConfigLoader)
class FileLoader(ComparableMixin):
compare_attrs: ClassVar[Sequence[str]] = ['basedir', 'configFileName']
def __init__(self, basedir, configFileName):
self.basedir = basedir
self.configFileName = configFileName
def loadConfig(self):
# from here on out we can batch errors together for the user's
# convenience
with capture_config_errors(raise_on_error=True):
filename, config_dict = loadConfigDict(self.basedir, self.configFileName)
config = MasterConfig.loadFromDict(config_dict, filename)
return config
class MasterConfig(util.ComparableMixin):
def __init__(self):
# local import to avoid circular imports
from buildbot.process import properties
# default values for all attributes
# global
self.title = 'Buildbot'
self.titleURL = 'http://buildbot.net/'
self.buildbotURL = 'http://localhost:8080/'
self.changeHorizon = None
self.logCompressionLimit = 4 * 1024
self.logCompressionMethod = _default_log_compression_method()
self.logEncoding = 'utf-8'
self.logMaxSize = None
self.logMaxTailSize = None
self.properties = properties.Properties()
self.collapseRequests = None
self.codebaseGenerator = None
self.prioritizeBuilders = None
self.select_next_worker = None
self.multiMaster = False
self.manhole = None
self.protocols = {}
self.buildbotNetUsageData = "basic"
self.validation = {
"branch": re.compile(r'^[\w.+/~-]*$'),
"revision": re.compile(r'^[ \w\.\-/]*$'),
"property_name": re.compile(r'^[\w\.\-/~:]*$'),
"property_value": re.compile(r'^[\w\.\-/~:]*$'),
}
self.db = {"db_url": DEFAULT_DB_URL}
self.mq = {"type": 'simple'}
self.metrics = None
self.caches = {"Builds": 15, "Changes": 10}
self.schedulers = {}
self.secretsProviders = []
self.builders = []
self.workers = []
self.change_sources = []
self.machines = []
self.projects = []
self.status = []
self.user_managers = []
self.revlink = default_revlink_matcher
self.www = {
"port": None,
"plugins": {},
"auth": auth.NoAuth(),
"authz": authz.Authz(),
"avatar_methods": avatar.AvatarGravatar(),
"logfileName": 'http.log',
}
self.services = {}
_known_config_keys = set([
"buildbotNetUsageData",
"buildbotURL",
"buildCacheSize",
"builders",
"caches",
"change_source",
"codebaseGenerator",
"configurators",
"changeCacheSize",
"changeHorizon",
'db',
"db_url",
"logCompressionLimit",
"logCompressionMethod",
"logEncoding",
"logMaxSize",
"logMaxTailSize",
"manhole",
"machines",
"collapseRequests",
"metrics",
"mq",
"multiMaster",
"prioritizeBuilders",
"projects",
"properties",
"protocols",
"revlink",
"schedulers",
"secretsProviders",
"select_next_worker",
"services",
"title",
"titleURL",
"user_managers",
"validation",
"www",
"workers",
])
compare_attrs: ClassVar[Sequence[str]] = list(_known_config_keys)
def preChangeGenerator(self, **kwargs):
return {
'author': kwargs.get('author', None),
'files': kwargs.get('files', None),
'comments': kwargs.get('comments', None),
'revision': kwargs.get('revision', None),
'when_timestamp': kwargs.get('when_timestamp', None),
'branch': kwargs.get('branch', None),
'category': kwargs.get('category', None),
'revlink': kwargs.get('revlink', ''),
'properties': kwargs.get('properties', {}),
'repository': kwargs.get('repository', ''),
'project': kwargs.get('project', ''),
'codebase': kwargs.get('codebase', None),
}
@classmethod
def loadFromDict(cls, config_dict, filename):
# warning, all of this is loaded from a thread
with capture_config_errors(raise_on_error=True):
# check for unknown keys
unknown_keys = set(config_dict.keys()) - cls._known_config_keys
if unknown_keys:
if len(unknown_keys) == 1:
error(f'Unknown BuildmasterConfig key {unknown_keys.pop()}')
else:
error(f"Unknown BuildmasterConfig keys {', '.join(sorted(unknown_keys))}")
# instantiate a new config object, which will apply defaults
# automatically
config = cls()
# and defer the rest to sub-functions, for code clarity
config.run_configurators(filename, config_dict)
config.load_global(filename, config_dict)
config.load_validation(filename, config_dict)
config.load_db(filename, config_dict)
config.load_mq(filename, config_dict)
config.load_metrics(filename, config_dict)
config.load_secrets(filename, config_dict)
config.load_caches(filename, config_dict)
config.load_schedulers(filename, config_dict)
config.load_projects(filename, config_dict)
config.load_builders(filename, config_dict)
config.load_workers(filename, config_dict)
config.load_change_sources(filename, config_dict)
config.load_machines(filename, config_dict)
config.load_user_managers(filename, config_dict)
config.load_www(filename, config_dict)
config.load_services(filename, config_dict)
# run some sanity checks
config.check_single_master()
config.check_schedulers()
config.check_locks()
config.check_projects()
config.check_builders()
config.check_ports()
config.check_machines()
return config
def run_configurators(self, filename, config_dict):
for configurator in config_dict.get('configurators', []):
interfaces.IConfigurator(configurator).configure(config_dict)
def load_global(self, filename, config_dict):
def copy_param(name, check_type=None, check_type_name=None, can_be_callable=False):
if name in config_dict:
v = config_dict[name]
else:
return
if (
v is not None
and check_type
and not (isinstance(v, check_type) or (can_be_callable and callable(v)))
):
error(f"c['{name}'] must be {check_type_name}")
else:
setattr(self, name, v)
def copy_int_param(name):
copy_param(name, check_type=int, check_type_name='an int')
def copy_str_param(name):
copy_param(name, check_type=(str,), check_type_name='a string')
def copy_str_url_param_with_trailing_slash(name):
copy_str_param(name)
url = getattr(self, name, None)
if url is not None and not url.endswith('/'):
setattr(self, name, url + '/')
copy_str_param('title')
max_title_len = 18
if len(self.title) > max_title_len:
# Warn if the title length limiting logic in www/base/src/app/app.route.js
# would hide the title.
warnings.warn(
                'WARNING: Title is too long to be displayed. "Buildbot" will be used instead.',
category=ConfigWarning,
stacklevel=1,
)
copy_str_url_param_with_trailing_slash('titleURL')
copy_str_url_param_with_trailing_slash('buildbotURL')
def copy_str_or_callable_param(name):
copy_param(
name,
check_type=(str,),
check_type_name='a string or callable',
can_be_callable=True,
)
if "buildbotNetUsageData" not in config_dict:
if get_is_in_unit_tests():
self.buildbotNetUsageData = None
else:
warnings.warn(
'`buildbotNetUsageData` is not configured and defaults to basic.\n'
'This parameter helps the buildbot development team to understand'
' the installation base.\n'
'No personal information is collected.\n'
'Only installation software version info and plugin usage is sent.\n'
'You can `opt-out` by setting this variable to None.\n'
'Or `opt-in` for more information by setting it to "full".\n',
category=ConfigWarning,
stacklevel=1,
)
copy_str_or_callable_param('buildbotNetUsageData')
copy_int_param('changeHorizon')
copy_int_param('logCompressionLimit')
self.logCompressionMethod = config_dict.get(
'logCompressionMethod',
_default_log_compression_method(),
)
if self.logCompressionMethod not in ('raw', 'bz2', 'gz', 'lz4', 'zstd', 'br'):
error("c['logCompressionMethod'] must be 'raw', 'bz2', 'gz', 'lz4', 'br' or 'zstd'")
if self.logCompressionMethod == "lz4":
try:
import lz4 # pylint: disable=import-outside-toplevel
_ = lz4
except ImportError:
error(
"To set c['logCompressionMethod'] to 'lz4' "
"you must install the lz4 library ('pip install lz4')"
)
elif self.logCompressionMethod == "zstd":
try:
import zstandard # pylint: disable=import-outside-toplevel
_ = zstandard
except ImportError:
error(
"To set c['logCompressionMethod'] to 'zstd' "
"you must install the zstandard Buildbot extra ('pip install buildbot[zstd]')"
)
elif self.logCompressionMethod == "br":
try:
import brotli # pylint: disable=import-outside-toplevel
_ = brotli
except ImportError:
error(
"To set c['logCompressionMethod'] to 'br' "
"you must install the brotli Buildbot extra ('pip install buildbot[brotli]')"
)
copy_int_param('logMaxSize')
copy_int_param('logMaxTailSize')
copy_param('logEncoding')
properties = config_dict.get('properties', {})
if not isinstance(properties, dict):
error("c['properties'] must be a dictionary")
else:
self.properties.update(properties, filename)
collapseRequests = config_dict.get('collapseRequests')
if collapseRequests not in (None, True, False) and not callable(collapseRequests):
error("collapseRequests must be a callable, True, or False")
else:
self.collapseRequests = collapseRequests
codebaseGenerator = config_dict.get('codebaseGenerator')
if codebaseGenerator is not None and not callable(codebaseGenerator):
error("codebaseGenerator must be a callable accepting a dict and returning a str")
else:
self.codebaseGenerator = codebaseGenerator
prioritizeBuilders = config_dict.get('prioritizeBuilders')
if prioritizeBuilders is not None and not callable(prioritizeBuilders):
error("prioritizeBuilders must be a callable")
else:
self.prioritizeBuilders = prioritizeBuilders
select_next_worker = config_dict.get("select_next_worker")
if select_next_worker is not None and not callable(select_next_worker):
error("select_next_worker must be a callable")
else:
self.select_next_worker = select_next_worker
protocols = config_dict.get('protocols', {})
if isinstance(protocols, dict):
for proto, options in protocols.items():
if not isinstance(proto, str):
error("c['protocols'] keys must be strings")
if not isinstance(options, dict):
error(f"c['protocols']['{proto}'] must be a dict")
return
if proto == "wamp":
self.check_wamp_proto(options)
else:
error("c['protocols'] must be dict")
return
self.protocols = protocols
if 'multiMaster' in config_dict:
self.multiMaster = config_dict["multiMaster"]
if 'debugPassword' in config_dict:
log.msg(
"the 'debugPassword' parameter is unused and "
"can be removed from the configuration file"
)
if 'manhole' in config_dict:
# we don't check that this is a manhole instance, since that
# requires importing buildbot.manhole for every user, and currently
# that will fail if cryptography isn't installed
self.manhole = config_dict['manhole']
if 'revlink' in config_dict:
revlink = config_dict['revlink']
if not callable(revlink):
error("revlink must be a callable")
else:
self.revlink = revlink
def load_validation(self, filename, config_dict):
validation = config_dict.get("validation", {})
if not isinstance(validation, dict):
error("c['validation'] must be a dictionary")
else:
unknown_keys = set(validation.keys()) - set(self.validation.keys())
if unknown_keys:
error(f"unrecognized validation key(s): {', '.join(unknown_keys)}")
else:
self.validation.update(validation)
@staticmethod
def getDbUrlFromConfig(config_dict, throwErrors=True):
if 'db' in config_dict:
db = config_dict['db']
if set(db.keys()) - set(['db_url']) and throwErrors:
error("unrecognized keys in c['db']")
config_dict = db
        # we don't attempt to parse db URLs here - the engine strategy will do so
if 'db_url' in config_dict:
return config_dict['db_url']
return DEFAULT_DB_URL
def load_db(self, filename, config_dict):
self.db = {"db_url": self.getDbUrlFromConfig(config_dict)}
def load_mq(self, filename, config_dict):
from buildbot.mq import connector # avoid circular imports
if 'mq' in config_dict:
self.mq.update(config_dict['mq'])
classes = connector.MQConnector.classes
typ = self.mq.get('type', 'simple')
if typ not in classes:
error(f"mq type '{typ}' is not known")
return
known_keys = classes[typ]['keys']
unk = set(self.mq.keys()) - known_keys - set(['type'])
if unk:
error(f"unrecognized keys in c['mq']: {', '.join(unk)}")
def load_metrics(self, filename, config_dict):
# we don't try to validate metrics keys
if 'metrics' in config_dict:
metrics = config_dict["metrics"]
if not isinstance(metrics, dict):
error("c['metrics'] must be a dictionary")
else:
self.metrics = metrics
def load_secrets(self, filename, config_dict):
if 'secretsProviders' in config_dict:
secretsProviders = config_dict["secretsProviders"]
if not isinstance(secretsProviders, list):
error("c['secretsProviders'] must be a list")
else:
self.secretsProviders = secretsProviders
def load_caches(self, filename, config_dict):
explicit = False
if 'caches' in config_dict:
explicit = True
caches = config_dict['caches']
if not isinstance(caches, dict):
error("c['caches'] must be a dictionary")
else:
for name, value in caches.items():
if not isinstance(value, int):
error(f"value for cache size '{name}' must be an integer")
return
if value < 1:
error(f"'{name}' cache size must be at least 1, got '{value}'")
self.caches.update(caches)
if 'buildCacheSize' in config_dict:
if explicit:
msg = "cannot specify c['caches'] and c['buildCacheSize']"
error(msg)
self.caches['Builds'] = config_dict['buildCacheSize']
if 'changeCacheSize' in config_dict:
if explicit:
msg = "cannot specify c['caches'] and c['changeCacheSize']"
error(msg)
self.caches['Changes'] = config_dict['changeCacheSize']
def load_schedulers(self, filename, config_dict):
if 'schedulers' not in config_dict:
return
schedulers = config_dict['schedulers']
ok = True
if not isinstance(schedulers, (list, tuple)):
ok = False
else:
for s in schedulers:
if not interfaces.IScheduler.providedBy(s):
ok = False
if not ok:
msg = "c['schedulers'] must be a list of Scheduler instances"
error(msg)
# convert from list to dict, first looking for duplicates
seen_names = set()
for s in schedulers:
if s.name in seen_names:
error(f"scheduler name '{s.name}' used multiple times")
seen_names.add(s.name)
self.schedulers = dict((s.name, s) for s in schedulers)
def load_projects(self, filename, config_dict):
if 'projects' not in config_dict:
return
projects = config_dict['projects']
if not isinstance(projects, (list, tuple)):
error("c['projects'] must be a list")
return
def mapper(p):
if isinstance(p, Project):
return p
error(f"{p!r} is not a project config (in c['projects']")
return None
self.projects = [mapper(p) for p in projects]
def load_builders(self, filename, config_dict):
if 'builders' not in config_dict:
return
builders = config_dict['builders']
if not isinstance(builders, (list, tuple)):
error("c['builders'] must be a list")
return
# convert all builder configs to BuilderConfig instances
def mapper(b):
if isinstance(b, BuilderConfig):
return b
elif isinstance(b, dict):
return BuilderConfig(**b)
else:
error(f"{b!r} is not a builder config (in c['builders']")
return None
builders = [mapper(b) for b in builders]
for builder in builders:
if builder and os.path.isabs(builder.builddir):
warnings.warn(
(
f"Absolute path '{builder.builddir}' for builder may cause mayhem. "
"Perhaps you meant to specify workerbuilddir instead."
),
category=ConfigWarning,
stacklevel=1,
)
self.builders = builders
@staticmethod
def _check_workers(workers, conf_key):
if not isinstance(workers, (list, tuple)):
error(f"{conf_key} must be a list")
return False
for worker in workers:
if not interfaces.IWorker.providedBy(worker):
msg = f"{conf_key} must be a list of Worker instances but there is {worker!r}"
error(msg)
return False
def validate(workername):
if workername in ("debug", "change", "status"):
yield f"worker name {workername!r} is reserved"
if not util_identifiers.ident_re.match(workername):
yield f"worker name {workername!r} is not an identifier"
if not workername:
yield f"worker name {workername!r} cannot be an empty string"
max_workername = 50
if len(workername) > max_workername:
yield f"worker name {workername!r} is longer than {max_workername} characters"
errors = list(validate(worker.workername))
for msg in errors:
error(msg)
if errors:
return False
return True
def load_workers(self, filename, config_dict):
workers = config_dict.get('workers')
if workers is None:
return
if not self._check_workers(workers, "c['workers']"):
return
self.workers = workers[:]
def load_change_sources(self, filename, config_dict):
change_source = config_dict.get('change_source', [])
if isinstance(change_source, (list, tuple)):
change_sources = change_source
else:
change_sources = [change_source]
for s in change_sources:
if not interfaces.IChangeSource.providedBy(s):
msg = "c['change_source'] must be a list of change sources"
error(msg)
return
self.change_sources = change_sources
def load_machines(self, filename, config_dict):
if 'machines' not in config_dict:
return
machines = config_dict['machines']
msg = "c['machines'] must be a list of machines"
if not isinstance(machines, (list, tuple)):
error(msg)
return
for m in machines:
if not interfaces.IMachine.providedBy(m):
error(msg)
return
self.machines = machines
def load_user_managers(self, filename, config_dict):
if 'user_managers' not in config_dict:
return
user_managers = config_dict['user_managers']
msg = "c['user_managers'] must be a list of user managers"
if not isinstance(user_managers, (list, tuple)):
error(msg)
return
self.user_managers = user_managers
def load_www(self, filename, config_dict):
if 'www' not in config_dict:
return
www_cfg = config_dict['www']
allowed = {
'allowed_origins',
'auth',
'authz',
'avatar_methods',
'change_hook_auth',
'change_hook_dialects',
'cookie_expiration_time',
'custom_templates_dir',
'debug',
'default_page',
'json_cache_seconds',
'jsonp',
'logRotateLength',
'logfileName',
'maxRotatedFiles',
'plugins',
'port',
'rest_minimum_version',
'ui_default_config',
'versions',
'ws_ping_interval',
'project_widgets',
'graphql',
'theme',
}
unknown = set(list(www_cfg)) - allowed
if unknown:
error(f"unknown www configuration parameter(s) {', '.join(unknown)}")
versions = www_cfg.get('versions')
if versions is not None:
cleaned_versions = []
if not isinstance(versions, list):
error('Invalid www configuration value of versions')
else:
for v in versions:
if not isinstance(v, tuple) or len(v) < 2:
error('Invalid www configuration value of versions')
break
cleaned_versions.append(v)
www_cfg['versions'] = cleaned_versions
cookie_expiration_time = www_cfg.get('cookie_expiration_time')
if cookie_expiration_time is not None:
if not isinstance(cookie_expiration_time, datetime.timedelta):
error(
'Invalid www["cookie_expiration_time"] configuration should '
'be a datetime.timedelta'
)
self.www.update(www_cfg)
def load_services(self, filename, config_dict):
if 'services' not in config_dict:
return
self.services = {}
for _service in config_dict['services']:
if not isinstance(_service, util_service.BuildbotService):
error(
f"{type(_service)} object should be an instance of "
"buildbot.util.service.BuildbotService"
)
continue
if _service.name in self.services:
error(f'Duplicate service name {_service.name!r}')
continue
self.services[_service.name] = _service
def check_single_master(self):
        # perform additional checks that only apply to a single-master
        # installation
if self.multiMaster:
return
if not self.workers:
error("no workers are configured")
if not self.builders:
error("no builders are configured")
# check that all builders are implemented on this master
unscheduled_buildernames = {b.name for b in self.builders}
for s in self.schedulers.values():
builderNames = s.listBuilderNames()
if interfaces.IRenderable.providedBy(builderNames):
unscheduled_buildernames.clear()
else:
for n in builderNames:
if interfaces.IRenderable.providedBy(n):
unscheduled_buildernames.clear()
elif n in unscheduled_buildernames:
unscheduled_buildernames.remove(n)
if unscheduled_buildernames:
names_str = ', '.join(unscheduled_buildernames)
error(f"builder(s) {names_str} have no schedulers to drive them")
def check_schedulers(self):
# don't perform this check in multiMaster mode
if self.multiMaster:
return
all_buildernames = {b.name for b in self.builders}
for s in self.schedulers.values():
builderNames = s.listBuilderNames()
if interfaces.IRenderable.providedBy(builderNames):
continue
for n in builderNames:
if interfaces.IRenderable.providedBy(n):
continue
if n not in all_buildernames:
error(f"Unknown builder '{n}' in scheduler '{s.name}'")
def check_locks(self):
# assert that all locks used by the Builds and their Steps are
# uniquely named.
lock_dict = {}
def check_lock(lock):
if isinstance(lock, locks.LockAccess):
lock = lock.lockid
if lock.name in lock_dict:
if lock_dict[lock.name] is not lock:
msg = f"Two locks share the same name, '{lock.name}'"
error(msg)
else:
lock_dict[lock.name] = lock
for b in self.builders:
if b.locks and not IRenderable.providedBy(b.locks):
for lock in b.locks:
check_lock(lock)
def check_projects(self):
seen_names = set()
for p in self.projects:
if p.name in seen_names:
error(f"duplicate project name '{p.name}'")
seen_names.add(p.name)
def check_builders(self):
# look both for duplicate builder names, and for builders pointing
# to unknown workers
workernames = {w.workername for w in self.workers}
project_names = {p.name for p in self.projects}
seen_names = set()
seen_builddirs = set()
for b in self.builders:
unknowns = set(b.workernames) - workernames
if unknowns:
error(
f"builder '{b.name}' uses unknown workers "
f"{', '.join(repr(u) for u in unknowns)}"
)
if b.name in seen_names:
error(f"duplicate builder name '{b.name}'")
seen_names.add(b.name)
if b.builddir in seen_builddirs:
error(f"duplicate builder builddir '{b.builddir}'")
seen_builddirs.add(b.builddir)
if b.project is not None:
if b.project not in project_names:
error(f"builder '{b.name}' uses unknown project name '{b.project}'")
def check_ports(self):
ports = set()
if self.protocols:
for proto, options in self.protocols.items():
if proto == 'null':
port = -1
else:
port = options.get("port")
if port is None:
continue
if isinstance(port, int):
# Conversion needed to compare listenTCP and strports ports
port = f"tcp:{port}"
if port != -1 and port in ports:
error("Some of ports in c['protocols'] duplicated")
ports.add(port)
if ports:
return
if self.workers:
error("workers are configured, but c['protocols'] not")
def check_machines(self):
seen_names = set()
for mm in self.machines:
if mm.name in seen_names:
error(f"duplicate machine name '{mm.name}'")
seen_names.add(mm.name)
for w in self.workers:
if w.machine_name is not None and w.machine_name not in seen_names:
error(f"worker '{w.name}' uses unknown machine '{w.machine_name}'")
| 33,984 | Python | .py | 804 | 30.570896 | 100 | 0.577863 | buildbot/buildbot | 5,232 | 1,616 | 728 | GPL-2.0 | 9/5/2024, 5:09:21 PM (Europe/Amsterdam) |
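A sketch of driving MasterConfig.loadFromDict() directly with a minimal dict; multiMaster=True sidesteps the single-master checks that would otherwise demand workers and builders, and the filename argument is only used in messages.

from buildbot.config.master import MasterConfig

config = MasterConfig.loadFromDict(
    {
        "title": "Example CI",
        "buildbotURL": "http://ci.example.org/",
        "db_url": "sqlite:///state.sqlite",
        "multiMaster": True,
    },
    "<inline>",
)
print(config.title, config.db["db_url"])  # Example CI sqlite:///state.sqlite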