text_prompt | code_prompt
---|---|
<SYSTEM_TASK:>
Record a reference to the argument key.
<END_TASK>
<USER_TASK:>
Description:
def _ref_key(self, key):
"""Record a reference to the argument key.""" |
queue = self.queue
refcount = self.refcount
queue.append(key)
refcount[key] = refcount[key] + 1
# periodically compact the queue by eliminating duplicate keys
# while preserving order of most recent access. Note that this
# is only required when the cache does not exceed its maximum
# size
if len(queue) > self.max_queue:
refcount.clear()
queue_appendleft = queue.appendleft
queue_appendleft(self.sentinel)
for k in filterfalse(refcount.__contains__,
iter(queue.pop, self.sentinel)):
queue_appendleft(k)
refcount[k] = 1 |
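The compaction pass above pushes a sentinel onto the left of the queue, then pops keys off the right (most recent first) and re-adds only the first occurrence of each, so every key keeps its most recent position. A minimal standalone sketch of the same trick, assuming nothing from the surrounding class:

from collections import deque
from itertools import filterfalse

def compact(queue, sentinel=object()):
    """Keep only the most recent occurrence of each key, preserving order."""
    seen = {}
    queue.appendleft(sentinel)
    # iter(queue.pop, sentinel) pops from the right until the sentinel;
    # filterfalse drops keys we have already re-added
    for k in filterfalse(seen.__contains__, iter(queue.pop, sentinel)):
        queue.appendleft(k)
        seen[k] = 1
    return queue

q = deque(['a', 'b', 'a', 'c', 'b'])
assert list(compact(q)) == ['a', 'c', 'b']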
<SYSTEM_TASK:>
Try to do a value lookup from the existing cache entries.
<END_TASK>
<USER_TASK:>
Description:
def _get_hit(self, key):
"""Try to do a value lookup from the existing cache entries.""" |
try:
result = self.cache[key]
self.hits += 1
self._ref_key(key)
return result
except KeyError:
pass
result = self.weakrefs[key]
self.refhits += 1
self.cache[key] = result
self._ref_key(key)
return result |
<SYSTEM_TASK:>
Trim the cache down to max_size by evicting the
<END_TASK>
<USER_TASK:>
Description:
def _purge(self):
"""
Trim the cache down to max_size by evicting the
least-recently-used entries.
""" |
if len(self.cache) <= self.max_size:
return
cache = self.cache
refcount = self.refcount
queue = self.queue
max_size = self.max_size
# purge least recently used entries, using refcount to count entries
# that appear multiple times in the queue
while len(cache) > max_size:
refc = 1
while refc:
k = queue.popleft()
refc = refcount[k] = refcount[k] - 1
del cache[k]
del refcount[k] |
<SYSTEM_TASK:>
Start worker process.
<END_TASK>
<USER_TASK:>
Description:
def startWorker(basedir, quiet, nodaemon):
"""
Start worker process.
Fork and start the twisted application described in the basedir's buildbot.tac file.
Print its log messages to stdout for a while and try to figure out if
the start was successful.
If the quiet or nodaemon parameters are True, or we are running on a win32
system, we will not fork, and the log will not be printed to stdout.
@param basedir: worker's basedir path
@param quiet: don't display startup log messages
@param nodaemon: don't daemonize (stay in foreground)
@return: 0 if worker was successfully started,
1 if we are not sure that worker started successfully
""" |
os.chdir(basedir)
if quiet or nodaemon:
return launch(nodaemon)
# we probably can't do this os.fork under windows
from twisted.python.runtime import platformType
if platformType == "win32":
return launch(nodaemon)
# fork a child to launch the daemon, while the parent process tails the
# logfile
if os.fork():
# this is the parent
rc = Follower().follow()
return rc
# this is the child: give the logfile-watching parent a chance to start
# watching it before we start the daemon
time.sleep(0.2)
launch(nodaemon) |
<SYSTEM_TASK:>
Calculates the width of the specified text.
<END_TASK>
<USER_TASK:>
Description:
def textwidth(self, text, config):
"""Calculates the width of the specified text.
""" |
surface = cairo.SVGSurface(None, 1280, 200)
ctx = cairo.Context(surface)
ctx.select_font_face(config['font_face'],
cairo.FONT_SLANT_NORMAL,
cairo.FONT_WEIGHT_BOLD)
ctx.set_font_size(int(config['font_size']))
return ctx.text_extents(text)[2] + 2 |
<SYSTEM_TASK:>
Renders an SVG from the template, using the specified data
<END_TASK>
<USER_TASK:>
Description:
def makesvg(self, right_text, status=None, left_text=None,
left_color=None, config=None):
"""Renders an SVG from the template, using the specified data
""" |
right_color = config['color_scheme'].get(status, "#9f9f9f") # Grey
left_text = left_text or config['left_text']
left_color = left_color or config['left_color']
left = {
"color": left_color,
"text": left_text,
"width": self.textwidth(left_text, config)
}
right = {
"color": right_color,
"text": right_text,
"width": self.textwidth(right_text, config)
}
template = self.env.get_template(config['template_name'].format(**config))
return template.render(left=left, right=right, config=config) |
<SYSTEM_TASK:>
This formats the results of the database operations for printing
<END_TASK>
<USER_TASK:>
Description:
def formatResults(self, op, results):
"""
This formats the results of the database operations for printing
back to the caller
@param op: operation to perform (add, remove, update, get)
@type op: string
@param results: results from db queries in perspective_commandline
@type results: list
@returns: string containing formatted results
""" |
formatted_results = ""
if op == 'add':
# list, alternating ident, uid
formatted_results += "user(s) added:\n"
for user in results:
if isinstance(user, str):
formatted_results += "identifier: %s\n" % user
else:
formatted_results += "uid: %d\n\n" % user
elif op == 'remove':
# list of dictionaries
formatted_results += "user(s) removed:\n"
for user in results:
if user:
formatted_results += "identifier: %s\n" % (user)
elif op == 'update':
# list, alternating ident, None
formatted_results += "user(s) updated:\n"
for user in results:
if user:
formatted_results += "identifier: %s\n" % (user)
elif op == 'get':
# list of dictionaries
formatted_results += "user(s) found:\n"
for user in results:
if user:
for key in sorted(user.keys()):
if key != 'bb_password':
formatted_results += "%s: %s\n" % (key, user[key])
formatted_results += "\n"
else:
formatted_results += "no match found\n"
return formatted_results |
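For illustration, a hypothetical 'add' result list (alternating identifier and uid, values invented for this sketch) would be rendered like this:

results = ['jdoe', 42]   # alternating identifier, uid
# formatResults('add', results) would return:
#
#     user(s) added:
#     identifier: jdoe
#     uid: 42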
<SYSTEM_TASK:>
This performs the requested operations from the `buildbot user`
<END_TASK>
<USER_TASK:>
Description:
def perspective_commandline(self, op, bb_username, bb_password, ids, info):
"""
This performs the requested operations from the `buildbot user`
call by calling the proper buildbot.db.users methods based on
the operation. It yields a deferred instance with the results
from the database methods.
@param op: operation to perform (add, remove, update, get)
@type op: string
@param bb_username: username portion of auth credentials
@type bb_username: string
@param bb_password: hashed password portion of auth credentials
@type bb_password: hashed string
@param ids: user identifiers used to find existing users
@type ids: list of strings or None
@param info: type/value pairs for each user that will be added
or updated in the database
@type info: list of dictionaries or None
@returns: results from db.users methods via deferred
""" |
log.msg("perspective_commandline called")
results = []
# pylint: disable=too-many-nested-blocks
if ids:
for user in ids:
# get identifier, guaranteed to be in user from checks
# done in C{scripts.runner}
uid = yield self.master.db.users.identifierToUid(
identifier=user)
result = None
if op == 'remove':
if uid:
yield self.master.db.users.removeUser(uid)
result = user
else:
log.msg("Unable to find uid for identifier %s" % user)
elif op == 'get':
if uid:
result = yield self.master.db.users.getUser(uid)
else:
log.msg("Unable to find uid for identifier %s" % user)
results.append(result)
else:
for user in info:
# get identifier, guaranteed to be in user from checks
# done in C{scripts.runner}
ident = user.pop('identifier')
uid = yield self.master.db.users.identifierToUid(
identifier=ident)
# if only an identifier was in user, we're updating only
# the bb_username and bb_password.
if not user:
if uid:
result = yield self.master.db.users.updateUser(
uid=uid,
identifier=ident,
bb_username=bb_username,
bb_password=bb_password)
results.append(ident)
else:
log.msg("Unable to find uid for identifier %s"
% user)
else:
# when adding, we update the user after the first attr
once_through = False
for attr in user:
result = None
if op == 'update' or once_through:
if uid:
result = yield self.master.db.users.updateUser(
uid=uid,
identifier=ident,
bb_username=bb_username,
bb_password=bb_password,
attr_type=attr,
attr_data=user[attr])
else:
log.msg("Unable to find uid for identifier %s"
% user)
elif op == 'add':
result = yield self.master.db.users.findUserByAttr(
identifier=ident,
attr_type=attr,
attr_data=user[attr])
once_through = True
results.append(ident)
# result is None from updateUser calls
if result:
results.append(result)
uid = result
results = self.formatResults(op, results)
return results |
<SYSTEM_TASK:>
A method to allow posting data that is not generated and stored as build-data in
<END_TASK>
<USER_TASK:>
Description:
def yieldMetricsValue(self, data_name, post_data, buildid):
"""
A method to allow posting data that is not generated and stored as build-data in
the database. This method generates the `stats-yield-data` event to the mq layer
which is then consumed in self.postData.
@params
data_name: (str) The unique name for identifying this data.
post_data: (dict) A dictionary of key-value pairs that'll be sent for storage.
buildid: The buildid of the current Build.
""" |
build_data = yield self.master.data.get(('builds', buildid))
routingKey = ("stats-yieldMetricsValue", "stats-yield-data")
msg = {
'data_name': data_name,
'post_data': post_data,
'build_data': build_data
}
self.master.mq.produce(routingKey, msg) |
<SYSTEM_TASK:>
helper to get a direct interface to _Plugins
<END_TASK>
<USER_TASK:>
Description:
def get_plugins(namespace, interface=None, check_extras=True, load_now=False):
"""
helper to get a direct interface to _Plugins
""" |
return _DB.add_namespace(namespace, interface, check_extras, load_now) |
<SYSTEM_TASK:>
register given namespace in global database of plugins
<END_TASK>
<USER_TASK:>
Description:
def add_namespace(self, namespace, interface=None, check_extras=True,
load_now=False):
"""
register given namespace in global database of plugins
in case it's already registered, return the registration
""" |
tempo = self._namespaces.get(namespace)
if tempo is None:
tempo = _Plugins(namespace, interface, check_extras)
self._namespaces[namespace] = tempo
if load_now:
tempo.load()
return tempo |
<SYSTEM_TASK:>
get information about all plugins in registered namespaces
<END_TASK>
<USER_TASK:>
Description:
def info(self):
"""
get information about all plugins in registered namespaces
""" |
result = dict()
for name, namespace in self._namespaces.items():
result[name] = namespace.info_all()
return result |
<SYSTEM_TASK:>
Helper method for testing; returns the TCP port used for this
<END_TASK>
<USER_TASK:>
Description:
def getPort(self):
"""
Helper method for testing; returns the TCP port used for this
registration, even if it was specified as 0 and thus allocated by the
OS.
""" |
disp = self.pbmanager.dispatchers[self.portstr]
return disp.port.getHost().port |
<SYSTEM_TASK:>
Start a timer to detect if the connection is hung.
<END_TASK>
<USER_TASK:>
Description:
def _startHungConnectionTimer(self):
"""
Start a timer to detect if the connection is hung.
""" |
def hungConnection():
self._hung_callback()
self._hungConnectionTimer = None
self.transport.loseConnection()
self._hungConnectionTimer = self._reactor.callLater(
self._HUNG_CONNECTION_TIMEOUT, hungConnection) |
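This is the usual Twisted watchdog pattern: schedule a callLater, then reset or cancel it when the connection shows signs of life. A self-contained sketch under invented names (Watchdog, on_hung, and the 10-second timeout are all illustrative, not from the original class):

from twisted.internet import reactor

class Watchdog:
    TIMEOUT = 10  # seconds; illustrative value

    def __init__(self, on_hung):
        self._on_hung = on_hung
        self._timer = None

    def start(self):
        self._timer = reactor.callLater(self.TIMEOUT, self._fire)

    def reset(self):
        # call whenever activity is seen on the connection
        if self._timer and self._timer.active():
            self._timer.reset(self.TIMEOUT)

    def stop(self):
        if self._timer and self._timer.active():
            self._timer.cancel()
        self._timer = None

    def _fire(self):
        self._timer = None
        self._on_hung()   # e.g. log and lose the connection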
<SYSTEM_TASK:>
Return a boolean whether the lock is available for claiming
<END_TASK>
<USER_TASK:>
Description:
def isAvailable(self, requester, access):
""" Return a boolean whether the lock is available for claiming """ |
debuglog("%s isAvailable(%s, %s): self.owners=%r"
% (self, requester, access, self.owners))
num_excl, num_counting = self._claimed_excl, self._claimed_counting
# Find all waiters ahead of the requester in the wait queue
for idx, waiter in enumerate(self.waiting):
if waiter[0] is requester:
w_index = idx
break
else:
w_index = len(self.waiting)
ahead = self.waiting[:w_index]
if access.mode == 'counting':
# Wants counting access
return num_excl == 0 and num_counting + len(ahead) < self.maxCount \
and all([w[1].mode == 'counting' for w in ahead])
# else Wants exclusive access
return num_excl == 0 and num_counting == 0 and not ahead |
<SYSTEM_TASK:>
Responds to events and starts the build process
<END_TASK>
<USER_TASK:>
Description:
def render_POST(self, request):
"""
Responds to events and starts the build process.
Different implementations can decide which methods they will accept.
:arguments:
request
the http request object
""" |
try:
d = self.getAndSubmitChanges(request)
except Exception:
d = defer.fail()
def ok(_):
request.setResponseCode(202)
request.finish()
def err(why):
code = 500
if why.check(ValueError):
code = 400
msg = unicode2bytes(why.getErrorMessage())
else:
log.err(why, "adding changes from web hook")
msg = b'Error processing changes.'
request.setResponseCode(code, msg)
request.write(msg)
request.finish()
d.addCallbacks(ok, err)
return server.NOT_DONE_YET |
<SYSTEM_TASK:>
create and cache the handler object for this dialect
<END_TASK>
<USER_TASK:>
Description:
def makeHandler(self, dialect):
"""create and cache the handler object for this dialect""" |
if dialect not in self.dialects:
m = "The dialect specified, '{}', wasn't whitelisted in change_hook".format(dialect)
log.msg(m)
log.msg(
"Note: if dialect is 'base' then it's possible your URL is malformed and we didn't regex it properly")
raise ValueError(m)
if dialect not in self._dialect_handlers:
if dialect not in self._plugins:
m = "The dialect specified, '{}', is not registered as a buildbot.webhook plugin".format(dialect)
log.msg(m)
raise ValueError(m)
options = self.dialects[dialect]
if isinstance(options, dict) and 'custom_class' in options:
klass = options['custom_class']
else:
klass = self._plugins.get(dialect)
self._dialect_handlers[dialect] = klass(self.master, self.dialects[dialect])
return self._dialect_handlers[dialect] |
<SYSTEM_TASK:>
Take the logic from the change hook, and then delegate it
<END_TASK>
<USER_TASK:>
Description:
def getChanges(self, request):
"""
Take the logic from the change hook, and then delegate it
to the proper handler
We use the buildbot plugin mechanisms to find out about dialects
and call getChanges()
the return value is a list of changes
if DIALECT is unspecified, a sample implementation is provided
""" |
uriRE = re.search(r'^/change_hook/?([a-zA-Z0-9_]*)', bytes2unicode(request.uri))
if not uriRE:
log.msg("URI doesn't match change_hook regex: %s" % request.uri)
raise ValueError(
"URI doesn't match change_hook regex: %s" % request.uri)
changes = []
src = None
# Was there a dialect provided?
if uriRE.group(1):
dialect = uriRE.group(1)
else:
dialect = 'base'
handler = self.makeHandler(dialect)
changes, src = yield handler.getChanges(request)
return (changes, src) |
<SYSTEM_TASK:>
We need to convert a dictionary where keys and values
<END_TASK>
<USER_TASK:>
Description:
def decode(data, encoding='utf-8', errors='strict'):
"""We need to convert a dictionary where keys and values
are bytes, to unicode strings. This happens when a
Python 2 worker sends a dictionary back to a Python 3 master.
""" |
data_type = type(data)
if data_type == bytes:
return bytes2unicode(data, encoding, errors)
if data_type in (dict, list, tuple):
if data_type == dict:
data = data.items()
return data_type(map(decode, data))
return data |
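A self-contained restatement of the same recursive idea, with a plain bytes.decode standing in for buildbot's bytes2unicode helper:

def decode_tree(data, encoding='utf-8', errors='strict'):
    """Recursively turn bytes into str inside dicts, lists and tuples."""
    if isinstance(data, bytes):
        return data.decode(encoding, errors)
    if isinstance(data, dict):
        return {decode_tree(k): decode_tree(v) for k, v in data.items()}
    if isinstance(data, (list, tuple)):
        return type(data)(decode_tree(item) for item in data)
    return data

assert decode_tree({b'rc': [0, b'ok']}) == {'rc': [0, 'ok']}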
<SYSTEM_TASK:>
A kind of create_or_update, that's between one or two queries per
<END_TASK>
<USER_TASK:>
Description:
def setBuildProperty(self, bid, name, value, source):
""" A kind of create_or_update, that's between one or two queries per
call """ |
def thd(conn):
bp_tbl = self.db.model.build_properties
self.checkLength(bp_tbl.c.name, name)
self.checkLength(bp_tbl.c.source, source)
whereclause = sa.and_(bp_tbl.c.buildid == bid,
bp_tbl.c.name == name)
q = sa.select(
[bp_tbl.c.value, bp_tbl.c.source],
whereclause=whereclause)
prop = conn.execute(q).fetchone()
value_js = json.dumps(value)
if prop is None:
conn.execute(bp_tbl.insert(),
dict(buildid=bid, name=name, value=value_js,
source=source))
elif (prop.value != value_js) or (prop.source != source):
conn.execute(bp_tbl.update(whereclause=whereclause),
dict(value=value_js, source=source))
yield self.db.pool.do(thd) |
<SYSTEM_TASK:>
Return a deferred to set current revision in persistent state.
<END_TASK>
<USER_TASK:>
Description:
def _setCurrentRev(self, rev, branch='default'):
"""Return a deferred to set current revision in persistent state.""" |
self.lastRev[branch] = str(rev)
return self.setState('lastRev', self.lastRev) |
<SYSTEM_TASK:>
Return a deferred for branch head revision or None.
<END_TASK>
<USER_TASK:>
Description:
def _getHead(self, branch):
"""Return a deferred for branch head revision or None.
We'll get an error if there is no head for this branch, which is
probably a good thing, since it's probably a misspelling
(if really buildbotting a branch that does not have any changeset
yet, one shouldn't be surprised to get errors)
""" |
d = utils.getProcessOutput(self.hgbin,
['heads', '-r', branch,
'--template={rev}' + os.linesep],
path=self._absWorkdir(), env=os.environ, errortoo=False)
@d.addErrback
def no_head_err(exc):
log.err("hgpoller: could not find revision %r in repository %r" % (
branch, self.repourl))
@d.addCallback
def results(heads):
if not heads:
return
if len(heads.split()) > 1:
log.err(("hgpoller: caught several heads in branch %r "
"from repository %r. Staying at previous revision"
"You should wait until the situation is normal again "
"due to a merge or directly strip if remote repo "
"gets stripped later.") % (branch, self.repourl))
return
# in case of whole reconstruction, are we sure that we'll get the
# same node -> rev assignments?
return heads.strip().decode(self.encoding)
return d |
<SYSTEM_TASK:>
Send info about pulled changes to the master and record current.
<END_TASK>
<USER_TASK:>
Description:
def _processChanges(self, unused_output):
"""Send info about pulled changes to the master and record current.
HgPoller does the recording by moving the working dir to the head
of the branch.
We don't update the tree (an unnecessary operation and a waste of space);
instead, we simply store the current rev number in a file.
Recall that hg rev numbers are local and incremental.
""" |
for branch in self.branches + self.bookmarks:
rev = yield self._getHead(branch)
if rev is None:
# Nothing pulled?
continue
yield self._processBranchChanges(rev, branch) |
<SYSTEM_TASK:>
Patches to manifest projects are a bit special.
<END_TASK>
<USER_TASK:>
Description:
def filterManifestPatches(self):
"""
Patches to manifest projects are a bit special.
repo does not support a way to download them automatically,
so we need to implement the boilerplate manually.
This code separates the manifest patches from the other patches,
and generates commands to import those manifest patches.
""" |
manifest_unrelated_downloads = []
manifest_related_downloads = []
for download in self.repoDownloads:
project, ch_ps = download.split(" ")[-2:]
if (self.manifestURL.endswith("/" + project) or
self.manifestURL.endswith("/" + project + ".git")):
ch, ps = map(int, ch_ps.split("/"))
branch = "refs/changes/%02d/%d/%d" % (ch % 100, ch, ps)
manifest_related_downloads.append(
["git", "fetch", self.manifestURL, branch])
manifest_related_downloads.append(
["git", "cherry-pick", "FETCH_HEAD"])
else:
manifest_unrelated_downloads.append(download)
self.repoDownloads = manifest_unrelated_downloads
self.manifestDownloads = manifest_related_downloads |
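The refs/changes/%02d/%d/%d pattern is Gerrit's standard change-ref layout: the first component is the change number modulo 100, zero-padded to two digits. For a hypothetical change 12345, patchset 2:

ch, ps = map(int, "12345/2".split("/"))
branch = "refs/changes/%02d/%d/%d" % (ch % 100, ch, ps)
assert branch == "refs/changes/45/12345/2"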
<SYSTEM_TASK:>
also used by tests for expectations
<END_TASK>
<USER_TASK:>
Description:
def _getCleanupCommand(self):
"""also used by tests for expectations""" |
return textwrap.dedent("""\
set -v
if [ -d .repo/manifests ]
then
# repo just refuses to run if the manifest is messed up
# so ensure we are in a known state
cd .repo/manifests
rm -f .git/index.lock
git fetch origin
git reset --hard remotes/origin/%(manifestBranch)s
git config branch.default.merge %(manifestBranch)s
cd ..
ln -sf manifests/%(manifestFile)s manifest.xml
cd ..
fi
repo forall -c rm -f .git/index.lock
repo forall -c git clean -f -d -x 2>/dev/null
repo forall -c git reset --hard HEAD 2>/dev/null
rm -f %(workdir)s/.repo/project.list
""") % dict(manifestBranch=self.manifestBranch,
manifestFile=self.manifestFile,
workdir=self.workdir) |
<SYSTEM_TASK:>
Return the object id for this master, for associating state with the
<END_TASK>
<USER_TASK:>
Description:
def getObjectId(self):
"""
Return the object id for this master, for associating state with the
master.
@returns: ID, via Deferred
""" |
# try to get the cached value
if self._object_id is not None:
return defer.succeed(self._object_id)
# failing that, get it from the DB; multiple calls to this function
# at the same time will not hurt
d = self.db.state.getObjectId(self.name,
"buildbot.master.BuildMaster")
@d.addCallback
def keep(id):
self._object_id = id
return id
return d |
<SYSTEM_TASK:>
Take a Change author and source and translate them into a User Object,
<END_TASK>
<USER_TASK:>
Description:
def createUserObject(master, author, src=None):
"""
Take a Change author and source and translate them into a User Object,
storing the user in master.db, or returning None if the src is not
specified.
@param master: link to Buildmaster for database operations
@type master: master.Buildmaster instance
@param author: Change author as a string or Authz instance
@type author: string or www.authz instance
@param src: source from which the User Object will be created
@type src: string
""" |
if not src:
log.msg("No vcs information found, unable to create User Object")
return defer.succeed(None)
if src in srcs:
usdict = dict(identifier=author, attr_type=src, attr_data=author)
else:
log.msg("Unrecognized source argument: %s" % src)
return defer.succeed(None)
return master.db.users.findUserByAttr(
identifier=usdict['identifier'],
attr_type=usdict['attr_type'],
attr_data=usdict['attr_data']) |
<SYSTEM_TASK:>
Encrypts the incoming password after adding some salt to store
<END_TASK>
<USER_TASK:>
Description:
def encrypt(passwd):
"""
Encrypts the incoming password after adding some salt to store
it in the database.
@param passwd: password portion of user credentials
@type passwd: string
@returns: encrypted/salted string
""" |
m = sha1()
salt = hexlify(os.urandom(salt_len))
m.update(unicode2bytes(passwd) + salt)
crypted = bytes2unicode(salt) + m.hexdigest()
return crypted |
<SYSTEM_TASK:>
Tests to see if the guess, after salting and hashing, matches the
<END_TASK>
<USER_TASK:>
Description:
def check_passwd(guess, passwd):
"""
Tests to see if the guess, after salting and hashing, matches the
passwd from the database.
@param guess: incoming password trying to be used for authentication
@param passwd: already encrypted password from the database
@returns: boolean
""" |
m = sha1()
salt = passwd[:salt_len * 2] # salt_len * 2 due to encode('hex_codec')
m.update(unicode2bytes(guess) + unicode2bytes(salt))
crypted_guess = bytes2unicode(salt) + m.hexdigest()
return (crypted_guess == bytes2unicode(passwd)) |
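A runnable round-trip of the two helpers above; salt_len = 8 is an assumption for this sketch (the real module defines the constant elsewhere):

import os
from binascii import hexlify
from hashlib import sha1

salt_len = 8  # assumed value

def encrypt(passwd):
    m = sha1()
    salt = hexlify(os.urandom(salt_len))          # 2 * salt_len hex chars
    m.update(passwd.encode('utf-8') + salt)
    return salt.decode('ascii') + m.hexdigest()

def check_passwd(guess, passwd):
    m = sha1()
    salt = passwd[:salt_len * 2]                  # hex encoding doubles the length
    m.update(guess.encode('utf-8') + salt.encode('ascii'))
    return salt + m.hexdigest() == passwd

stored = encrypt("hunter2")
assert check_passwd("hunter2", stored)
assert not check_passwd("wrong", stored)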
<SYSTEM_TASK:>
make sure the result is backward compatible
<END_TASK>
<USER_TASK:>
Description:
def _handleLegacyResult(result):
"""
make sure the result is backward compatible
""" |
if not isinstance(result, dict):
warnings.warn('The Gerrit status callback uses the old way to '
'communicate results. The outcome might be not what is '
'expected.')
message, verified, reviewed = result
result = makeReviewResult(message,
(GERRIT_LABEL_VERIFIED, verified),
(GERRIT_LABEL_REVIEWED, reviewed))
return result |
<SYSTEM_TASK:>
Try to start any builds that can be started right now. This function
<END_TASK>
<USER_TASK:>
Description:
def maybeStartBuildsOn(self, new_builders):
"""
Try to start any builds that can be started right now. This function
returns immediately, and promises to trigger those builders
eventually.
@param new_builders: names of new builders that should be given the
opportunity to check for new requests.
""" |
if not self.running:
return
d = self._maybeStartBuildsOn(new_builders)
self._pendingMSBOCalls.append(d)
try:
yield d
except Exception as e: # pragma: no cover
log.err(e, "while starting builds on {0}".format(new_builders))
finally:
self._pendingMSBOCalls.remove(d) |
<SYSTEM_TASK:>
This method must be called by user classes
<END_TASK>
<USER_TASK:>
Description:
def addLogForRemoteCommands(self, logname):
"""This method must be called by user classes
composite steps could create several logs, this mixin functions will write
to the last one.
""" |
self.rc_log = self.addLog(logname)
return self.rc_log |
<SYSTEM_TASK:>
remove a directory from the worker
<END_TASK>
<USER_TASK:>
Description:
def runRmdir(self, dir, timeout=None, **kwargs):
""" remove a directory from the worker """ |
cmd_args = {'dir': dir, 'logEnviron': self.logEnviron}
if timeout:
cmd_args['timeout'] = timeout
return self.runRemoteCommand('rmdir', cmd_args, **kwargs) |
<SYSTEM_TASK:>
remove a file from the worker
<END_TASK>
<USER_TASK:>
Description:
def runRmFile(self, path, timeout=None, **kwargs):
""" remove a file from the worker """ |
cmd_args = {'path': path, 'logEnviron': self.logEnviron}
if timeout:
cmd_args['timeout'] = timeout
if self.workerVersionIsOlderThan('rmfile', '3.1'):
cmd_args['dir'] = os.path.abspath(path)
return self.runRemoteCommand('rmdir', cmd_args, **kwargs)
return self.runRemoteCommand('rmfile', cmd_args, **kwargs) |
<SYSTEM_TASK:>
test whether path exists
<END_TASK>
<USER_TASK:>
Description:
def pathExists(self, path):
""" test whether path exists""" |
def commandComplete(cmd):
return not cmd.didFail()
return self.runRemoteCommand('stat', {'file': path,
'logEnviron': self.logEnviron, },
abandonOnFailure=False,
evaluateCommand=commandComplete) |
<SYSTEM_TASK:>
create a directory and its parents
<END_TASK>
<USER_TASK:>
Description:
def runMkdir(self, _dir, **kwargs):
""" create a directory and its parents""" |
return self.runRemoteCommand('mkdir', {'dir': _dir,
'logEnviron': self.logEnviron, },
**kwargs) |
<SYSTEM_TASK:>
find files matching a shell-style pattern
<END_TASK>
<USER_TASK:>
Description:
def runGlob(self, path, **kwargs):
""" find files matching a shell-style pattern""" |
def commandComplete(cmd):
return cmd.updates['files'][-1]
return self.runRemoteCommand('glob', {'path': path,
'logEnviron': self.logEnviron, },
evaluateCommand=commandComplete, **kwargs) |
<SYSTEM_TASK:>
Hook that is run when an old API name is used.
<END_TASK>
<USER_TASK:>
Description:
def reportDeprecatedWorkerNameUsage(message, stacklevel=None, filename=None,
lineno=None):
"""Hook that is ran when old API name is used.
:param stacklevel: stack level relative to the caller's frame.
Defaults to caller of the caller of this function.
""" |
if filename is None:
if stacklevel is None:
# Warning will refer to the caller of the caller of this function.
stacklevel = 3
else:
stacklevel += 2
warnings.warn(DeprecatedWorkerNameWarning(message), None, stacklevel)
else:
assert stacklevel is None
if lineno is None:
lineno = 0
warnings.warn_explicit(
DeprecatedWorkerNameWarning(message),
DeprecatedWorkerNameWarning,
filename, lineno) |
<SYSTEM_TASK:>
Hook Twisted deprecation machinery to use custom warning class
<END_TASK>
<USER_TASK:>
Description:
def setupWorkerTransition():
"""Hook Twisted deprecation machinery to use custom warning class
for Worker API deprecation warnings.""" |
default_warn_method = getWarningMethod()
def custom_warn_method(message, category, stacklevel):
if stacklevel is not None:
stacklevel += 1
if _WORKER_WARNING_MARK in message:
# Message contains our mark - it's a Worker API Renaming warning;
# issue it appropriately.
message = message.replace(_WORKER_WARNING_MARK, "")
warnings.warn(
DeprecatedWorkerNameWarning(message), None, stacklevel)
else:
# Other's warning message
default_warn_method(message, category, stacklevel)
setWarningMethod(custom_warn_method) |
<SYSTEM_TASK:>
Handles fallbacks for failure of fetch,
<END_TASK>
<USER_TASK:>
Description:
def _fetchOrFallback(self, _=None):
"""
Handles fallbacks for failure of fetch;
a wrapper for self._fetch.
""" |
res = yield self._fetch(None)
if res == RC_SUCCESS:
return res
elif self.retryFetch:
yield self._fetch(None)
elif self.clobberOnFailure:
yield self.clobber()
else:
raise buildstep.BuildStepFailed() |
<SYSTEM_TASK:>
Perform full clone and checkout to the revision if specified
<END_TASK>
<USER_TASK:>
Description:
def _fullClone(self, shallowClone=False):
"""Perform full clone and checkout to the revision if specified
In the case of shallow clones, if any of the steps fail, abort the whole build step.
""" |
res = yield self._clone(shallowClone)
if res != RC_SUCCESS:
return res
# If revision specified checkout that revision
if self.revision:
res = yield self._dovccmd(['reset', '--hard',
self.revision, '--'],
shallowClone)
# init and update submodules recursively; if there is nothing to
# recurse into, this is a no-op.
if self.submodules:
res = yield self._dovccmd(['submodule', 'update',
'--init', '--recursive'],
shallowClone)
return res |
<SYSTEM_TASK:>
Remove the work directory
<END_TASK>
<USER_TASK:>
Description:
def _doClobber(self):
"""Remove the work directory""" |
rc = yield self.runRmdir(self.workdir, timeout=self.timeout)
if rc != RC_SUCCESS:
raise RuntimeError("Failed to delete directory")
return rc |
<SYSTEM_TASK:>
Catch a POST request from BitBucket and start a build process
<END_TASK>
<USER_TASK:>
Description:
def getChanges(self, request):
"""Catch a POST request from BitBucket and start a build process
Check the URL below if you require more information about payload
https://confluence.atlassian.com/display/BITBUCKET/POST+Service+Management
:param request: the http request Twisted object
:param options: additional options
""" |
event_type = request.getHeader(_HEADER_EVENT)
event_type = bytes2unicode(event_type)
payload = json.loads(bytes2unicode(request.args[b'payload'][0]))
repo_url = '{}{}'.format(
payload['canon_url'], payload['repository']['absolute_url'])
project = request.args.get(b'project', [b''])[0]
project = bytes2unicode(project)
changes = []
for commit in payload['commits']:
changes.append({
'author': commit['raw_author'],
'files': [f['file'] for f in commit['files']],
'comments': commit['message'],
'revision': commit['raw_node'],
'when_timestamp': dateparse(commit['utctimestamp']),
'branch': commit['branch'],
'revlink': '{}commits/{}'.format(repo_url, commit['raw_node']),
'repository': repo_url,
'project': project,
'properties': {
'event': event_type,
},
})
log.msg('New revision: {}'.format(commit['node']))
log.msg('Received {} changes from bitbucket'.format(len(changes)))
return (changes, payload['repository']['scm']) |
<SYSTEM_TASK:>
This method returns Build's properties according to property filters.
<END_TASK>
<USER_TASK:>
Description:
def _generate_filtered_properties(self, props, filters):
"""
This method returns Build's properties according to property filters.
:param props: Properties as a dict (from db)
:param filters: Desired properties keys as a list (from API URI)
""" |
# by default no properties are returned
if props and filters:
return (props
if '*' in filters
else dict(((k, v) for k, v in props.items() if k in filters))) |
<SYSTEM_TASK:>
get the value from vault secret backend
<END_TASK>
<USER_TASK:>
Description:
def get(self, entry):
"""
get the value from vault secret backend
""" |
if self.apiVersion == 1:
path = self.secretsmount + '/' + entry
else:
path = self.secretsmount + '/data/' + entry
# note that the HTTP path contains v1 for both versions of the key-value
# secret engine. Different versions of the key-value engine are
# effectively separate secret engines in vault, with the same base HTTP
# API, but with different paths within it.
proj = yield self._http.get('/v1/{0}'.format(path))
code = yield proj.code
if code != 200:
raise KeyError("The key %s does not exist in Vault provider: request"
" return code:%d." % (entry, code))
json = yield proj.json()
if self.apiVersion == 1:
ret = json.get('data', {}).get('value')
else:
ret = json.get('data', {}).get('data', {}).get('value')
return ret |
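The only difference between the two engine versions is the extra 'data/' path component (plus the extra nesting in the JSON response). For a hypothetical mount 'secret' and entry 'db/password':

secretsmount, entry = "secret", "db/password"   # invented values
v1_path = secretsmount + '/' + entry            # secret/db/password
v2_path = secretsmount + '/data/' + entry       # secret/data/db/password
# both are requested as /v1/<path>; the 'v1' HTTP prefix is unrelated
# to the key-value engine version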
<SYSTEM_TASK:>
Format the duration.
<END_TASK>
<USER_TASK:>
Description:
def formatDuration(self, duration):
"""Format the duration.
This method could be overridden if really needed, as the duration format in gerrit
is an arbitrary string.
:param duration: duration in timedelta
""" |
days = duration.days
hours, remainder = divmod(duration.seconds, 3600)
minutes, seconds = divmod(remainder, 60)
if days:
return '{} day{} {}h {}m {}s'.format(days, "s" if days > 1 else "",
hours, minutes, seconds)
elif hours:
return '{}h {}m {}s'.format(hours, minutes, seconds)
return '{}m {}s'.format(minutes, seconds) |
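A quick check of the divmod arithmetic above with an invented duration of one day, one hour, two minutes and five seconds:

import datetime

d = datetime.timedelta(days=1, seconds=3725)   # 1h 2m 5s past one day
hours, remainder = divmod(d.seconds, 3600)     # 1, 125
minutes, seconds = divmod(remainder, 60)       # 2, 5
assert (hours, minutes, seconds) == (1, 2, 5)
# formatDuration(d) would return '1 day 1h 2m 5s'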
<SYSTEM_TASK:>
Get the gerrit changes
<END_TASK>
<USER_TASK:>
Description:
def getGerritChanges(props):
""" Get the gerrit changes
This method could be overridden if really needed to accommodate for other
custom steps method for fetching gerrit changes.
:param props: an IProperty
:return: (optionally via deferred) a list of dictionaries with at least
change_id and revision_id,
whose format is the one accepted by the gerrit REST API as of
/changes/:change_id/revision/:revision_id paths (see gerrit doc)
""" |
if 'gerrit_changes' in props:
return props.getProperty('gerrit_changes')
if 'event.change.number' in props:
return [{
'change_id': props.getProperty('event.change.number'),
'revision_id': props.getProperty('event.patchSet.number')
}]
return [] |
<SYSTEM_TASK:>
Split CONTENT on a line boundary into a prefix smaller than 64k and
<END_TASK>
<USER_TASK:>
Description:
def _splitBigChunk(self, content, logid):
"""
Split CONTENT on a line boundary into a prefix smaller than 64k and
a suffix containing the remainder, omitting the splitting newline.
""" |
# if it's small enough, just return it
if len(content) < self.MAX_CHUNK_SIZE:
return content, None
# find the last newline before the limit
i = content.rfind(b'\n', 0, self.MAX_CHUNK_SIZE)
if i != -1:
return content[:i], content[i + 1:]
log.msg('truncating long line for log %d' % logid)
# first, truncate this down to something that decodes correctly
truncline = content[:self.MAX_CHUNK_SIZE]
while truncline:
try:
truncline.decode('utf-8')
break
except UnicodeDecodeError:
truncline = truncline[:-1]
# then find the beginning of the next line
i = content.find(b'\n', self.MAX_CHUNK_SIZE)
if i == -1:
return truncline, None
return truncline, content[i + 1:] |
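The byte-wise backoff loop is the key trick: cutting UTF-8 at an arbitrary byte offset can split a multi-byte character, so the code retreats one byte at a time until the prefix decodes. The same idea in isolation:

def truncate_utf8(data, limit):
    """Longest prefix of at most `limit` bytes that is valid UTF-8."""
    chunk = data[:limit]
    while chunk:
        try:
            chunk.decode('utf-8')
            break
        except UnicodeDecodeError:
            chunk = chunk[:-1]   # drop the trailing partial character
    return chunk

# b'caf\xc3\xa9' is 'café'; cutting at 4 bytes would split the 'é'
assert truncate_utf8('café'.encode('utf-8'), 4) == b'caf'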
<SYSTEM_TASK:>
Update the cookie after session object was modified
<END_TASK>
<USER_TASK:>
Description:
def updateSession(self, request):
"""
Update the cookie after session object was modified
@param request: the request object which should get a new cookie
""" |
# we actually need to copy some hardcoded constants from twisted :-(
# Make sure we aren't creating a secure session on a non-secure page
secure = request.isSecure()
if not secure:
cookieString = b"TWISTED_SESSION"
else:
cookieString = b"TWISTED_SECURE_SESSION"
cookiename = b"_".join([cookieString] + request.sitepath)
request.addCookie(cookiename, self.uid, path=b"/",
secure=secure) |
<SYSTEM_TASK:>
uid is now generated automatically according to the claims.
<END_TASK>
<USER_TASK:>
Description:
def uid(self):
"""uid is now generated automatically according to the claims.
This should actually only be used for cookie generation
""" |
exp = datetime.datetime.utcnow() + self.expDelay
claims = {
'user_info': self.user_info,
# Note that we use JWT standard 'exp' field to implement session expiration
# we completely bypass twisted.web session expiration mechanisms
'exp': calendar.timegm(datetime.datetime.timetuple(exp))}
return jwt.encode(claims, self.site.session_secret, algorithm=SESSION_SECRET_ALGORITHM) |
<SYSTEM_TASK:>
Make worker base directory if needed.
<END_TASK>
<USER_TASK:>
Description:
def _makeBaseDir(basedir, quiet):
"""
Make worker base directory if needed.
@param basedir: worker base directory relative path
@param quiet: if True, don't print info messages
@raise CreateWorkerError: on error making base directory
""" |
if os.path.exists(basedir):
if not quiet:
print("updating existing installation")
return
if not quiet:
print("mkdir", basedir)
try:
os.mkdir(basedir)
except OSError as exception:
raise CreateWorkerError("error creating directory {0}: {1}".format(
basedir, exception.strerror)) |
<SYSTEM_TASK:>
Create buildbot.tac file. If buildbot.tac file already exists with
<END_TASK>
<USER_TASK:>
Description:
def _makeBuildbotTac(basedir, tac_file_contents, quiet):
"""
Create buildbot.tac file. If buildbot.tac file already exists with
different contents, create buildbot.tac.new instead.
@param basedir: worker base directory relative path
@param tac_file_contents: contents of buildbot.tac file to write
@param quiet: if True, don't print info messages
@raise CreateWorkerError: on error reading or writing tac file
""" |
tacfile = os.path.join(basedir, "buildbot.tac")
if os.path.exists(tacfile):
try:
with open(tacfile, "rt") as f:
oldcontents = f.read()
except IOError as exception:
raise CreateWorkerError("error reading {0}: {1}".format(
tacfile, exception.strerror))
if oldcontents == tac_file_contents:
if not quiet:
print("buildbot.tac already exists and is correct")
return
if not quiet:
print("not touching existing buildbot.tac")
print("creating buildbot.tac.new instead")
tacfile = os.path.join(basedir, "buildbot.tac.new")
try:
with open(tacfile, "wt") as f:
f.write(tac_file_contents)
os.chmod(tacfile, 0o600)
except IOError as exception:
raise CreateWorkerError("could not write {0}: {1}".format(
tacfile, exception.strerror)) |
<SYSTEM_TASK:>
Returns true if both buildrequests can be merged, via Deferred.
<END_TASK>
<USER_TASK:>
Description:
def canBeCollapsed(master, br1, br2):
"""
Returns true if both buildrequests can be merged, via Deferred.
This implements Buildbot's default collapse strategy.
""" |
# short-circuit: if these are for the same buildset, collapse away
if br1['buildsetid'] == br2['buildsetid']:
return True
# get the buildsets for each buildrequest
selfBuildsets = yield master.data.get(
('buildsets', str(br1['buildsetid'])))
otherBuildsets = yield master.data.get(
('buildsets', str(br2['buildsetid'])))
# extract sourcestamps, as dictionaries by codebase
selfSources = dict((ss['codebase'], ss)
for ss in selfBuildsets['sourcestamps'])
otherSources = dict((ss['codebase'], ss)
for ss in otherBuildsets['sourcestamps'])
# if the sets of codebases do not match, we can't collapse
if set(selfSources) != set(otherSources):
return False
for c, selfSS in selfSources.items():
otherSS = otherSources[c]
if selfSS['repository'] != otherSS['repository']:
return False
if selfSS['branch'] != otherSS['branch']:
return False
if selfSS['project'] != otherSS['project']:
return False
# anything with a patch won't be collapsed
if selfSS['patch'] or otherSS['patch']:
return False
# get changes & compare
selfChanges = yield master.data.get(('sourcestamps', selfSS['ssid'], 'changes'))
otherChanges = yield master.data.get(('sourcestamps', otherSS['ssid'], 'changes'))
# if both have changes, proceed, else fail - if no changes check revision instead
if selfChanges and otherChanges:
continue
elif selfChanges and not otherChanges:
return False
elif not selfChanges and otherChanges:
return False
# else check revisions
elif selfSS['revision'] != otherSS['revision']:
return False
return True |
<SYSTEM_TASK:>
Return a reason for the merged build request.
<END_TASK>
<USER_TASK:>
Description:
def mergeReasons(self, others):
"""Return a reason for the merged build request.""" |
reasons = []
for req in [self] + others:
if req.reason and req.reason not in reasons:
reasons.append(req.reason)
return ", ".join(reasons) |
<SYSTEM_TASK:>
Try to remove the old mock logs first.
<END_TASK>
<USER_TASK:>
Description:
def start(self):
"""
Try to remove the old mock logs first.
""" |
if self.resultdir:
for lname in self.mock_logfiles:
self.logfiles[lname] = self.build.path_module.join(self.resultdir,
lname)
else:
for lname in self.mock_logfiles:
self.logfiles[lname] = lname
self.addLogObserver('state.log', MockStateObserver())
cmd = remotecommand.RemoteCommand('rmdir', {'dir':
[self.build.path_module.join('build', self.logfiles[l])
for l in self.mock_logfiles]})
d = self.runCommand(cmd)
# must resolve super() outside of the callback context.
super_ = super()
@d.addCallback
def removeDone(cmd):
super_.start()
d.addErrback(self.failed) |
<SYSTEM_TASK:>
get a Contact instance for ``user`` on ``channel``
<END_TASK>
<USER_TASK:>
Description:
def getContact(self, user=None, channel=None):
""" get a Contact instance for ``user`` on ``channel`` """ |
try:
return self.contacts[(channel, user)]
except KeyError:
new_contact = self.contactClass(self, user=user, channel=channel)
self.contacts[(channel, user)] = new_contact
new_contact.setServiceParent(self)
return new_contact |
<SYSTEM_TASK:>
An issue: a failing, erroring, etc. test.
<END_TASK>
<USER_TASK:>
Description:
def issue(self, test, err):
"""An issue - failing, erroring etc test.""" |
self.step.setProgress('tests failed', len(self.failures) +
len(self.errors)) |
<SYSTEM_TASK:>
Consumes the merge_request JSON as a python object and turn it into a buildbot change.
<END_TASK>
<USER_TASK:>
Description:
def _process_merge_request_change(self, payload, event, codebase=None):
"""
Consumes the merge_request JSON as a python object and turn it into a buildbot change.
:arguments:
payload
Python Object that represents the JSON sent by GitLab Service
Hook.
""" |
attrs = payload['object_attributes']
commit = attrs['last_commit']
when_timestamp = dateparse(commit['timestamp'])
# @todo provide and document a way to choose between http and ssh url
repo_url = attrs['target']['git_http_url']
# project name from http headers is empty for me, so get it from object_attributes/target/name
project = attrs['target']['name']
# Filter out uninteresting events
state = attrs['state']
if re.match('^(closed|merged|approved)$', state):
log.msg("GitLab MR#{}: Ignoring because state is {}".format(attrs['iid'], state))
return []
action = attrs['action']
if not re.match('^(open|reopen)$', action) and not (action == "update" and "oldrev" in attrs):
log.msg("GitLab MR#{}: Ignoring because action {} was not open or "
"reopen or an update that added code".format(attrs['iid'],
action))
return []
changes = [{
'author': '%s <%s>' % (commit['author']['name'],
commit['author']['email']),
'files': [], # @todo use rest API
'comments': "MR#{}: {}\n\n{}".format(attrs['iid'], attrs['title'], attrs['description']),
'revision': commit['id'],
'when_timestamp': when_timestamp,
'branch': attrs['target_branch'],
'repository': repo_url,
'project': project,
'category': event,
'revlink': attrs['url'],
'properties': {
'source_branch': attrs['source_branch'],
'source_project_id': attrs['source_project_id'],
'source_repository': attrs['source']['git_http_url'],
'source_git_ssh_url': attrs['source']['git_ssh_url'],
'target_branch': attrs['target_branch'],
'target_project_id': attrs['target_project_id'],
'target_repository': attrs['target']['git_http_url'],
'target_git_ssh_url': attrs['target']['git_ssh_url'],
'event': event,
},
}]
if codebase is not None:
changes[0]['codebase'] = codebase
return changes |
<SYSTEM_TASK:>
Responds only to POST events and starts the build process
<END_TASK>
<USER_TASK:>
Description:
def getChanges(self, request):
"""
Responds only to POST events and starts the build process
:arguments:
request
the http request object
""" |
expected_secret = isinstance(self.options, dict) and self.options.get('secret')
if expected_secret:
received_secret = request.getHeader(_HEADER_GITLAB_TOKEN)
received_secret = bytes2unicode(received_secret)
p = Properties()
p.master = self.master
expected_secret_value = yield p.render(expected_secret)
if received_secret != expected_secret_value:
raise ValueError("Invalid secret")
try:
content = request.content.read()
payload = json.loads(bytes2unicode(content))
except Exception as e:
raise ValueError("Error loading JSON: " + str(e))
event_type = request.getHeader(_HEADER_EVENT)
event_type = bytes2unicode(event_type)
# newer versions of gitlab have an object_kind parameter,
# which makes the http header unnecessary
event_type = payload.get('object_kind', event_type)
codebase = request.args.get(b'codebase', [None])[0]
codebase = bytes2unicode(codebase)
if event_type in ("push", "tag_push", "Push Hook"):
user = payload['user_name']
repo = payload['repository']['name']
repo_url = payload['repository']['url']
changes = self._process_change(
payload, user, repo, repo_url, event_type, codebase=codebase)
elif event_type == 'merge_request':
changes = self._process_merge_request_change(
payload, event_type, codebase=codebase)
else:
changes = []
if changes:
log.msg("Received {} changes from {} gitlab event".format(
len(changes), event_type))
return (changes, 'git') |
<SYSTEM_TASK:>
return the full spec of the connector as a list of dicts
<END_TASK>
<USER_TASK:>
Description:
def allEndpoints(self):
"""return the full spec of the connector as a list of dicts
""" |
paths = []
for k, v in sorted(self.matcher.iterPatterns()):
paths.append(dict(path="/".join(k),
plural=str(v.rtype.plural),
type=str(v.rtype.entityType.name),
type_spec=v.rtype.entityType.getSpec()))
return paths |
<SYSTEM_TASK:>
concat a path for this name
<END_TASK>
<USER_TASK:>
Description:
def addEnvPath(env, name, value):
""" concat a path for this name """ |
try:
oldval = env[name]
if not oldval.endswith(';'):
oldval = oldval + ';'
except KeyError:
oldval = ""
if not value.endswith(';'):
value = value + ';'
env[name] = oldval + value |
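Usage sketch with invented values; note the helper always leaves a trailing ';' separator and treats a missing variable as empty:

env = {'PATH': r'C:\Windows'}
addEnvPath(env, 'PATH', r'C:\tools')
assert env['PATH'] == r'C:\Windows;C:\tools;'

addEnvPath(env, 'INCLUDE', r'C:\headers')   # missing key starts empty
assert env['INCLUDE'] == r'C:\headers;'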
<SYSTEM_TASK:>
Ping the worker to make sure it is still there. Returns a Deferred
<END_TASK>
<USER_TASK:>
Description:
def ping(self, status=None):
"""Ping the worker to make sure it is still there. Returns a Deferred
that fires with True if it is.
@param status: if you point this at a BuilderStatus, a 'pinging'
event will be pushed.
""" |
newping = not self.ping_watchers
d = defer.Deferred()
self.ping_watchers.append(d)
if newping:
Ping().ping(self.worker.conn).addBoth(self._pong)
return d |
<SYSTEM_TASK:>
Call me at checkConfig time to properly report config error
<END_TASK>
<USER_TASK:>
Description:
def checkAvailable(from_module):
"""Call me at checkConfig time to properly report config error
if neither txrequests nor treq is installed
""" |
if txrequests is None and treq is None:
config.error("neither txrequests nor treq is installed, but {} is requiring it\n\n{}".format(
from_module, HTTPClientService.TREQ_PROS_AND_CONS)) |
<SYSTEM_TASK:>
Return a string of human readable time delta.
<END_TASK>
<USER_TASK:>
Description:
def human_readable_delta(start, end):
"""
Return a string of human readable time delta.
""" |
start_date = datetime.datetime.fromtimestamp(start)
end_date = datetime.datetime.fromtimestamp(end)
delta = end_date - start_date
result = []
if delta.days > 0:
result.append('%d days' % (delta.days,))
if delta.seconds > 0:
hours = int(delta.seconds / 3600)
if hours > 0:
result.append('%d hours' % (hours,))
minutes = int((delta.seconds - hours * 3600) / 60)
if minutes:
result.append('%d minutes' % (minutes,))
seconds = delta.seconds % 60
if seconds > 0:
result.append('%d seconds' % (seconds,))
if result:
return ', '.join(result)
return 'super fast' |
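With invented timestamps one hour, one minute and five seconds apart, the helper would produce '1 hours, 1 minutes, 5 seconds' (the unit names are not singularized):

import datetime

start = 1_600_000_000.0
end = start + 3665    # 1 hour, 1 minute, 5 seconds later
delta = (datetime.datetime.fromtimestamp(end)
         - datetime.datetime.fromtimestamp(start))
assert (delta.days, delta.seconds) == (0, 3665)
# human_readable_delta(start, end) -> '1 hours, 1 minutes, 5 seconds'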
<SYSTEM_TASK:>
decorate a function by running it with maybeDeferred in a reactor
<END_TASK>
<USER_TASK:>
Description:
def in_reactor(f):
"""decorate a function by running it with maybeDeferred in a reactor""" |
def wrap(*args, **kwargs):
from twisted.internet import reactor, defer
result = []
def _async():
d = defer.maybeDeferred(f, *args, **kwargs)
@d.addErrback
def eb(f):
f.printTraceback()
@d.addBoth
def do_stop(r):
result.append(r)
reactor.stop()
reactor.callWhenRunning(_async)
reactor.run()
return result[0]
wrap.__doc__ = f.__doc__
wrap.__name__ = f.__name__
wrap._orig = f # for tests
return wrap |
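A hypothetical use of the decorator: the wrapped function may return a plain value or a Deferred; the wrapper runs the reactor until the result is available and returns it synchronously (the reactor can only be started once per process, so this suits one-shot CLI entry points):

from twisted.internet import defer

@in_reactor
def fetch_answer():
    return defer.succeed(42)   # any value or Deferred works

assert fetch_answer() == 42    # blocks until the deferred fires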
<SYSTEM_TASK:>
Returns the submitted_at of the oldest unclaimed build request for
<END_TASK>
<USER_TASK:>
Description:
def getOldestRequestTime(self):
"""Returns the submitted_at of the oldest unclaimed build request for
this builder, or None if there are no build requests.
@returns: datetime instance or None, via Deferred
""" |
bldrid = yield self.getBuilderId()
unclaimed = yield self.master.data.get(
('builders', bldrid, 'buildrequests'),
[resultspec.Filter('claimed', 'eq', [False])],
order=['submitted_at'], limit=1)
if unclaimed:
return unclaimed[0]['submitted_at'] |
<SYSTEM_TASK:>
Returns the complete_at of the latest completed build request for
<END_TASK>
<USER_TASK:>
Description:
def getNewestCompleteTime(self):
"""Returns the complete_at of the latest completed build request for
this builder, or None if there are no such build requests.
@returns: datetime instance or None, via Deferred
""" |
bldrid = yield self.getBuilderId()
completed = yield self.master.data.get(
('builders', bldrid, 'buildrequests'),
[resultspec.Filter('complete', 'eq', [True])],  # completed requests, per the docstring
order=['-complete_at'], limit=1)
if completed:
return completed[0]['complete_at']
else:
return None |
<SYSTEM_TASK:>
This is invoked by the Worker when the self.workername bot
<END_TASK>
<USER_TASK:>
Description:
def attached(self, worker, commands):
"""This is invoked by the Worker when the self.workername bot
registers their builder.
@type worker: L{buildbot.worker.Worker}
@param worker: the Worker that represents the worker as a whole
@type commands: dict: string -> string, or None
@param commands: provides the worker's version of each RemoteCommand
@rtype: L{twisted.internet.defer.Deferred}
@return: a Deferred that fires (with 'self') when the worker-side
builder is fully attached and ready to accept commands.
""" |
for w in self.attaching_workers + self.workers:
if w.worker == worker:
# already attached to them. This is fairly common, since
# attached() gets called each time we receive the builder
# list from the worker, and we ask for it each time we add or
# remove a builder. So if the worker is hosting builders
# A,B,C, and the config file changes A, we'll remove A and
# re-add it, triggering two builder-list requests, getting
# two redundant calls to attached() for B, and another two
# for C.
#
# Therefore, when we see that we're already attached, we can
# just ignore it.
return defer.succeed(self)
wfb = workerforbuilder.WorkerForBuilder()
wfb.setBuilder(self)
self.attaching_workers.append(wfb)
try:
wfb = yield wfb.attached(worker, commands)
self.attaching_workers.remove(wfb)
self.workers.append(wfb)
return self
except Exception as e: # pragma: no cover
# already log.err'ed by WorkerForBuilder._attachFailure
# TODO: remove from self.workers (except that detached() should get
# run first, right?)
log.err(e, 'worker failed to attach')
return None |
<SYSTEM_TASK:>
This is called when the connection to the bot is lost.
<END_TASK>
<USER_TASK:>
Description:
def detached(self, worker):
"""This is called when the connection to the bot is lost.""" |
for wfb in self.attaching_workers + self.workers:
if wfb.worker == worker:
break
else:
log.msg("WEIRD: Builder.detached(%s) (%s)"
" not in attaching_workers(%s)"
" or workers(%s)" % (worker, worker.workername,
self.attaching_workers,
self.workers))
return
if wfb in self.attaching_workers:
self.attaching_workers.remove(wfb)
if wfb in self.workers:
self.workers.remove(wfb)
# inform the WorkerForBuilder that their worker went away
wfb.detached() |
<SYSTEM_TASK:>
A cheat that routes around the impedance mismatch between
<END_TASK>
<USER_TASK:>
Description:
def _spawnAsBatch(self, processProtocol, executable, args, env,
path, usePTY):
"""A cheat that routes around the impedance mismatch between
twisted and cmd.exe with respect to escaping quotes""" |
# NamedTemporaryFile differs in PY2 and PY3.
# In PY2, it needs encoded str and its encoding cannot be specified.
# In PY3, it needs str which is unicode and its encoding can be specified.
if PY3:
tf = NamedTemporaryFile(mode='w+', dir='.', suffix=".bat",
delete=False, encoding=self.builder.unicode_encoding)
else:
tf = NamedTemporaryFile(mode='w+', dir='.', suffix=".bat",
delete=False)
# echo off hides this cheat from the log files.
tf.write(u"@echo off\n")
if isinstance(self.command, (string_types, bytes)):
tf.write(bytes2NativeString(self.command, self.builder.unicode_encoding))
else:
tf.write(win32_batch_quote(self.command, self.builder.unicode_encoding))
tf.close()
argv = os.environ['COMSPEC'].split() # allow %COMSPEC% to have args
if '/c' not in argv:
argv += ['/c']
argv += [tf.name]
def unlink_temp(result):
os.unlink(tf.name)
return result
self.deferred.addBoth(unlink_temp)
return reactor.spawnProcess(processProtocol, executable, argv, env,
path, usePTY=usePTY) |
<SYSTEM_TASK:>
limit the chunks that we send over PB to 128k, since it has a hardwired
<END_TASK>
<USER_TASK:>
Description:
def _chunkForSend(self, data):
"""
limit the chunks that we send over PB to 128k, since it has a hardwired
string-size limit of 640k.
""" |
LIMIT = self.CHUNK_LIMIT
for i in range(0, len(data), LIMIT):
yield data[i:i + LIMIT] |
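The slicing generator in isolation, with a 5-byte limit standing in for the real CHUNK_LIMIT:

def chunks(data, limit=5):   # CHUNK_LIMIT is 5 only for this demo
    for i in range(0, len(data), limit):
        yield data[i:i + limit]

assert list(chunks('hello world!')) == ['hello', ' worl', 'd!']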
<SYSTEM_TASK:>
Take msg, which is a dictionary of lists of output chunks, and
<END_TASK>
<USER_TASK:>
Description:
def _collapseMsg(self, msg):
"""
Take msg, which is a dictionary of lists of output chunks, and
concatenate all the chunks into a single string
""" |
retval = {}
for logname in msg:
data = u""
for m in msg[logname]:
m = bytes2unicode(m, self.builder.unicode_encoding)
data += m
if isinstance(logname, tuple) and logname[0] == 'log':
retval['log'] = (logname[1], data)
else:
retval[logname] = data
return retval |
<SYSTEM_TASK:>
Collapse and send msg to the master
<END_TASK>
<USER_TASK:>
Description:
def _sendMessage(self, msg):
"""
Collapse and send msg to the master
""" |
if not msg:
return
msg = self._collapseMsg(msg)
self.sendStatus(msg) |
<SYSTEM_TASK:>
Send all the content in our buffers.
<END_TASK>
<USER_TASK:>
Description:
def _sendBuffers(self):
"""
Send all the content in our buffers.
""" |
msg = {}
msg_size = 0
lastlog = None
logdata = []
while self.buffered:
# Grab the next bits from the buffer
logname, data = self.buffered.popleft()
# If this log is different than the last one, then we have to send
# out the message so far. This is because the message is
# transferred as a dictionary, which makes the ordering of keys
# unspecified, and makes it impossible to interleave data from
# different logs. A future enhancement could be to change the
# master to support a list of (logname, data) tuples instead of a
# dictionary.
# On our first pass through this loop lastlog is None
if lastlog is None:
lastlog = logname
elif logname != lastlog:
self._sendMessage(msg)
msg = {}
msg_size = 0
lastlog = logname
logdata = msg.setdefault(logname, [])
# Chunkify the log data to make sure we're not sending more than
# CHUNK_LIMIT at a time
for chunk in self._chunkForSend(data):
if not chunk:
continue
logdata.append(chunk)
msg_size += len(chunk)
if msg_size >= self.CHUNK_LIMIT:
# We've gone beyond the chunk limit, so send out our
# message. At worst this results in a message slightly
# larger than (2*CHUNK_LIMIT)-1
self._sendMessage(msg)
msg = {}
logdata = msg.setdefault(logname, [])
msg_size = 0
self.buflen = 0
if logdata:
self._sendMessage(msg)
if self.sendBuffersTimer:
if self.sendBuffersTimer.active():
self.sendBuffersTimer.cancel()
self.sendBuffersTimer = None |
<SYSTEM_TASK:>
Add data to the buffer for logname
<END_TASK>
<USER_TASK:>
Description:
def _addToBuffers(self, logname, data):
"""
Add data to the buffer for logname
Start a timer to send the buffers if BUFFER_TIMEOUT elapses.
If adding data causes the buffer size to grow beyond BUFFER_SIZE, then
the buffers will be sent.
""" |
n = len(data)
self.buflen += n
self.buffered.append((logname, data))
if self.buflen > self.BUFFER_SIZE:
self._sendBuffers()
elif not self.sendBuffersTimer:
self.sendBuffersTimer = self._reactor.callLater(
self.BUFFER_TIMEOUT, self._bufferTimeout) |
<SYSTEM_TASK:>
Halt the current step.
<END_TASK>
<USER_TASK:>
Description:
def remote_interruptCommand(self, stepId, why):
"""Halt the current step.""" |
log.msg("asked to interrupt current command: {0}".format(why))
self.activity()
if not self.command:
# TODO: just log it, a race could result in their interrupting a
# command that wasn't actually running
log.msg(" .. but none was running")
return
self.command.doInterrupt() |
<SYSTEM_TASK:>
Make any currently-running command die, with no further status
<END_TASK>
<USER_TASK:>
Description:
def stopCommand(self):
"""Make any currently-running command die, with no further status
output. This is used when the worker is shutting down or the
connection to the master has been lost. Interrupt the command,
silence it, and then forget about it.""" |
if not self.command:
return
log.msg("stopCommand: halting current command {0}".format(self.command))
self.command.doInterrupt() # shut up! and die!
self.command = None |
<SYSTEM_TASK:>
walk upwards from the current directory until we find this topfile
<END_TASK>
<USER_TASK:>
Description:
def getTopdir(topfile, start=None):
"""walk upwards from the current directory until we find this topfile""" |
if not start:
start = os.getcwd()
here = start
toomany = 20
while toomany > 0:
if os.path.exists(os.path.join(here, topfile)):
return here
        next_dir = os.path.dirname(here)
        if next_dir == here:
            break  # we've hit the root
        here = next_dir
toomany -= 1
output("Unable to find topfile '{}' anywhere "
"from {} upwards".format(topfile, start))
sys.exit(1) |
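A hedged usage sketch; the marker filename and directory layout below are made up, and on a machine without them the call would exit with an error as coded above.
# Suppose /home/user/project contains a marker file named '.topfile'
# and we start the search from a nested source directory:
topdir = getTopdir('.topfile', start='/home/user/project/src/pkg')
print(topdir)  # '/home/user/project' if the marker exists there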
<SYSTEM_TASK:>
This accepts the arguments of a command, without the actual
<END_TASK>
<USER_TASK:>
Description:
def dovc(self, cmd):
"""This accepts the arguments of a command, without the actual
command itself.""" |
env = os.environ.copy()
env['LC_ALL'] = "C"
d = utils.getProcessOutputAndValue(self.exe, cmd, env=env,
path=self.treetop)
d.addCallback(self._didvc, cmd)
return d |
<SYSTEM_TASK:>
Return a Deferred that fires with a SourceStamp instance.
<END_TASK>
<USER_TASK:>
Description:
def get(self):
"""Return a Deferred that fires with a SourceStamp instance.""" |
d = self.getBaseRevision()
d.addCallback(self.getPatch)
d.addCallback(self.done)
return d |
<SYSTEM_TASK:>
Version of check_output which does not throw error
<END_TASK>
<USER_TASK:>
Description:
def check_output(cmd):
"""Version of check_output which does not throw error""" |
popen = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)
out = popen.communicate()[0].strip()
if not isinstance(out, str):
out = out.decode(sys.stdout.encoding)
return out |
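A hedged usage sketch: unlike subprocess.check_output, a non-zero exit status is not raised as an exception; the caller simply inspects the (possibly empty) output. The redirection assumes a POSIX shell.
rev = check_output("git rev-parse --short HEAD 2>/dev/null")
if rev:
    print("current revision:", rev)
else:
    print("not inside a git checkout")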
<SYSTEM_TASK:>
Extract the tag if a source is from git archive.
<END_TASK>
<USER_TASK:>
Description:
def getVersionFromArchiveId(git_archive_id='$Format:%ct %d$'):
""" Extract the tag if a source is from git archive.
When source is exported via `git archive`, the git_archive_id init value is modified
and placeholders are expanded to the "archived" revision:
%ct: committer date, UNIX timestamp
%d: ref names, like the --decorate option of git-log
See man gitattributes(5) and git-log(1) (PRETTY FORMATS) for more details.
""" |
# mangle the magic string to make sure it is not replaced by git archive
if not git_archive_id.startswith('$For''mat:'):
# source was modified by git archive, try to parse the version from
# the value of git_archive_id
match = re.search(r'tag:\s*v([^,)]+)', git_archive_id)
if match:
# archived revision is tagged, use the tag
return gitDescribeToPep440(match.group(1))
# archived revision is not tagged, use the commit date
tstamp = git_archive_id.strip().split()[0]
d = datetime.datetime.utcfromtimestamp(int(tstamp))
return d.strftime('%Y.%m.%d')
return None |
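Two illustrative calls with hand-substituted values, as `git archive` would expand them (gitDescribeToPep440 is assumed to be defined elsewhere in the module); the timestamp 1514764800 is 2018-01-01 UTC.
# tagged revision: the tag wins and is converted to a PEP 440 version
print(getVersionFromArchiveId('1514764800  (HEAD -> master, tag: v1.8.0)'))
# untagged revision: fall back to the committer date
print(getVersionFromArchiveId('1514764800  (HEAD -> master)'))  # '2018.01.01'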
<SYSTEM_TASK:>
Write a block of data to the remote writer
<END_TASK>
<USER_TASK:>
Description:
def _writeBlock(self):
"""Write a block of data to the remote writer""" |
if self.interrupted or self.fp is None:
if self.debug:
log.msg('WorkerFileUploadCommand._writeBlock(): end')
return True
length = self.blocksize
if self.remaining is not None and length > self.remaining:
length = self.remaining
if length <= 0:
if self.stderr is None:
self.stderr = 'Maximum filesize reached, truncating file \'{0}\''.format(
self.path)
self.rc = 1
data = ''
else:
data = self.fp.read(length)
if self.debug:
log.msg('WorkerFileUploadCommand._writeBlock(): ' +
'allowed={0} readlen={1}'.format(length, len(data)))
if not data:
log.msg("EOF: callRemote(close)")
return True
if self.remaining is not None:
self.remaining = self.remaining - len(data)
assert self.remaining >= 0
d = self.writer.callRemote('write', data)
d.addCallback(lambda res: False)
return d |
<SYSTEM_TASK:>
Read a block of data from the remote reader.
<END_TASK>
<USER_TASK:>
Description:
def _readBlock(self):
"""Read a block of data from the remote reader.""" |
if self.interrupted or self.fp is None:
if self.debug:
log.msg('WorkerFileDownloadCommand._readBlock(): end')
return True
length = self.blocksize
if self.bytes_remaining is not None and length > self.bytes_remaining:
length = self.bytes_remaining
if length <= 0:
if self.stderr is None:
self.stderr = "Maximum filesize reached, truncating file '{0}'".format(
self.path)
self.rc = 1
return True
else:
d = self.reader.callRemote('read', length)
d.addCallback(self._writeData)
return d |
<SYSTEM_TASK:>
Generate a buildbot mail message and return a dictionary
<END_TASK>
<USER_TASK:>
Description:
def formatMessageForBuildResults(self, mode, buildername, buildset, build, master, previous_results, blamelist):
"""Generate a buildbot mail message and return a dictionary
containing the message body, type and subject.""" |
ss_list = buildset['sourcestamps']
results = build['results']
ctx = dict(results=build['results'],
mode=mode,
buildername=buildername,
workername=build['properties'].get(
'workername', ["<unknown>"])[0],
buildset=buildset,
build=build,
projects=self.getProjects(ss_list, master),
previous_results=previous_results,
status_detected=self.getDetectedStatus(
mode, results, previous_results),
build_url=utils.getURLForBuild(
master, build['builder']['builderid'], build['number']),
buildbot_url=master.config.buildbotURL,
blamelist=blamelist,
summary=self.messageSummary(build, results),
sourcestamps=self.messageSourceStamps(ss_list)
)
yield self.buildAdditionalContext(master, ctx)
msgdict = self.renderMessage(ctx)
return msgdict |
<SYSTEM_TASK:>
A full name, intended to uniquely identify a parameter
<END_TASK>
<USER_TASK:>
Description:
def fullName(self):
"""A full name, intended to uniquely identify a parameter""" |
        # join with '_' if both are set (we can't use '.', because the
        # full name is passed around as a **kwargs key)
if self.parentName and self.name:
return self.parentName + '_' + self.name
# otherwise just use the one that is set
# (this allows empty name for "anonymous nests")
return self.name or self.parentName |
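A minimal stand-in to show how the names combine; the real parameter class has many more attributes, so this just calls the function above on a bare object.
class _Param:  # illustrative stand-in, not the real class
    def __init__(self, name, parentName=None):
        self.name = name
        self.parentName = parentName

print(fullName(_Param('branch', 'nested')))  # 'nested_branch'
print(fullName(_Param('', 'nested')))        # 'nested' (anonymous nest)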
<SYSTEM_TASK:>
Simple customization point for child classes that do not need the other
<END_TASK>
<USER_TASK:>
Description:
def getFromKwargs(self, kwargs):
"""Simple customization point for child classes that do not need the other
parameters supplied to updateFromKwargs. Return the value for the property
named 'self.name'.
The default implementation converts from a list of items, validates using
the optional regex field and calls 'parse_from_args' for the final conversion.
""" |
args = kwargs.get(self.fullName, [])
        # drop whitespace-only args; build a new list instead of calling
        # remove() while iterating, which would skip elements
        args = [arg for arg in args
                if not (isinstance(arg, str) and not arg.strip())]
if not args:
if self.required:
raise ValidationError(
"'%s' needs to be specified" % (self.label))
if self.multiple:
args = self.default
else:
args = [self.default]
if self.regex:
for arg in args:
if not self.regex.match(arg):
raise ValidationError("%s:'%s' does not match pattern '%s'"
% (self.label, arg, self.regex.pattern))
if self.maxsize is not None:
for arg in args:
if len(arg) > self.maxsize:
raise ValidationError("%s: is too large %d > %d"
% (self.label, len(arg), self.maxsize))
try:
arg = self.parse_from_args(args)
except Exception as e:
# an exception will just display an alert in the web UI
# also log the exception
if self.debug:
traceback.print_exc()
raise e
if arg is None:
raise ValidationError("need %s: no default provided by config"
% (self.fullName,))
return arg |
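A standalone demo of the blank-filtering step fixed above; building a new list keeps iteration and mutation separate.
args = ['  ', 'release-1.0', '', 'hotfix']
args = [a for a in args if not (isinstance(a, str) and not a.strip())]
print(args)  # ['release-1.0', 'hotfix']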
<SYSTEM_TASK:>
Primary entry point to turn 'kwargs' into 'properties
<END_TASK>
<USER_TASK:>
Description:
def updateFromKwargs(self, properties, kwargs, collector, **unused):
"""Primary entry point to turn 'kwargs' into 'properties'""" |
properties[self.name] = self.getFromKwargs(kwargs) |
<SYSTEM_TASK:>
Secondary customization point, called from getFromKwargs to turn
<END_TASK>
<USER_TASK:>
Description:
def parse_from_args(self, l):
"""Secondary customization point, called from getFromKwargs to turn
a validated value into a single property value""" |
if self.multiple:
return [self.parse_from_arg(arg) for arg in l]
return self.parse_from_arg(l[0]) |
<SYSTEM_TASK:>
Collapse the child values into a dictionary. This is intended to be
<END_TASK>
<USER_TASK:>
Description:
def collectChildProperties(self, kwargs, properties, collector, **kw):
"""Collapse the child values into a dictionary. This is intended to be
called by child classes to fix up the fullName->name conversions.""" |
childProperties = {}
for field in self.fields: # pylint: disable=not-an-iterable
yield collector.collectValidationErrors(field.fullName,
field.updateFromKwargs,
kwargs=kwargs,
properties=childProperties,
collector=collector,
**kw)
kwargs[self.fullName] = childProperties |
<SYSTEM_TASK:>
By default, the child values will be collapsed into a dictionary. If
<END_TASK>
<USER_TASK:>
Description:
def updateFromKwargs(self, kwargs, properties, collector, **kw):
"""By default, the child values will be collapsed into a dictionary. If
the parent is anonymous, this dictionary is the top-level properties.""" |
yield self.collectChildProperties(kwargs=kwargs, properties=properties,
collector=collector, **kw)
# default behavior is to set a property
# -- use setdefault+update in order to collapse 'anonymous' nested
# parameters correctly
if self.name:
d = properties.setdefault(self.name, {})
else:
# if there's no name, collapse this nest all the way
d = properties
d.update(kwargs[self.fullName]) |
<SYSTEM_TASK:>
We check the parameters, and launch the build, if everything is correct
<END_TASK>
<USER_TASK:>
Description:
def force(self, owner, builderNames=None, builderid=None, **kwargs):
"""
We check the parameters, and launch the build, if everything is correct
""" |
builderNames = yield self.computeBuilderNames(builderNames, builderid)
if not builderNames:
raise KeyError("builderNames not specified or not supported")
        # Currently the validation code expects all kwargs to be lists.
        # I don't want to refactor that now, so make sure we comply...
kwargs = dict((k, [v]) if not isinstance(v, list) else (k, v)
for k, v in kwargs.items())
# probably need to clean that out later as the IProperty is already a
# validation mechanism
collector = ValidationErrorCollector()
reason = yield collector.collectValidationErrors(self.reason.fullName,
self.reason.getFromKwargs, kwargs)
if owner is None or owner == "anonymous":
owner = yield collector.collectValidationErrors(self.username.fullName,
self.username.getFromKwargs, kwargs)
properties, changeids, sourcestamps = yield self.gatherPropertiesAndChanges(
collector, **kwargs)
collector.maybeRaiseCollectedErrors()
properties.setProperty("reason", reason, "Force Build Form")
properties.setProperty("owner", owner, "Force Build Form")
r = self.reasonString % {'owner': owner, 'reason': reason}
# turn sourcestamps into a list
for cb, ss in sourcestamps.items():
ss['codebase'] = cb
sourcestamps = list(sourcestamps.values())
# everything is validated, we can create our source stamp, and
# buildrequest
res = yield self.addBuildsetForSourceStampsWithDefaults(
reason=r,
sourcestamps=sourcestamps,
properties=properties,
builderNames=builderNames,
)
return res |
<SYSTEM_TASK:>
get secrets from the provider defined in the secret using args and
<END_TASK>
<USER_TASK:>
Description:
def get(self, secret, *args, **kwargs):
"""
get secrets from the provider defined in the secret using args and
kwargs
        @param secret: the secret key to look up
        @type secret: string
        @rtype: SecretDetails
""" |
for provider in self.services:
value = yield provider.get(secret)
source_name = provider.__class__.__name__
if value is not None:
return SecretDetails(source_name, secret, value) |
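A minimal synchronous mimic of the first-provider-wins lookup above (the real method is a Twisted inlineCallbacks coroutine; the provider classes here are invented for illustration).
import os

class EnvProvider:
    def get(self, key):
        return os.environ.get(key)

class StaticProvider:
    def get(self, key):
        return {'db_password': 'hunter2'}.get(key)

for provider in (EnvProvider(), StaticProvider()):
    value = provider.get('db_password')
    if value is not None:
        print(provider.__class__.__name__, value)  # first hit wins
        break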
<SYSTEM_TASK:>
function for checking available frequency
<END_TASK>
<USER_TASK:>
Description:
def available_freq():
"""
function for checking available frequency
""" |
    with open(CPU_PREFIX + 'cpu0/cpufreq/scaling_available_frequencies') as f:
        freq = [int(value) for value in f.read().strip().split()]
    return freq |
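Usage sketch: this needs the Linux cpufreq sysfs, so guard for machines without it; the values reported are machine-specific and in kHz.
try:
    freqs = available_freq()
    print(max(freqs), 'kHz maximum')  # cpufreq reports kHz
except FileNotFoundError:
    print('no cpufreq sysfs on this machine')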
<SYSTEM_TASK:>
Pads data to a multiple of 8 bytes.
<END_TASK>
<USER_TASK:>
Description:
def padto8(data):
"""Pads data to the multiplies of 8 bytes.
This makes x86_64 faster and prevents
undefined behavior on other platforms""" |
length = len(data)
return data + b'\xdb' * (roundto8(length) - length) |
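A quick check of the padding invariant; roundto8 lives elsewhere in the module and is assumed to round up to the next multiple of 8, so it is restated here to keep the demo self-contained.
def roundto8(length):  # assumed helper, shown for a self-contained demo
    return (length + 7) & ~7

assert padto8(b'abc') == b'abc' + b'\xdb' * 5   # 3 -> 8 bytes
assert len(padto8(b'12345678')) == 8            # already aligned: unchanged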
<SYSTEM_TASK:>
A heuristic to find out if a function is simple enough.
<END_TASK>
<USER_TASK:>
Description:
def is_simple(fun):
"""A heuristic to find out if a function is simple enough.""" |
seen_load_fast_0 = False
seen_load_response = False
seen_call_fun = False
for instruction in dis.get_instructions(fun):
if instruction.opname == 'LOAD_FAST' and instruction.arg == 0:
seen_load_fast_0 = True
continue
if instruction.opname == 'LOAD_ATTR' \
and instruction.argval == 'Response':
seen_load_response = True
continue
if instruction.opname.startswith('CALL_FUNCTION'):
if seen_call_fun:
return False
seen_call_fun = True
continue
return seen_call_fun and seen_load_fast_0 and seen_load_response |
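Under CPython 3.10 and earlier (where call opcodes are named CALL_FUNCTION*), a handler like the one below should satisfy the heuristic: it loads its first argument, references a Response attribute, and makes exactly one call. The `web` name is assumed and never resolved at runtime, since dis only disassembles.
def handler(request):
    return web.Response(text=request.path)  # web is assumed, not imported

print(is_simple(handler))  # expected True on CPython <= 3.10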
<SYSTEM_TASK:>
Return a list of variable names being rejected for high
<END_TASK>
<USER_TASK:>
Description:
def get_rejected_variables(self, threshold=0.9):
"""Return a list of variable names being rejected for high
correlation with one of remaining variables.
        Parameters
        ----------
        threshold : float
            Variables with a correlation value above this threshold are rejected
Returns
-------
list
The list of rejected variables or an empty list if the correlation has not been computed.
""" |
variable_profile = self.description_set['variables']
result = []
if hasattr(variable_profile, 'correlation'):
result = variable_profile.index[variable_profile.correlation > threshold].tolist()
return result |
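Hedged usage with the pandas-profiling 1.x API assumed by this method, where the report object exposes description_set; the column names are illustrative.
import pandas as pd
import pandas_profiling  # v1.x API assumed

df = pd.DataFrame({'a': list(range(100)), 'b': list(range(100))})  # perfectly correlated
report = pandas_profiling.ProfileReport(df)
print(report.get_rejected_variables(threshold=0.9))  # one of the correlated pair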
<SYSTEM_TASK:>
Write the report to a file.
<END_TASK>
<USER_TASK:>
Description:
def to_file(self, outputfile=DEFAULT_OUTPUTFILE):
"""Write the report to a file.
By default a name is generated.
        Parameters
        ----------
        outputfile : str
            The name or the path of the file to generate, including the extension (.html).
""" |
if outputfile != NO_OUTPUTFILE:
if outputfile == DEFAULT_OUTPUTFILE:
outputfile = 'profile_' + str(hash(self)) + ".html"
# TODO: should be done in the template
with codecs.open(outputfile, 'w+b', encoding='utf8') as self.file:
self.file.write(templates.template('wrapper').render(content=self.html)) |
<SYSTEM_TASK:>
Return a jinja template ready for rendering. If needed, global variables are initialized.
<END_TASK>
<USER_TASK:>
Description:
def template(template_name):
"""Return a jinja template ready for rendering. If needed, global variables are initialized.
Parameters
----------
template_name: str, the name of the template as defined in the templates mapping
Returns
-------
The Jinja template ready for rendering
""" |
    template_globals = None
    if template_name.startswith('row_'):
        # This is a row template that needs a global variable
        template_globals = dict()
        template_globals['vartype'] = var_type[template_name.split('_')[1].upper()]
    return jinja2_env.get_template(templates[template_name], globals=template_globals) |
<SYSTEM_TASK:>
Plot a histogram from the data and return the AxesSubplot object.
<END_TASK>
<USER_TASK:>
Description:
def _plot_histogram(series, bins=10, figsize=(6, 4), facecolor='#337ab7'):
"""Plot an histogram from the data and return the AxesSubplot object.
Parameters
----------
series : Series
The data to plot
figsize : tuple
The size of the figure (width, height) in inches, default (6,4)
facecolor : str
The color code.
Returns
-------
matplotlib.AxesSubplot
The plot.
""" |
if base.get_vartype(series) == base.TYPE_DATE:
# TODO: These calls should be merged
fig = plt.figure(figsize=figsize)
plot = fig.add_subplot(111)
plot.set_ylabel('Frequency')
try:
plot.hist(series.dropna().values, facecolor=facecolor, bins=bins)
        except TypeError:  # matplotlib 1.4 can't plot dates, so an empty plot is shown instead
pass
else:
plot = series.plot(kind='hist', figsize=figsize,
facecolor=facecolor,
bins=bins) # TODO when running on server, send this off to a different thread
return plot |
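Usage sketch with a plain numeric series; this assumes the module's matplotlib/pandas dependencies are available, and the output filename is arbitrary.
import pandas as pd

ax = _plot_histogram(pd.Series(range(100)), bins=20)
ax.figure.savefig('hist.png')  # arbitrary output name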