Code | Summary |
---|---|
Please provide a description of the function:def set_result(self, result):
if self.is_finished():
raise InternalError("set_result called on finished AsynchronousResponse",
result=self._result, exception=self._exception)
self._result = result
self.finish() | [
"Finish this response and set the result."
] |
Please provide a description of the function:def set_exception(self, exc_class, exc_info, exc_stack):
if self.is_finished():
raise InternalError("set_exception called on finished AsynchronousResponse",
result=self._result, exception=self._exception)
self._exception = (exc_class, exc_info, exc_stack)
self.finish() | [
"Set an exception as the result of this operation.\n\n Args:\n exc_class (object): The exception type "
] |
Please provide a description of the function:def wait(self, timeout=None):
flag = self._finished.wait(timeout=timeout)
if flag is False:
raise TimeoutExpiredError("Timeout waiting for response to event loop operation")
if self._exception is not None:
self._raise_exception()
return self._result | [
"Wait for this operation to finish.\n\n You can specify an optional timeout that defaults to no timeout if\n None is passed. The result of the operation is returned from this\n method. If the operation raised an exception, it is reraised from this\n method.\n\n Args:\n timeout (float): The maximum number of seconds to wait before timing\n out.\n "
] |
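A minimal usage sketch of the synchronous wait() above (the `response` object and the 5-second timeout are hypothetical; a worker thread is assumed to call set_result() or set_exception()):

try:
    result = response.wait(timeout=5.0)  # blocks until finish() or timeout
except TimeoutExpiredError:
    print("operation did not finish within 5 seconds")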
Please provide a description of the function:async def wait(self, timeout=None):
await asyncio.wait_for(self._future, timeout)
if self._exception is not None:
self._raise_exception()
return self._result | [
"Wait for this operation to finish.\n\n You can specify an optional timeout that defaults to no timeout if\n None is passed. The result of the operation is returned from this\n method. If the operation raised an exception, it is reraised from this\n method.\n\n Args:\n timeout (float): The maximum number of seconds to wait before timing\n out.\n "
] |
Please provide a description of the function:def get_released_versions(component):
releases = get_tags()
releases = sorted([(x[0], [int(y) for y in x[1].split('.')]) for x in releases], key=lambda x: x[1])[::-1]
return [(x[0], ".".join(map(str, x[1]))) for x in releases if x[0] == component] | [
"Get all released versions of the given component ordered newest to oldest\n "
] |
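Splitting into integer components before sorting matters because a plain string sort would order '0.9.0' after '0.10.0'. A self-contained illustration of the same logic (tag data invented):

tags = [("comp", "0.9.0"), ("comp", "0.10.0"), ("other", "1.0.0")]
parsed = sorted([(n, [int(y) for y in v.split('.')]) for n, v in tags],
                key=lambda x: x[1])[::-1]
print([(n, ".".join(map(str, v))) for n, v in parsed if n == "comp"])
# [('comp', '0.10.0'), ('comp', '0.9.0')] -- newest first, numerically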
Please provide a description of the function:def do_releases(self, subcmd, opts, component):
releases = get_released_versions(component)
for x in releases:
print("{} - {}".format(*x)) | [
"${cmd_name}: print all releases for the given component\n\n ${cmd_usage}\n ${cmd_option_list}\n "
] |
Please provide a description of the function:def do_dirty(self, subcmd, opts):
for comp_name, comp in components.comp_names.items():
releases = get_released_versions(comp_name)
if len(releases) == 0:
print(comp_name + ' - ' + 'No tagged releases')
else:
latest_tag = '-'.join(releases[0])
data = get_changed_since_tag(latest_tag, comp.path)
if len(data) > 0:
print(comp_name + ' - ' + 'Changed files in component tree') | [
"${cmd_name}: check if any components have unreleased changes\n\n ${cmd_usage}\n ${cmd_option_list}\n "
] |
Please provide a description of the function:def do_changed(self, subcmd, opts, component):
releases = get_released_versions(component)
latest = releases[0]
filter_dir = components.comp_names[component].path
latest_tag = '-'.join(latest)
data = get_changed_since_tag(latest_tag, filter_dir)
if len(data) > 0:
print(data) | [
"${cmd_name}: print all files changes in component since the latest release\n\n ${cmd_usage}\n ${cmd_option_list}\n "
] |
Please provide a description of the function:def load_dependencies(orig_tile, build_env):
if 'DEPENDENCIES' not in build_env:
build_env['DEPENDENCIES'] = []
dep_targets = []
chip = build_env['ARCH']
raw_arch_deps = chip.property('depends')
# Properly separate out the version information from the name of the dependency
# The raw keys come back as name,version
arch_deps = {}
for key, value in raw_arch_deps.items():
name, _, _ = key.partition(',')
arch_deps[name] = value
for dep in orig_tile.dependencies:
try:
tile = IOTile(os.path.join('build', 'deps', dep['unique_id']))
# Make sure we filter products using the view of module dependency products
# as seen in the target we are targeting.
if dep['name'] not in arch_deps:
tile.filter_products([])
else:
tile.filter_products(arch_deps[dep['name']])
except (ArgumentError, EnvironmentError):
raise BuildError("Could not find required dependency", name=dep['name'])
build_env['DEPENDENCIES'].append(tile)
target = os.path.join(tile.folder, 'module_settings.json')
dep_targets.append(target)
return dep_targets | [
"Load all tile dependencies and filter only the products from each that we use\n\n build_env must define the architecture that we are targeting so that we get the\n correct dependency list and products per dependency since that may change\n when building for different architectures\n "
] |
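The raw architecture dependency keys arrive as 'name,version'; str.partition keeps everything before the first comma as the name. A standalone sketch (the key and product names are invented for illustration):

raw_arch_deps = {"my_dependency,^1.0.0": ["lib_controller"]}
arch_deps = {}
for key, value in raw_arch_deps.items():
    name, _, _ = key.partition(',')
    arch_deps[name] = value
print(arch_deps)  # {'my_dependency': ['lib_controller']}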
Please provide a description of the function:def find_dependency_wheels(tile):
return [os.path.join(x.folder, 'python', x.support_wheel) for x in _iter_dependencies(tile) if x.has_wheel] | [
"Return a list of all python wheel objects created by dependencies of this tile\n\n Args:\n tile (IOTile): Tile that we should scan for dependencies\n\n Returns:\n list: A list of paths to dependency wheels\n "
] |
Please provide a description of the function:def _check_ver_range(self, version, ver_range):
lower, upper, lower_inc, upper_inc = ver_range
#If the range extends over everything, we automatically match
if lower is None and upper is None:
return True
if lower is not None:
if lower_inc and version < lower:
return False
elif not lower_inc and version <= lower:
return False
if upper is not None:
if upper_inc and version > upper:
return False
elif not upper_inc and version >= upper:
return False
# Prereleases have special matching requirements
if version.is_prerelease:
# Prereleases cannot match ranges that are not defined as prereleases
if (lower is None or not lower.is_prerelease) and (upper is None or not upper.is_prerelease):
return False
# Prereleases without the same major.minor.patch as a range end point cannot match
if (lower is not None and version.release_tuple != lower.release_tuple) and \
(upper is not None and version.release_tuple != upper.release_tuple):
return False
return True | [
"Check if version is included in ver_range\n "
] |
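Each ver_range is a 4-tuple (lower, upper, lower_inclusive, upper_inclusive), where None leaves that end unbounded. For example, the caret range ^1.2.0 built by FromString below corresponds roughly to the following sketch (assuming SemanticVersion.FromString as used elsewhere in this file):

lower = SemanticVersion.FromString("1.2.0")
upper = SemanticVersion.FromString("2.0.0")
ver_range = (lower, upper, True, False)  # >=1.2.0 and <2.0.0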
Please provide a description of the function:def _check_insersection(self, version, ranges):
for ver_range in ranges:
if not self._check_ver_range(version, ver_range):
return False
return True | [
"Check that a version is inside all of a list of ranges"
] |
Please provide a description of the function:def check(self, version):
for disjunct in self._disjuncts:
if self._check_insersection(version, disjunct):
return True
return False | [
"Check that a version is inside this SemanticVersionRange\n\n Args:\n version (SemanticVersion): The version to check\n\n Returns:\n bool: True if the version is included in the range, False if not\n "
] |
Please provide a description of the function:def filter(self, versions, key=lambda x: x):
return [x for x in versions if self.check(key(x))] | [
"Filter all of the versions in an iterable that match this version range\n\n Args:\n versions (iterable): An iterable of SemanticVersion objects\n\n Returns:\n list: A list of the SemanticVersion objects that matched this range\n "
] |
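The key argument lets you filter containers richer than bare versions. A hypothetical sketch, given a previously parsed ver_range (a SemanticVersionRange) and (version, url) tuples:

releases = [(SemanticVersion.FromString("1.0.0"), "https://example.com/a"),
            (SemanticVersion.FromString("2.1.0"), "https://example.com/b")]
matching = ver_range.filter(releases, key=lambda rel: rel[0])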
Please provide a description of the function:def FromString(cls, range_string):
disjuncts = None
range_string = range_string.strip()
if len(range_string) == 0:
raise ArgumentError("You must pass a finite string to SemanticVersionRange.FromString",
range_string=range_string)
# Check for *
if len(range_string) == 1 and range_string[0] == '*':
conj = (None, None, True, True)
disjuncts = [[conj]]
# Check for ^X.Y.Z
elif range_string[0] == '^':
ver = range_string[1:]
try:
ver = SemanticVersion.FromString(ver)
except DataError as err:
raise ArgumentError("Could not parse ^X.Y.Z version", parse_error=str(err), range_string=range_string)
lower = ver
upper = ver.inc_first_nonzero()
conj = (lower, upper, True, False)
disjuncts = [[conj]]
elif range_string[0] == '=':
ver = range_string[1:]
try:
ver = SemanticVersion.FromString(ver)
except DataError as err:
raise ArgumentError("Could not parse =X.Y.Z version", parse_error=str(err), range_string=range_string)
conj = (ver, ver, True, True)
disjuncts = [[conj]]
if disjuncts is None:
raise ArgumentError("Invalid range specification that could not be parsed", range_string=range_string)
return SemanticVersionRange(disjuncts) | [
"Parse a version range string into a SemanticVersionRange\n\n Currently, the only possible range strings are:\n\n ^X.Y.Z - matches all versions with the same leading nonzero digit\n greater than or equal the given range.\n * - matches everything\n =X.Y.Z - matches only the exact version given\n\n Args:\n range_string (string): A string specifying the version range\n\n Returns:\n SemanticVersionRange: The resulting version range object\n\n Raises:\n ArgumentError: if the range string does not define a valid range.\n "
] |
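Only the three documented forms parse; anything else raises ArgumentError:

SemanticVersionRange.FromString("*")       # matches every version
SemanticVersionRange.FromString("^1.2.3")  # >=1.2.3 and <2.0.0
SemanticVersionRange.FromString("=1.2.3")  # exactly 1.2.3
SemanticVersionRange.FromString("1.x")     # raises ArgumentError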
Please provide a description of the function:def _call_rpc(self, address, rpc_id, payload):
# FIXME: Set a timeout of 1.1 seconds to make sure we fail if the device hangs but
# this should be long enough to accommodate any actual RPCs we need to send.
status, response = self.hw.stream.send_rpc(address, rpc_id, payload, timeout=1.1)
return response | [
"Call an RPC with the given information and return its response.\n\n Must raise a hardware error of the appropriate kind if the RPC\n can not be executed correctly. Otherwise it should return the binary\n response payload received from the RPC.\n\n Args:\n address (int): The address of the tile we want to call the RPC\n on\n rpc_id (int): The id of the RPC that we want to call\n payload (bytes, bytearray): The data that we want to send as the payload\n "
] |
Please provide a description of the function:def FromReadings(cls, uuid, readings):
if len(readings) != 1:
raise ArgumentError("IndividualReading reports must be created with exactly one reading",
num_readings=len(readings))
reading = readings[0]
data = struct.pack("<BBHLLLL", 0, 0, reading.stream, uuid, 0, reading.raw_time, reading.value)
return IndividualReadingReport(data) | [
"Generate an instance of the report format from a list of readings and a uuid\n "
] |
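The packed layout is 20 bytes, little-endian: format (B), reserved (B), stream id (H), device uuid (L), sent timestamp (L), reading time (L), reading value (L). A standalone round trip with only the struct module (the field values are invented):

import struct
data = struct.pack("<BBHLLLL", 0, 0, 0x5001, 0x1234, 0, 100, 42)
assert len(data) == 20
fmt, _, stream, uuid, sent, rtime, value = struct.unpack("<BBHLLLL", data)
assert (fmt, stream, uuid, rtime, value) == (0, 0x5001, 0x1234, 100, 42)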
Please provide a description of the function:def decode(self):
fmt, _, stream, uuid, sent_timestamp, reading_timestamp, reading_value = unpack("<BBHLLLL", self.raw_report)
assert fmt == 0
# Estimate the UTC time when this device was turned on
time_base = self.received_time - datetime.timedelta(seconds=sent_timestamp)
reading = IOTileReading(reading_timestamp, stream, reading_value, time_base=time_base)
self.origin = uuid
self.sent_timestamp = sent_timestamp
return [reading], [] | [
"Decode this report into a single reading\n "
] |
Please provide a description of the function:def encode(self):
reading = self.visible_readings[0]
data = struct.pack("<BBHLLLL", 0, 0, reading.stream, self.origin,
self.sent_timestamp, reading.raw_time, reading.value)
return bytearray(data) | [
"Turn this report into a serialized bytearray that could be decoded with a call to decode"
] |
Please provide a description of the function:def InternalLaTeXAuxAction(XXXLaTeXAction, target = None, source= None, env=None):
global must_rerun_latex
# This routine is called with two actions. In this file for DVI builds
# with LaTeXAction and from the pdflatex.py with PDFLaTeXAction
# set this up now for the case where the user requests a different extension
# for the target filename
if (XXXLaTeXAction == LaTeXAction):
callerSuffix = ".dvi"
else:
callerSuffix = env['PDFSUFFIX']
basename = SCons.Util.splitext(str(source[0]))[0]
basedir = os.path.split(str(source[0]))[0]
basefile = os.path.split(str(basename))[1]
abspath = os.path.abspath(basedir)
targetext = os.path.splitext(str(target[0]))[1]
targetdir = os.path.split(str(target[0]))[0]
saved_env = {}
for var in SCons.Scanner.LaTeX.LaTeX.env_variables:
saved_env[var] = modify_env_var(env, var, abspath)
# Create base file names with the target directory since the auxiliary files
# will be made there. That's because the *COM variables have the cd
# command in the prolog. We check
# for the existence of files before opening them--even ones like the
# aux file that TeX always creates--to make it possible to write tests
# with stubs that don't necessarily generate all of the same files.
targetbase = os.path.join(targetdir, basefile)
# if there is a \makeindex there will be a .idx and thus
# we have to run makeindex at least once to keep the build
# happy even if there is no index.
# Same for glossaries, nomenclature, and acronyms
src_content = source[0].get_text_contents()
run_makeindex = makeindex_re.search(src_content) and not os.path.isfile(targetbase + '.idx')
run_nomenclature = makenomenclature_re.search(src_content) and not os.path.isfile(targetbase + '.nlo')
run_glossary = makeglossary_re.search(src_content) and not os.path.isfile(targetbase + '.glo')
run_glossaries = makeglossaries_re.search(src_content) and not os.path.isfile(targetbase + '.glo')
run_acronyms = makeacronyms_re.search(src_content) and not os.path.isfile(targetbase + '.acn')
saved_hashes = {}
suffix_nodes = {}
for suffix in all_suffixes+sum(newglossary_suffix, []):
theNode = env.fs.File(targetbase + suffix)
suffix_nodes[suffix] = theNode
saved_hashes[suffix] = theNode.get_csig()
if Verbose:
print("hashes: ",saved_hashes)
must_rerun_latex = True
# .aux files already processed by BibTex
already_bibtexed = []
#
# routine to update MD5 hash and compare
#
def check_MD5(filenode, suffix):
global must_rerun_latex
# two calls to clear old csig
filenode.clear_memoized_values()
filenode.ninfo = filenode.new_ninfo()
new_md5 = filenode.get_csig()
if saved_hashes[suffix] == new_md5:
if Verbose:
print("file %s not changed" % (targetbase+suffix))
return False # unchanged
saved_hashes[suffix] = new_md5
must_rerun_latex = True
if Verbose:
print("file %s changed, rerunning Latex, new hash = " % (targetbase+suffix), new_md5)
return True # changed
# generate the file name that latex will generate
resultfilename = targetbase + callerSuffix
count = 0
while (must_rerun_latex and count < int(env.subst('$LATEXRETRIES'))) :
result = XXXLaTeXAction(target, source, env)
if result != 0:
return result
count = count + 1
must_rerun_latex = False
# Decide if various things need to be run, or run again.
# Read the log file to find warnings/errors
logfilename = targetbase + '.log'
logContent = ''
if os.path.isfile(logfilename):
logContent = open(logfilename, "r").read()
# Read the fls file to find all .aux files
flsfilename = targetbase + '.fls'
flsContent = ''
auxfiles = []
if os.path.isfile(flsfilename):
flsContent = open(flsfilename, "r").read()
auxfiles = openout_aux_re.findall(flsContent)
# remove duplicates
dups = {}
for x in auxfiles:
dups[x] = 1
auxfiles = list(dups.keys())
bcffiles = []
if os.path.isfile(flsfilename):
flsContent = open(flsfilename, "r").read()
bcffiles = openout_bcf_re.findall(flsContent)
# remove duplicates
dups = {}
for x in bcffiles:
dups[x] = 1
bcffiles = list(dups.keys())
if Verbose:
print("auxfiles ",auxfiles)
print("bcffiles ",bcffiles)
# Now decide if bibtex will need to be run.
# The information that bibtex reads from the .aux file is
# pass-independent. If we find (below) that the .bbl file is unchanged,
# then the last latex saw a correct bibliography.
# Therefore only do this once
# Go through all .aux files and remember the files already done.
for auxfilename in auxfiles:
if auxfilename not in already_bibtexed:
already_bibtexed.append(auxfilename)
target_aux = os.path.join(targetdir, auxfilename)
if os.path.isfile(target_aux):
content = open(target_aux, "r").read()
if content.find("bibdata") != -1:
if Verbose:
print("Need to run bibtex on ",auxfilename)
bibfile = env.fs.File(SCons.Util.splitext(target_aux)[0])
result = BibTeXAction(bibfile, bibfile, env)
if result != 0:
check_file_error_message(env['BIBTEX'], 'blg')
must_rerun_latex = True
# Now decide if biber will need to be run.
# When the backend for biblatex is biber (by choice or default) the
# citation information is put in the .bcf file.
# The information that biber reads from the .bcf file is
# pass-independent. If we find (below) that the .bbl file is unchanged,
# then the last latex saw a correct bibliography.
# Therefore only do this once
# Go through all .bcf files and remember the files already done.
for bcffilename in bcffiles:
if bcffilename not in already_bibtexed:
already_bibtexed.append(bcffilename)
target_bcf = os.path.join(targetdir, bcffilename)
if os.path.isfile(target_bcf):
content = open(target_bcf, "r").read()
if content.find("bibdata") != -1:
if Verbose:
print("Need to run biber on ",bcffilename)
bibfile = env.fs.File(SCons.Util.splitext(target_bcf)[0])
result = BiberAction(bibfile, bibfile, env)
if result != 0:
check_file_error_message(env['BIBER'], 'blg')
must_rerun_latex = True
# Now decide if latex will need to be run again due to index.
if check_MD5(suffix_nodes['.idx'],'.idx') or (count == 1 and run_makeindex):
# We must run makeindex
if Verbose:
print("Need to run makeindex")
idxfile = suffix_nodes['.idx']
result = MakeIndexAction(idxfile, idxfile, env)
if result != 0:
check_file_error_message(env['MAKEINDEX'], 'ilg')
return result
# TO-DO: need to add a way for the user to extend this list for whatever
# auxiliary files they create in other (or their own) packages
# Harder is the case where an action needs to be called -- that should be rare (I hope?)
for index in check_suffixes:
check_MD5(suffix_nodes[index],index)
# Now decide if latex will need to be run again due to nomenclature.
if check_MD5(suffix_nodes['.nlo'],'.nlo') or (count == 1 and run_nomenclature):
# We must run makeindex
if Verbose:
print("Need to run makeindex for nomenclature")
nclfile = suffix_nodes['.nlo']
result = MakeNclAction(nclfile, nclfile, env)
if result != 0:
check_file_error_message('%s (nomenclature)' % env['MAKENCL'],
'nlg')
#return result
# Now decide if latex will need to be run again due to glossary.
if check_MD5(suffix_nodes['.glo'],'.glo') or (count == 1 and run_glossaries) or (count == 1 and run_glossary):
# We must run makeindex
if Verbose:
print("Need to run makeindex for glossary")
glofile = suffix_nodes['.glo']
result = MakeGlossaryAction(glofile, glofile, env)
if result != 0:
check_file_error_message('%s (glossary)' % env['MAKEGLOSSARY'],
'glg')
#return result
# Now decide if latex will need to be run again due to acronyms.
if check_MD5(suffix_nodes['.acn'],'.acn') or (count == 1 and run_acronyms):
# We must run makeindex
if Verbose:
print("Need to run makeindex for acronyms")
acrfile = suffix_nodes['.acn']
result = MakeAcronymsAction(acrfile, acrfile, env)
if result != 0:
check_file_error_message('%s (acronyms)' % env['MAKEACRONYMS'],
'alg')
return result
# Now decide if latex will need to be run again due to newglossary command.
for ig in range(len(newglossary_suffix)):
if check_MD5(suffix_nodes[newglossary_suffix[ig][2]],newglossary_suffix[ig][2]) or (count == 1):
# We must run makeindex
if Verbose:
print("Need to run makeindex for newglossary")
newglfile = suffix_nodes[newglossary_suffix[ig][2]]
MakeNewGlossaryAction = SCons.Action.Action("$MAKENEWGLOSSARYCOM ${SOURCE.filebase}%s -s ${SOURCE.filebase}.ist -t ${SOURCE.filebase}%s -o ${SOURCE.filebase}%s" % (newglossary_suffix[ig][2],newglossary_suffix[ig][0],newglossary_suffix[ig][1]), "$MAKENEWGLOSSARYCOMSTR")
result = MakeNewGlossaryAction(newglfile, newglfile, env)
if result != 0:
check_file_error_message('%s (newglossary)' % env['MAKENEWGLOSSARY'],
newglossary_suffix[ig][0])
return result
# Now decide if latex needs to be run yet again to resolve warnings.
if warning_rerun_re.search(logContent):
must_rerun_latex = True
if Verbose:
print("rerun Latex due to latex or package rerun warning")
if rerun_citations_re.search(logContent):
must_rerun_latex = True
if Verbose:
print("rerun Latex due to 'Rerun to get citations correct' warning")
if undefined_references_re.search(logContent):
must_rerun_latex = True
if Verbose:
print("rerun Latex due to undefined references or citations")
if (count >= int(env.subst('$LATEXRETRIES')) and must_rerun_latex):
print("reached max number of retries on Latex ,",int(env.subst('$LATEXRETRIES')))
# end of while loop
# rename Latex's output to what the target name is
if not (str(target[0]) == resultfilename and os.path.isfile(resultfilename)):
if os.path.isfile(resultfilename):
print("move %s to %s" % (resultfilename, str(target[0]), ))
shutil.move(resultfilename,str(target[0]))
# Original comment (when TEXPICTS was not restored):
# The TEXPICTS environment variable is needed by a dvi -> pdf step
# later on Mac OSX so leave it
#
# It is also used when searching for pictures (implicit dependencies).
# Why not set the variable again in the respective builder instead
# of leaving local modifications in the environment? What if multiple
# latex builds in different directories need different TEXPICTS?
for var in SCons.Scanner.LaTeX.LaTeX.env_variables:
if var == 'TEXPICTS':
continue
if saved_env[var] is _null:
try:
del env['ENV'][var]
except KeyError:
pass # was never set
else:
env['ENV'][var] = saved_env[var]
return result | [
"A builder for LaTeX files that checks the output in the aux file\n and decides how many times to use LaTeXAction, and BibTeXAction."
] |
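Stripped of the bibliography and index details, the control flow above is a fixed-point loop: rerun latex while any tracked auxiliary file's checksum changes, bounded by $LATEXRETRIES. A skeleton of that idea (run_latex, checksum, max_retries, and targetbase are hypothetical stand-ins):

count = 0
must_rerun = True
while must_rerun and count < max_retries:
    run_latex()
    count += 1
    must_rerun = False
    for suffix, old_hash in saved_hashes.items():
        new_hash = checksum(targetbase + suffix)  # stand-in for Node.get_csig()
        if new_hash != old_hash:
            saved_hashes[suffix] = new_hash
            must_rerun = True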
Please provide a description of the function:def is_LaTeX(flist,env,abspath):
# We need to scan files that are included in case the
# \documentclass command is in them.
# get path list from both env['TEXINPUTS'] and env['ENV']['TEXINPUTS']
savedpath = modify_env_var(env, 'TEXINPUTS', abspath)
paths = env['ENV']['TEXINPUTS']
if SCons.Util.is_List(paths):
pass
else:
# Split at os.pathsep to convert into absolute path
paths = paths.split(os.pathsep)
# now that we have the path list restore the env
if savedpath is _null:
try:
del env['ENV']['TEXINPUTS']
except KeyError:
pass # was never set
else:
env['ENV']['TEXINPUTS'] = savedpath
if Verbose:
print("is_LaTeX search path ",paths)
print("files to search :",flist)
# Now that we have the search path and file list, check each one
for f in flist:
if Verbose:
print(" checking for Latex source ",str(f))
content = f.get_text_contents()
if LaTeX_re.search(content):
if Verbose:
print("file %s is a LaTeX file" % str(f))
return 1
if Verbose:
print("file %s is not a LaTeX file" % str(f))
# now find included files
inc_files = [ ]
inc_files.extend( include_re.findall(content) )
if Verbose:
print("files included by '%s': "%str(f),inc_files)
# inc_files is list of file names as given. need to find them
# using TEXINPUTS paths.
# search the included files
for src in inc_files:
srcNode = FindFile(src,['.tex','.ltx','.latex'],paths,env,requireExt=False)
# make this a list since is_LaTeX takes a list.
fileList = [srcNode,]
if Verbose:
print("FindFile found ",srcNode)
if srcNode is not None:
file_test = is_LaTeX(fileList, env, abspath)
# return on first file that finds latex is needed.
if file_test:
return file_test
if Verbose:
print(" done scanning ",str(f))
return 0 | [
"Scan a file list to decide if it's TeX- or LaTeX-flavored."
] |
Please provide a description of the function:def TeXLaTeXFunction(target = None, source= None, env=None):
# find these paths for use in is_LaTeX to search for included files
basedir = os.path.split(str(source[0]))[0]
abspath = os.path.abspath(basedir)
if is_LaTeX(source,env,abspath):
result = LaTeXAuxAction(target,source,env)
if result != 0:
check_file_error_message(env['LATEX'])
else:
result = TeXAction(target,source,env)
if result != 0:
check_file_error_message(env['TEX'])
return result | [
"A builder for TeX and LaTeX that scans the source file to\n decide the \"flavor\" of the source and then executes the appropriate\n program."
] |
Please provide a description of the function:def TeXLaTeXStrFunction(target = None, source= None, env=None):
if env.GetOption("no_exec"):
# find these paths for use in is_LaTeX to search for included files
basedir = os.path.split(str(source[0]))[0]
abspath = os.path.abspath(basedir)
if is_LaTeX(source,env,abspath):
result = env.subst('$LATEXCOM',0,target,source)+" ..."
else:
result = env.subst("$TEXCOM",0,target,source)+" ..."
else:
result = ''
return result | [
"A strfunction for TeX and LaTeX that scans the source file to\n decide the \"flavor\" of the source and then returns the appropriate\n command string."
] |
Please provide a description of the function:def tex_eps_emitter(target, source, env):
(target, source) = tex_emitter_core(target, source, env, TexGraphics)
return (target, source) | [
"An emitter for TeX and LaTeX sources when\n executing tex or latex. It will accept .ps and .eps\n graphics files\n "
] |
Please provide a description of the function:def tex_pdf_emitter(target, source, env):
(target, source) = tex_emitter_core(target, source, env, LatexGraphics)
return (target, source) | [
"An emitter for TeX and LaTeX sources when\n executing pdftex or pdflatex. It will accept graphics\n files of types .pdf, .jpg, .png, .gif, and .tif\n "
] |
Please provide a description of the function:def ScanFiles(theFile, target, paths, file_tests, file_tests_search, env, graphics_extensions, targetdir, aux_files):
content = theFile.get_text_contents()
if Verbose:
print(" scanning ",str(theFile))
for i in range(len(file_tests_search)):
if file_tests[i][0] is None:
if Verbose:
print("scan i ",i," files_tests[i] ",file_tests[i], file_tests[i][1])
file_tests[i][0] = file_tests_search[i].search(content)
if Verbose and file_tests[i][0]:
print(" found match for ",file_tests[i][1][-1])
# for newglossary insert the suffixes in file_tests[i]
if file_tests[i][0] and file_tests[i][1][-1] == 'newglossary':
findresult = file_tests_search[i].findall(content)
for l in range(len(findresult)) :
(file_tests[i][1]).insert(0,'.'+findresult[l][3])
(file_tests[i][1]).insert(0,'.'+findresult[l][2])
(file_tests[i][1]).insert(0,'.'+findresult[l][0])
suffix_list = ['.'+findresult[l][0],'.'+findresult[l][2],'.'+findresult[l][3] ]
newglossary_suffix.append(suffix_list)
if Verbose:
print(" new suffixes for newglossary ",newglossary_suffix)
incResult = includeOnly_re.search(content)
if incResult:
aux_files.append(os.path.join(targetdir, incResult.group(1)))
if Verbose:
print("\include file names : ", aux_files)
# recursively call this on each of the included files
inc_files = [ ]
inc_files.extend( include_re.findall(content) )
if Verbose:
print("files included by '%s': "%str(theFile),inc_files)
# inc_files is list of file names as given. need to find them
# using TEXINPUTS paths.
for src in inc_files:
srcNode = FindFile(src,['.tex','.ltx','.latex'],paths,env,requireExt=False)
if srcNode is not None:
file_tests = ScanFiles(srcNode, target, paths, file_tests, file_tests_search, env, graphics_extensions, targetdir, aux_files)
if Verbose:
print(" done scanning ",str(theFile))
return file_tests | [
" For theFile (a Node) update any file_tests and search for graphics files\n then find all included files and call ScanFiles recursively for each of them"
] |
Please provide a description of the function:def tex_emitter_core(target, source, env, graphics_extensions):
basename = SCons.Util.splitext(str(source[0]))[0]
basefile = os.path.split(str(basename))[1]
targetdir = os.path.split(str(target[0]))[0]
targetbase = os.path.join(targetdir, basefile)
basedir = os.path.split(str(source[0]))[0]
abspath = os.path.abspath(basedir)
target[0].attributes.path = abspath
#
# file names we will make use of in searching the sources and log file
#
emit_suffixes = ['.aux', '.log', '.ilg', '.blg', '.nls', '.nlg', '.gls', '.glg', '.alg'] + all_suffixes
auxfilename = targetbase + '.aux'
logfilename = targetbase + '.log'
flsfilename = targetbase + '.fls'
syncfilename = targetbase + '.synctex.gz'
env.SideEffect(auxfilename,target[0])
env.SideEffect(logfilename,target[0])
env.SideEffect(flsfilename,target[0])
env.SideEffect(syncfilename,target[0])
if Verbose:
print("side effect :",auxfilename,logfilename,flsfilename,syncfilename)
env.Clean(target[0],auxfilename)
env.Clean(target[0],logfilename)
env.Clean(target[0],flsfilename)
env.Clean(target[0],syncfilename)
content = source[0].get_text_contents()
# set up list with the regular expressions
# we use to find features used
file_tests_search = [auxfile_re,
makeindex_re,
bibliography_re,
bibunit_re,
multibib_re,
addbibresource_re,
tableofcontents_re,
listoffigures_re,
listoftables_re,
hyperref_re,
makenomenclature_re,
makeglossary_re,
makeglossaries_re,
makeacronyms_re,
beamer_re,
newglossary_re,
biblatex_re ]
# set up list with the file suffixes that need emitting
# when a feature is found
file_tests_suff = [['.aux','aux_file'],
['.idx', '.ind', '.ilg','makeindex'],
['.bbl', '.blg','bibliography'],
['.bbl', '.blg','bibunit'],
['.bbl', '.blg','multibib'],
['.bbl', '.blg','.bcf','addbibresource'],
['.toc','contents'],
['.lof','figures'],
['.lot','tables'],
['.out','hyperref'],
['.nlo', '.nls', '.nlg','nomenclature'],
['.glo', '.gls', '.glg','glossary'],
['.glo', '.gls', '.glg','glossaries'],
['.acn', '.acr', '.alg','acronyms'],
['.nav', '.snm', '.out', '.toc','beamer'],
['newglossary',],
['.bcf', '.blg','biblatex'] ]
# for newglossary the suffixes are added as we find the command
# build the list of lists
file_tests = []
for i in range(len(file_tests_search)):
file_tests.append( [None, file_tests_suff[i]] )
# TO-DO: need to add a way for the user to extend this list for whatever
# auxiliary files they create in other (or their own) packages
# get path list from both env['TEXINPUTS'] and env['ENV']['TEXINPUTS']
savedpath = modify_env_var(env, 'TEXINPUTS', abspath)
paths = env['ENV']['TEXINPUTS']
if SCons.Util.is_List(paths):
pass
else:
# Split at os.pathsep to convert into absolute path
paths = paths.split(os.pathsep)
# now that we have the path list restore the env
if savedpath is _null:
try:
del env['ENV']['TEXINPUTS']
except KeyError:
pass # was never set
else:
env['ENV']['TEXINPUTS'] = savedpath
if Verbose:
print("search path ",paths)
# scan all sources for side effect files
aux_files = []
file_tests = ScanFiles(source[0], target, paths, file_tests, file_tests_search, env, graphics_extensions, targetdir, aux_files)
for (theSearch,suffix_list) in file_tests:
# add side effects if feature is present. If file is to be generated, add all side effects
if Verbose and theSearch:
print("check side effects for ",suffix_list[-1])
if (theSearch != None) or (not source[0].exists() ):
file_list = [targetbase,]
# for bibunit we need a list of files
if suffix_list[-1] == 'bibunit':
file_basename = os.path.join(targetdir, 'bu*.aux')
file_list = glob.glob(file_basename)
# remove the suffix '.aux'
for i in range(len(file_list)):
file_list.append(SCons.Util.splitext(file_list[i])[0])
# for multibib we need a list of files
if suffix_list[-1] == 'multibib':
for multibibmatch in multibib_re.finditer(content):
if Verbose:
print("multibib match ",multibibmatch.group(1))
if multibibmatch != None:
baselist = multibibmatch.group(1).split(',')
if Verbose:
print("multibib list ", baselist)
for i in range(len(baselist)):
file_list.append(os.path.join(targetdir, baselist[i]))
# now define the side effects
for file_name in file_list:
for suffix in suffix_list[:-1]:
env.SideEffect(file_name + suffix,target[0])
if Verbose:
print("side effect tst :",file_name + suffix, " target is ",str(target[0]))
env.Clean(target[0],file_name + suffix)
for aFile in aux_files:
aFile_base = SCons.Util.splitext(aFile)[0]
env.SideEffect(aFile_base + '.aux',target[0])
if Verbose:
print("side effect aux :",aFile_base + '.aux')
env.Clean(target[0],aFile_base + '.aux')
# read fls file to get all other files that latex creates and will read on the next pass
# remove files from list that we explicitly dealt with above
if os.path.isfile(flsfilename):
content = open(flsfilename, "r").read()
out_files = openout_re.findall(content)
myfiles = [auxfilename, logfilename, flsfilename, targetbase+'.dvi',targetbase+'.pdf']
for filename in out_files[:]:
if filename in myfiles:
out_files.remove(filename)
env.SideEffect(out_files,target[0])
if Verbose:
print("side effect fls :",out_files)
env.Clean(target[0],out_files)
return (target, source) | [
"An emitter for TeX and LaTeX sources.\n For LaTeX sources we try and find the common created files that\n are needed on subsequent runs of latex to finish tables of contents,\n bibliographies, indices, lists of figures, and hyperlink references.\n "
] |
Please provide a description of the function:def generate(env):
global TeXLaTeXAction
if TeXLaTeXAction is None:
TeXLaTeXAction = SCons.Action.Action(TeXLaTeXFunction,
strfunction=TeXLaTeXStrFunction)
env.AppendUnique(LATEXSUFFIXES=SCons.Tool.LaTeXSuffixes)
generate_common(env)
from . import dvi
dvi.generate(env)
bld = env['BUILDERS']['DVI']
bld.add_action('.tex', TeXLaTeXAction)
bld.add_emitter('.tex', tex_eps_emitter) | [
"Add Builders and construction variables for TeX to an Environment."
] |
Please provide a description of the function:def generate_common(env):
# Add OSX system paths so TeX tools can be found
# when a list of tools is given the exists() method is not called
generate_darwin(env)
# A generic tex file Action, sufficient for all tex files.
global TeXAction
if TeXAction is None:
TeXAction = SCons.Action.Action("$TEXCOM", "$TEXCOMSTR")
# An Action to build a latex file. This might be needed more
# than once if we are dealing with labels and bibtex.
global LaTeXAction
if LaTeXAction is None:
LaTeXAction = SCons.Action.Action("$LATEXCOM", "$LATEXCOMSTR")
# Define an action to run BibTeX on a file.
global BibTeXAction
if BibTeXAction is None:
BibTeXAction = SCons.Action.Action("$BIBTEXCOM", "$BIBTEXCOMSTR")
# Define an action to run Biber on a file.
global BiberAction
if BiberAction is None:
BiberAction = SCons.Action.Action("$BIBERCOM", "$BIBERCOMSTR")
# Define an action to run MakeIndex on a file.
global MakeIndexAction
if MakeIndexAction is None:
MakeIndexAction = SCons.Action.Action("$MAKEINDEXCOM", "$MAKEINDEXCOMSTR")
# Define an action to run MakeIndex on a file for nomenclatures.
global MakeNclAction
if MakeNclAction is None:
MakeNclAction = SCons.Action.Action("$MAKENCLCOM", "$MAKENCLCOMSTR")
# Define an action to run MakeIndex on a file for glossaries.
global MakeGlossaryAction
if MakeGlossaryAction is None:
MakeGlossaryAction = SCons.Action.Action("$MAKEGLOSSARYCOM", "$MAKEGLOSSARYCOMSTR")
# Define an action to run MakeIndex on a file for acronyms.
global MakeAcronymsAction
if MakeAcronymsAction is None:
MakeAcronymsAction = SCons.Action.Action("$MAKEACRONYMSCOM", "$MAKEACRONYMSCOMSTR")
try:
environ = env['ENV']
except KeyError:
environ = {}
env['ENV'] = environ
# Some Linux platforms have pdflatex set up in a way
# that requires that the HOME environment variable be set.
# Add it here if defined.
v = os.environ.get('HOME')
if v:
environ['HOME'] = v
CDCOM = 'cd '
if platform.system() == 'Windows':
# allow cd command to change drives on Windows
CDCOM = 'cd /D '
env['TEX'] = 'tex'
env['TEXFLAGS'] = SCons.Util.CLVar('-interaction=nonstopmode -recorder')
env['TEXCOM'] = CDCOM + '${TARGET.dir} && $TEX $TEXFLAGS ${SOURCE.file}'
env['PDFTEX'] = 'pdftex'
env['PDFTEXFLAGS'] = SCons.Util.CLVar('-interaction=nonstopmode -recorder')
env['PDFTEXCOM'] = CDCOM + '${TARGET.dir} && $PDFTEX $PDFTEXFLAGS ${SOURCE.file}'
env['LATEX'] = 'latex'
env['LATEXFLAGS'] = SCons.Util.CLVar('-interaction=nonstopmode -recorder')
env['LATEXCOM'] = CDCOM + '${TARGET.dir} && $LATEX $LATEXFLAGS ${SOURCE.file}'
env['LATEXRETRIES'] = 4
env['PDFLATEX'] = 'pdflatex'
env['PDFLATEXFLAGS'] = SCons.Util.CLVar('-interaction=nonstopmode -recorder')
env['PDFLATEXCOM'] = CDCOM + '${TARGET.dir} && $PDFLATEX $PDFLATEXFLAGS ${SOURCE.file}'
env['BIBTEX'] = 'bibtex'
env['BIBTEXFLAGS'] = SCons.Util.CLVar('')
env['BIBTEXCOM'] = CDCOM + '${TARGET.dir} && $BIBTEX $BIBTEXFLAGS ${SOURCE.filebase}'
env['BIBER'] = 'biber'
env['BIBERFLAGS'] = SCons.Util.CLVar('')
env['BIBERCOM'] = CDCOM + '${TARGET.dir} && $BIBER $BIBERFLAGS ${SOURCE.filebase}'
env['MAKEINDEX'] = 'makeindex'
env['MAKEINDEXFLAGS'] = SCons.Util.CLVar('')
env['MAKEINDEXCOM'] = CDCOM + '${TARGET.dir} && $MAKEINDEX $MAKEINDEXFLAGS ${SOURCE.file}'
env['MAKEGLOSSARY'] = 'makeindex'
env['MAKEGLOSSARYSTYLE'] = '${SOURCE.filebase}.ist'
env['MAKEGLOSSARYFLAGS'] = SCons.Util.CLVar('-s ${MAKEGLOSSARYSTYLE} -t ${SOURCE.filebase}.glg')
env['MAKEGLOSSARYCOM'] = CDCOM + '${TARGET.dir} && $MAKEGLOSSARY ${SOURCE.filebase}.glo $MAKEGLOSSARYFLAGS -o ${SOURCE.filebase}.gls'
env['MAKEACRONYMS'] = 'makeindex'
env['MAKEACRONYMSSTYLE'] = '${SOURCE.filebase}.ist'
env['MAKEACRONYMSFLAGS'] = SCons.Util.CLVar('-s ${MAKEACRONYMSSTYLE} -t ${SOURCE.filebase}.alg')
env['MAKEACRONYMSCOM'] = CDCOM + '${TARGET.dir} && $MAKEACRONYMS ${SOURCE.filebase}.acn $MAKEACRONYMSFLAGS -o ${SOURCE.filebase}.acr'
env['MAKENCL'] = 'makeindex'
env['MAKENCLSTYLE'] = 'nomencl.ist'
env['MAKENCLFLAGS'] = '-s ${MAKENCLSTYLE} -t ${SOURCE.filebase}.nlg'
env['MAKENCLCOM'] = CDCOM + '${TARGET.dir} && $MAKENCL ${SOURCE.filebase}.nlo $MAKENCLFLAGS -o ${SOURCE.filebase}.nls'
env['MAKENEWGLOSSARY'] = 'makeindex'
env['MAKENEWGLOSSARYCOM'] = CDCOM + '${TARGET.dir} && $MAKENEWGLOSSARY ' | [
"Add internal Builders and construction variables for LaTeX to an Environment."
] |
Please provide a description of the function:def is_win64():
# Unfortunately, python does not provide a useful way to determine
# if the underlying Windows OS is 32-bit or 64-bit. Worse, whether
# the Python itself is 32-bit or 64-bit affects what it returns,
# so nothing in sys.* or os.* helps.
# Apparently the best solution is to use env vars that Windows
# sets. If PROCESSOR_ARCHITECTURE is not x86, then the python
# process is running in 64 bit mode (on a 64-bit OS, 64-bit
# hardware, obviously).
# If this python is 32-bit but the OS is 64, Windows will set
# ProgramW6432 and PROCESSOR_ARCHITEW6432 to non-null.
# (Checking for HKLM\Software\Wow6432Node in the registry doesn't
# work, because some 32-bit installers create it.)
global _is_win64
if _is_win64 is None:
# I structured these tests to make it easy to add new ones or
# add exceptions in the future, because this is a bit fragile.
_is_win64 = False
if os.environ.get('PROCESSOR_ARCHITECTURE', 'x86') != 'x86':
_is_win64 = True
if os.environ.get('PROCESSOR_ARCHITEW6432'):
_is_win64 = True
if os.environ.get('ProgramW6432'):
_is_win64 = True
return _is_win64 | [
"Return true if running on windows 64 bits.\n\n Works whether python itself runs in 64 bits or 32 bits."
] |
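The same probes can be reproduced standalone; any one indicator is enough to conclude the OS is 64-bit:

import os

def probe_win64():
    # PROCESSOR_ARCHITECTURE != x86 means a 64-bit python process;
    # the W6432 variables mean a 32-bit python on a 64-bit Windows.
    return (os.environ.get('PROCESSOR_ARCHITECTURE', 'x86') != 'x86'
            or bool(os.environ.get('PROCESSOR_ARCHITEW6432'))
            or bool(os.environ.get('ProgramW6432')))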
Please provide a description of the function:def has_reg(value):
try:
SCons.Util.RegOpenKeyEx(SCons.Util.HKEY_LOCAL_MACHINE, value)
ret = True
except SCons.Util.WinError:
ret = False
return ret | [
"Return True if the given key exists in HKEY_LOCAL_MACHINE, False\n otherwise."
] |
Please provide a description of the function:def normalize_env(env, keys, force=False):
normenv = {}
if env:
for k in list(env.keys()):
normenv[k] = copy.deepcopy(env[k])
for k in keys:
if k in os.environ and (force or not k in normenv):
normenv[k] = os.environ[k]
# This shouldn't be necessary, since the default environment should include system32,
# but keep this here to be safe, since it's needed to find reg.exe which the MSVC
# bat scripts use.
sys32_dir = os.path.join(os.environ.get("SystemRoot",
os.environ.get("windir", r"C:\Windows\system32")),
"System32")
if sys32_dir not in normenv['PATH']:
normenv['PATH'] = normenv['PATH'] + os.pathsep + sys32_dir
# Without Wbem in PATH, vcvarsall.bat has a "'wmic' is not recognized"
# error starting with Visual Studio 2017, although the script still
# seems to work anyway.
sys32_wbem_dir = os.path.join(sys32_dir, 'Wbem')
if sys32_wbem_dir not in normenv['PATH']:
normenv['PATH'] = normenv['PATH'] + os.pathsep + sys32_wbem_dir
debug("PATH: %s"%normenv['PATH'])
return normenv | [
"Given a dictionary representing a shell environment, add the variables\n from os.environ needed for the processing of .bat files; the keys are\n controlled by the keys argument.\n\n It also makes sure the environment values are correctly encoded.\n\n If force=True, then all of the key values that exist are copied\n into the returned dictionary. If force=false, values are only\n copied if the key does not already exist in the copied dictionary.\n\n Note: the environment is copied."
] |
Please provide a description of the function:def get_output(vcbat, args = None, env = None):
if env is None:
# Create a blank environment, for use in launching the tools
env = SCons.Environment.Environment(tools=[])
# TODO: This is a hard-coded list of the variables that (may) need
# to be imported from os.environ[] for v[sc]*vars*.bat file
# execution to work. This list should really be either directly
# controlled by vc.py, or else derived from the common_tools_var
# settings in vs.py.
vs_vc_vars = [
'COMSPEC',
# VS100 and VS110: Still set, but modern MSVC setup scripts will
# discard these if registry has values. However Intel compiler setup
# script still requires these as of 2013/2014.
'VS140COMNTOOLS',
'VS120COMNTOOLS',
'VS110COMNTOOLS',
'VS100COMNTOOLS',
'VS90COMNTOOLS',
'VS80COMNTOOLS',
'VS71COMNTOOLS',
'VS70COMNTOOLS',
'VS60COMNTOOLS',
]
env['ENV'] = normalize_env(env['ENV'], vs_vc_vars, force=False)
if args:
debug("Calling '%s %s'" % (vcbat, args))
popen = SCons.Action._subproc(env,
'"%s" %s & set' % (vcbat, args),
stdin='devnull',
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
else:
debug("Calling '%s'" % vcbat)
popen = SCons.Action._subproc(env,
'"%s" & set' % vcbat,
stdin='devnull',
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
# Use the .stdout and .stderr attributes directly because the
# .communicate() method uses the threading module on Windows
# and won't work under Pythons not built with threading.
stdout = popen.stdout.read()
stderr = popen.stderr.read()
# Extra debug logic, uncomment if necessary
# debug('get_output():stdout:%s'%stdout)
# debug('get_output():stderr:%s'%stderr)
if stderr:
# TODO: find something better to do with stderr;
# this at least prevents errors from getting swallowed.
import sys
sys.stderr.write(stderr)
if popen.wait() != 0:
raise IOError(stderr.decode("mbcs"))
output = stdout.decode("mbcs")
return output | [
"Parse the output of given bat file, with given args."
] |
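The essential trick is chaining the batch file with `set`, so the child cmd.exe dumps the environment that the script produced; parse_output() below scrapes that dump. A minimal standalone version (Windows only; the vcvarsall.bat path is hypothetical):

import subprocess
vcbat = r"C:\path\to\vcvarsall.bat"  # hypothetical install location
cmd = '"%s" %s & set' % (vcbat, "x64")
proc = subprocess.run(cmd, shell=True, capture_output=True)
dump = proc.stdout.decode("mbcs")  # KEY=VALUE lines after the script ran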
Please provide a description of the function:def parse_output(output, keep=("INCLUDE", "LIB", "LIBPATH", "PATH")):
# dkeep is a dict associating key: path_list, where key is one item from
# keep, and path_list the associated list of paths
dkeep = dict([(i, []) for i in keep])
# rdk will keep the regex to match the .bat file output line starts
rdk = {}
for i in keep:
rdk[i] = re.compile('%s=(.*)' % i, re.I)
def add_env(rmatch, key, dkeep=dkeep):
path_list = rmatch.group(1).split(os.pathsep)
for path in path_list:
# Do not add empty paths (when a var ends with ;)
if path:
# XXX: For some reason, VC98 .bat file adds "" around the PATH
# values, and it screws up the environment later, so we strip
# it.
path = path.strip('"')
dkeep[key].append(str(path))
for line in output.splitlines():
for k, value in rdk.items():
match = value.match(line)
if match:
add_env(match, k)
return dkeep | [
"\n Parse output from running visual c++/studios vcvarsall.bat and running set\n To capture the values listed in keep\n "
] |
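Given such a KEY=VALUE dump, only the variables named in keep are collected, split on os.pathsep. Traced as comments (on Windows, where os.pathsep is ';'):

# output = "PATH=C:\VC\bin;C:\Windows\nINCLUDE=C:\VC\include;\nOTHER=x"
# parse_output(output) ->
#   {'PATH': ['C:\VC\bin', 'C:\Windows'],
#    'INCLUDE': ['C:\VC\include'],        # trailing empty entry dropped
#    'LIB': [], 'LIBPATH': []}            # 'OTHER' ignored: not in keep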
Please provide a description of the function:def generate(env):
for t in SCons.Tool.tool_list(env['PLATFORM'], env):
SCons.Tool.Tool(t)(env) | [
"Add default tools."
] |
Please provide a description of the function:def verify(self, obj):
if not isinstance(obj, int):
raise ValidationError("Object is not an int", reason='object is not an int', object=obj,
type=type(obj), int_type=int)
return obj | [
"Verify that the object conforms to this verifier's schema\n\n Args:\n obj (object): A python object to verify\n\n Raises:\n ValidationError: If there is a problem verifying the dictionary, a\n ValidationError is thrown with at least the reason key set indicating\n the reason for the lack of validation.\n "
] |
Please provide a description of the function:def node_conv(obj):
try:
get = obj.get
except AttributeError:
if isinstance(obj, SCons.Node.Node) or SCons.Util.is_Sequence( obj ):
result = obj
else:
result = str(obj)
else:
result = get()
return result | [
"\n This is the \"string conversion\" routine that we have our substitutions\n use to return Nodes, not strings. This relies on the fact that an\n EntryProxy object has a get() method that returns the underlying\n Node that it wraps, which is a bit of architectural dependence\n that we might need to break or modify in the future in response to\n additional requirements.\n "
] |
Please provide a description of the function:def subst_path(self, env, target, source):
result = []
for type, value in self.pathlist:
if type == TYPE_STRING_SUBST:
value = env.subst(value, target=target, source=source,
conv=node_conv)
if SCons.Util.is_Sequence(value):
result.extend(SCons.Util.flatten(value))
elif value:
result.append(value)
elif type == TYPE_OBJECT:
value = node_conv(value)
if value:
result.append(value)
elif value:
result.append(value)
return tuple(result) | [
"\n Performs construction variable substitution on a pre-digested\n PathList for a specific target and source.\n "
] |
Please provide a description of the function:def _PathList_key(self, pathlist):
if SCons.Util.is_Sequence(pathlist):
pathlist = tuple(SCons.Util.flatten(pathlist))
return pathlist | [
"\n Returns the key for memoization of PathLists.\n\n Note that we want this to be pretty quick, so we don't completely\n canonicalize all forms of the same list. For example,\n 'dir1:$ROOT/dir2' and ['$ROOT/dir1', 'dir'] may logically\n represent the same list if you're executing from $ROOT, but\n we're not going to bother splitting strings into path elements,\n or massaging strings into Nodes, to identify that equivalence.\n We just want to eliminate obvious redundancy from the normal\n case of re-using exactly the same cloned value for a path.\n "
] |
Please provide a description of the function:def PathList(self, pathlist):
pathlist = self._PathList_key(pathlist)
try:
memo_dict = self._memo['PathList']
except KeyError:
memo_dict = {}
self._memo['PathList'] = memo_dict
else:
try:
return memo_dict[pathlist]
except KeyError:
pass
result = _PathList(pathlist)
memo_dict[pathlist] = result
return result | [
"\n Returns the cached _PathList object for the specified pathlist,\n creating and caching a new object as necessary.\n "
] |
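The caching idiom here is generic: canonicalize the argument into a hashable key, keep one sub-dict per method name in self._memo, and build on a miss. A stripped-down sketch of the same pattern:

class Memoized(object):
    def __init__(self):
        self._memo = {}

    def lookup(self, pathlist):
        if isinstance(pathlist, list):
            pathlist = tuple(pathlist)  # lists are unhashable; tuples are not
        memo = self._memo.setdefault('PathList', {})
        if pathlist not in memo:
            memo[pathlist] = object()  # stand-in for constructing a _PathList
        return memo[pathlist]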
Please provide a description of the function:def DefaultEnvironment(*args, **kw):
global _default_env
if not _default_env:
import SCons.Util
_default_env = SCons.Environment.Environment(*args, **kw)
if SCons.Util.md5:
_default_env.Decider('MD5')
else:
_default_env.Decider('timestamp-match')
global DefaultEnvironment
DefaultEnvironment = _fetch_DefaultEnvironment
_default_env._CacheDir_path = None
return _default_env | [
"\n Initial public entry point for creating the default construction\n Environment.\n\n After creating the environment, we overwrite our name\n (DefaultEnvironment) with the _fetch_DefaultEnvironment() function,\n which more efficiently returns the initialized default construction\n environment without checking for its existence.\n\n (This function still exists with its _default_check because someone\n else (*cough* Script/__init__.py *cough*) may keep a reference\n to this function. So we can't use the fully functional idiom of\n having the name originally be a something that *only* creates the\n construction environment and then overwrites the name.)\n "
] |
Please provide a description of the function:def copy_func(dest, src, symlinks=True):
dest = str(dest)
src = str(src)
SCons.Node.FS.invalidate_node_memos(dest)
if SCons.Util.is_List(src) and os.path.isdir(dest):
for file in src:
shutil.copy2(file, dest)
return 0
elif os.path.islink(src):
if symlinks:
return os.symlink(os.readlink(src), dest)
else:
return copy_func(dest, os.path.realpath(src))
elif os.path.isfile(src):
shutil.copy2(src, dest)
return 0
else:
shutil.copytree(src, dest, symlinks)
# copytree returns None in python2 and destination string in python3
# An error is raised in both cases, so we can just return 0 for success
return 0 | [
"\n If symlinks (is true), then a symbolic link will be\n shallow copied and recreated as a symbolic link; otherwise, copying\n a symbolic link will be equivalent to copying the symbolic link's\n final target regardless of symbolic link depth.\n "
] |
Please provide a description of the function:def _concat(prefix, list, suffix, env, f=lambda x: x, target=None, source=None):
if not list:
return list
l = f(SCons.PathList.PathList(list).subst_path(env, target, source))
if l is not None:
list = l
return _concat_ixes(prefix, list, suffix, env) | [
"\n Creates a new list from 'list' by first interpolating each element\n in the list using the 'env' dictionary and then calling f on the\n list, and finally calling _concat_ixes to concatenate 'prefix' and\n 'suffix' onto each element of the list.\n "
] |
Please provide a description of the function:def _concat_ixes(prefix, list, suffix, env):
result = []
# ensure that prefix and suffix are strings
prefix = str(env.subst(prefix, SCons.Subst.SUBST_RAW))
suffix = str(env.subst(suffix, SCons.Subst.SUBST_RAW))
for x in list:
if isinstance(x, SCons.Node.FS.File):
result.append(x)
continue
x = str(x)
if x:
if prefix:
if prefix[-1] == ' ':
result.append(prefix[:-1])
elif x[:len(prefix)] != prefix:
x = prefix + x
result.append(x)
if suffix:
if suffix[0] == ' ':
result.append(suffix[1:])
elif x[-len(suffix):] != suffix:
result[-1] = result[-1]+suffix
return result | [
"\n Creates a new list from 'list' by concatenating the 'prefix' and\n 'suffix' arguments onto each element of the list. A trailing space\n on 'prefix' or leading space on 'suffix' will cause them to be put\n into separate list elements rather than being concatenated.\n "
] |
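The space rule is what lets a flag and its argument stay separate argv entries. Expected behavior, sketched as comments (env substitution elided):

# _concat_ixes('-I', ['inc', 'sys/inc'], '', env)
#   -> ['-Iinc', '-Isys/inc']        # no trailing space: glued to each item
# _concat_ixes('-include ', ['pre.h'], '', env)
#   -> ['-include', 'pre.h']         # trailing space: flag is its own element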
Please provide a description of the function:def _stripixes(prefix, itms, suffix, stripprefixes, stripsuffixes, env, c=None):
if not itms:
return itms
if not callable(c):
env_c = env['_concat']
if env_c != _concat and callable(env_c):
# There's a custom _concat() method in the construction
# environment, and we've allowed people to set that in
# the past (see test/custom-concat.py), so preserve the
# backwards compatibility.
c = env_c
else:
c = _concat_ixes
stripprefixes = list(map(env.subst, SCons.Util.flatten(stripprefixes)))
stripsuffixes = list(map(env.subst, SCons.Util.flatten(stripsuffixes)))
stripped = []
for l in SCons.PathList.PathList(itms).subst_path(env, None, None):
if isinstance(l, SCons.Node.FS.File):
stripped.append(l)
continue
if not SCons.Util.is_String(l):
l = str(l)
for stripprefix in stripprefixes:
lsp = len(stripprefix)
if l[:lsp] == stripprefix:
l = l[lsp:]
# Do not strip more than one prefix
break
for stripsuffix in stripsuffixes:
lss = len(stripsuffix)
if l[-lss:] == stripsuffix:
l = l[:-lss]
# Do not strip more than one suffix
break
stripped.append(l)
return c(prefix, stripped, suffix, env) | [
"\n This is a wrapper around _concat()/_concat_ixes() that checks for\n the existence of prefixes or suffixes on list items and strips them\n where it finds them. This is used by tools (like the GNU linker)\n that need to turn something like 'libfoo.a' into '-lfoo'.\n "
] |
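This is the 'libfoo.a' to '-lfoo' transformation mentioned in the docstring, traced step by step (comments only; env elided):

# _stripixes('-l', ['libfoo.a', 'bar.so'], '', ['lib'], ['.a', '.so'], env)
#   'libfoo.a' -> strip prefix 'lib' -> 'foo.a' -> strip suffix '.a' -> 'foo'
#   'bar.so'   -> no 'lib' prefix    -> strip suffix '.so' -> 'bar'
#   then _concat_ixes('-l', ['foo', 'bar'], '', env) -> ['-lfoo', '-lbar']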
Please provide a description of the function:def processDefines(defs):
if SCons.Util.is_List(defs):
l = []
for d in defs:
if d is None:
continue
elif SCons.Util.is_List(d) or isinstance(d, tuple):
if len(d) >= 2:
l.append(str(d[0]) + '=' + str(d[1]))
else:
l.append(str(d[0]))
elif SCons.Util.is_Dict(d):
for macro,value in d.items():
if value is not None:
l.append(str(macro) + '=' + str(value))
else:
l.append(str(macro))
elif SCons.Util.is_String(d):
l.append(str(d))
else:
raise SCons.Errors.UserError("DEFINE %s is not a list, dict, string or None."%repr(d))
elif SCons.Util.is_Dict(defs):
# The items in a dictionary are stored in random order, but
# if the order of the command-line options changes from
# invocation to invocation, then the signature of the command
# line will change and we'll get random unnecessary rebuilds.
# Consequently, we have to sort the keys to ensure a
# consistent order...
l = []
for k,v in sorted(defs.items()):
if v is None:
l.append(str(k))
else:
l.append(str(k) + '=' + str(v))
else:
l = [str(defs)]
return l | [
"process defines, resolving strings, lists, dictionaries, into a list of\n strings\n "
] |
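All accepted shapes normalize to NAME or NAME=VALUE strings, and dict input is sorted so the command-line signature stays stable across runs:

processDefines(['NDEBUG', ('VERSION', 3), None])  # ['NDEBUG', 'VERSION=3']
processDefines({'B': 1, 'A': None})               # ['A', 'B=1'] (sorted keys)
processDefines('SINGLE')                          # ['SINGLE']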
Please provide a description of the function:def _defines(prefix, defs, suffix, env, c=_concat_ixes):
return c(prefix, env.subst_path(processDefines(defs)), suffix, env) | [
"A wrapper around _concat_ixes that turns a list or string\n into a list of C preprocessor command-line definitions.\n "
] |
Please provide a description of the function:def run(self, sensor_graph, model):
# We can only eliminate a node if the following checks are true
# 1. It has no other nodes connected to it
# 2. Its stream is not an output of the entire sensor graph
# 3. Its stream is autogenerated by the compiler
# 4. Its operation has no side effects
# 5. Its stream is not buffered so the value will not be accessible
for node, inputs, outputs in sensor_graph.iterate_bfs():
can_remove = False
# Check 1
if len(outputs) != 0:
continue
# Check 2
if sensor_graph.is_output(node.stream):
continue
# Check 3
if node.stream.stream_id < StreamAllocator.StartingID:
continue
# Check 4
if node.func_name == u'call_rpc':
continue
# Check 5
if node.stream.buffered:
# FIXME: Add a warning here if the stream is buffered since
# it's weird for the user to be saving useless data to flash
continue
# Check 6
if node.func_name == u'trigger_streamer':
continue
# If all of the checks above have passed, we have found a useless
# node, let's remove it and return True so we run the pass again
# and look for additional nodes that are now made useless because
# of the removal of this one.
for input_node in inputs:
input_node.outputs.remove(node)
if node in sensor_graph.roots:
sensor_graph.roots.remove(node)
sensor_graph.nodes.remove(node)
# FIXME: Check if we need to destroy any walkers here
return True
return False | [
"Run this optimization pass on the sensor graph\n\n If necessary, information on the device model being targeted\n can be found in the associated model argument.\n\n Args:\n sensor_graph (SensorGraph): The sensor graph to optimize\n model (DeviceModel): The device model we're using\n "
] |
Please provide a description of the function:def Scanner(function, *args, **kw):
if SCons.Util.is_Dict(function):
return Selector(function, *args, **kw)
else:
return Base(function, *args, **kw) | [
"\n Public interface factory function for creating different types\n of Scanners based on the different types of \"functions\" that may\n be supplied.\n\n TODO: Deprecate this some day. We've moved the functionality\n inside the Base class and really don't need this factory function\n any more. It was, however, used by some of our Tool modules, so\n the call probably ended up in various people's custom modules\n patterned on SCons code.\n "
] |
Please provide a description of the function:def ReportLength(cls, header):
parsed_header = cls._parse_header(header)
auth_size = cls._AUTH_BLOCK_LENGTHS.get(parsed_header.auth_type)
if auth_size is None:
raise DataError("Unknown auth block size in BroadcastReport")
return cls._HEADER_LENGTH + parsed_header.reading_length + auth_size | [
"Given a header of HeaderLength bytes, calculate the size of this report.\n\n Returns:\n int: The total length of the report including the header that we are passed.\n "
] |
Please provide a description of the function:def FromReadings(cls, uuid, readings, sent_timestamp=0):
header = struct.pack("<BBHLLL", cls.ReportType, 0, len(readings)*16, uuid, sent_timestamp, 0)
packed_readings = bytearray()
for reading in readings:
packed_reading = struct.pack("<HHLLL", reading.stream, 0, reading.reading_id,
reading.raw_time, reading.value)
packed_readings += bytearray(packed_reading)
return BroadcastReport(bytearray(header) + packed_readings) | [
"Generate a broadcast report from a list of readings and a uuid."
] |
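The wire format is a 16-byte little-endian header (type, reserved, reading-block length, uuid, sent timestamp, reserved) followed by one 16-byte record per reading. A standalone size check (the report-type and field values are invented):

import struct
header = struct.pack("<BBHLLL", 1, 0, 2 * 16, 0xABCD, 0, 0)
record = struct.pack("<HHLLL", 0x5001, 0, 7, 100, 42)
assert len(header) == 16 and len(record) == 16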
Please provide a description of the function:def decode(self):
parsed_header = self._parse_header(self.raw_report[:self._HEADER_LENGTH])
auth_size = self._AUTH_BLOCK_LENGTHS.get(parsed_header.auth_type)
assert auth_size is not None
assert parsed_header.reading_length % 16 == 0
time_base = self.received_time - datetime.timedelta(seconds=parsed_header.sent_timestamp)
readings = self.raw_report[self._HEADER_LENGTH:self._HEADER_LENGTH + parsed_header.reading_length]
parsed_readings = []
for i in range(0, len(readings), 16):
reading = readings[i:i+16]
stream, _, reading_id, timestamp, value = struct.unpack("<HHLLL", reading)
parsed = IOTileReading(timestamp, stream, value, time_base=time_base, reading_id=reading_id)
parsed_readings.append(parsed)
self.sent_timestamp = parsed_header.sent_timestamp
self.origin = parsed_header.uuid
return parsed_readings, [] | [
"Decode this report into a list of visible readings."
] |
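A decoding sketch, assuming raw_bytes holds a complete broadcast report and that decode() may be called directly on the constructed report:
report = BroadcastReport(raw_bytes)
visible_readings, _ = report.decode()  # broadcast reports carry no invisible readings
for reading in visible_readings:
    print(report.origin, reading.stream, reading.value)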
Please provide a description of the function:def _initialize_system_sync(self):
connected_devices = self.bable.list_connected_devices()
for device in connected_devices:
self.disconnect_sync(device.connection_handle)
self.stop_scan()
self.set_advertising(False)
# Register the GATT table to send the right services and characteristics when probed (like an IOTile device)
self.register_gatt_table() | [
"Initialize the device adapter by removing all active connections and resetting scan and advertising to have\n a clean starting state."
] |
Please provide a description of the function:def start(self, device):
super(NativeBLEVirtualInterface, self).start(device)
self.set_advertising(True) | [
"Start serving access to this VirtualIOTileDevice\n\n Args:\n device (VirtualIOTileDevice): The device we will be providing access to\n "
] |
Please provide a description of the function:def register_gatt_table(self):
services = [BLEService, TileBusService]
characteristics = [
NameChar,
AppearanceChar,
ReceiveHeaderChar,
ReceivePayloadChar,
SendHeaderChar,
SendPayloadChar,
StreamingChar,
HighSpeedChar,
TracingChar
]
self.bable.set_gatt_table(services, characteristics) | [
"Register the GATT table into baBLE."
] |
Please provide a description of the function:def set_advertising(self, enabled):
if enabled:
self.bable.set_advertising(
enabled=True,
uuids=[TileBusService.uuid],
name="V_IOTile ",
company_id=ArchManuID,
advertising_data=self._advertisement(),
scan_response=self._scan_response(),
sync=True
)
else:
try:
self.bable.set_advertising(enabled=False, sync=True)
except bable_interface.BaBLEException:
# If advertising is already disabled
pass | [
"Toggle advertising."
] |
Please provide a description of the function:def _advertisement(self):
# Flags are
# bit 0: whether we have pending data
# bit 1: whether we are in a low voltage state
# bit 2: whether another user is connected
# bit 3: whether we support robust reports
# bit 4: whether we allow fast writes
flags = int(self.device.pending_data) | (0 << 1) | (0 << 2) | (1 << 3) | (1 << 4)
return struct.pack("<LH", self.device.iotile_id, flags) | [
"Create advertisement data."
] |
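The inverse operation on a scanner's side, following the flag layout documented in the comments above:
import struct

iotile_id, flags = struct.unpack("<LH", adv_data)  # adv_data: the 6-byte payload built above
has_pending_data = bool(flags & (1 << 0))
low_voltage = bool(flags & (1 << 1))
other_user_connected = bool(flags & (1 << 2))
supports_robust_reports = bool(flags & (1 << 3))
allows_fast_writes = bool(flags & (1 << 4))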
Please provide a description of the function:def _scan_response(self):
voltage = struct.pack("<H", int(self.voltage*256))
reading = struct.pack("<HLLL", 0xFFFF, 0, 0, 0)
response = voltage + reading
return response | [
"Create scan response data."
] |
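A decoding sketch for the scan response: the first two bytes carry the voltage as 8.8 fixed point:
import struct

raw_voltage, = struct.unpack_from("<H", response, 0)
voltage = raw_voltage / 256.0  # inverse of int(self.voltage * 256)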
Please provide a description of the function:def stop_sync(self):
# Disconnect connected device
if self.connected:
self.disconnect_sync(self._connection_handle)
# Disable advertising
self.set_advertising(False)
# Stop the baBLE interface
self.bable.stop()
self.actions.queue.clear() | [
"Safely stop this BLED112 instance without leaving it in a weird state."
] |
Please provide a description of the function:def disconnect_sync(self, connection_handle):
self.bable.disconnect(connection_handle=connection_handle, sync=True) | [
"Synchronously disconnect from whoever has connected to us\n\n Args:\n connection_handle (int): The handle of the connection we wish to disconnect.\n "
] |
Please provide a description of the function:def _on_connected(self, device):
self._logger.debug("Device connected event: {}".format(device))
self.connected = True
self._connection_handle = device['connection_handle']
self.device.connected = True
self._audit('ClientConnected') | [
"Callback function called when a connected event has been received.\n It is executed in the baBLE working thread: should not be blocking.\n\n Args:\n device (dict): Information about the newly connected device\n "
] |
Please provide a description of the function:def _on_disconnected(self, device):
self._logger.debug("Device disconnected event: {}".format(device))
if self.streaming:
self.device.close_streaming_interface()
self.streaming = False
if self.tracing:
self.device.close_tracing_interface()
self.tracing = False
self.device.connected = False
self.connected = False
self._connection_handle = 0
self.header_notif = False
self.payload_notif = False
self._clear_reports()
self._clear_traces()
self._defer(self.set_advertising, [True])
self._audit('ClientDisconnected') | [
"Callback function called when a disconnected event has been received.\n This resets any open interfaces on the virtual device and clears any\n in progress traces and streams.\n It is executed in the baBLE working thread: should not be blocking.\n\n Args:\n device (dict): Information about the newly connected device\n "
] |
Please provide a description of the function:def _on_write_request(self, request):
if request['connection_handle'] != self._connection_handle:
return False
attribute_handle = request['attribute_handle']
# If write to configure notification
config_handles = [
ReceiveHeaderChar.config_handle,
ReceivePayloadChar.config_handle,
StreamingChar.config_handle,
TracingChar.config_handle
]
if attribute_handle in config_handles:
notification_enabled, _ = struct.unpack('<BB', request['value'])
# ReceiveHeader or ReceivePayload
if attribute_handle in [ReceiveHeaderChar.config_handle, ReceivePayloadChar.config_handle] and notification_enabled:
if attribute_handle == ReceiveHeaderChar.config_handle:
self.header_notif = True
elif attribute_handle == ReceivePayloadChar.config_handle:
self.payload_notif = True
if self.header_notif and self.payload_notif:
self.device.open_rpc_interface()
self._audit("RPCInterfaceOpened")
# Streaming
elif attribute_handle == StreamingChar.config_handle:
if notification_enabled and not self.streaming:
self.streaming = True
# If we should send any reports, queue them for sending
reports = self.device.open_streaming_interface()
if reports is not None:
self._queue_reports(*reports)
self._audit('StreamingInterfaceOpened')
elif not notification_enabled and self.streaming:
self.streaming = False
self.device.close_streaming_interface()
self._audit('StreamingInterfaceClosed')
# Tracing
elif attribute_handle == TracingChar.config_handle:
if notification_enabled and not self.tracing:
self.tracing = True
# If we should send any trace data, queue it immediately
traces = self.device.open_tracing_interface()
if traces is not None:
self._queue_traces(*traces)
self._audit('TracingInterfaceOpened')
elif not notification_enabled and self.tracing:
self.tracing = False
self.device.close_tracing_interface()
self._audit('TracingInterfaceClosed')
return True
# If write an RPC
elif attribute_handle in [SendHeaderChar.value_handle, SendPayloadChar.value_handle]:
# Payload
if attribute_handle == SendPayloadChar.value_handle:
self.rpc_payload = bytearray(request['value'])
if len(self.rpc_payload) < 20:
self.rpc_payload += bytearray(20 - len(self.rpc_payload))
# Header
elif attribute_handle == SendHeaderChar.value_handle:
self._defer(self._call_rpc, [bytearray(request['value'])])
return True
else:
return False | [
"Callback function called when a write request has been received.\n It is executed in the baBLE working thread: should not be blocking.\n\n Args:\n request (dict): Information about the request\n - connection_handle (int): The connection handle that sent the request\n - attribute_handle (int): The attribute handle to write\n - value (bytes): The value to write\n "
] |
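A worked example of the CCCD value parsed above: a client writes two little-endian bytes, 0x0001 to enable notifications and 0x0000 to disable them:
import struct

value = struct.pack('<BB', 1, 0)  # what a client writes to enable notifications
notification_enabled, _ = struct.unpack('<BB', value)
assert notification_enabled == 1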
Please provide a description of the function:def _call_rpc(self, header):
length, _, cmd, feature, address = struct.unpack("<BBBBB", bytes(header))
rpc_id = (feature << 8) | cmd
payload = self.rpc_payload[:length]
status = (1 << 6)
try:
response = self.device.call_rpc(address, rpc_id, bytes(payload))
if len(response) > 0:
status |= (1 << 7)
except (RPCInvalidIDError, RPCNotFoundError):
status = 2 # FIXME: Insert the correct ID here
response = b''
except TileNotFoundError:
status = 0xFF
response = b''
except Exception:
status = 3
response = b''
self._logger.exception("Exception raise while calling rpc, header=%s, payload=%s", header, payload)
self._audit(
"RPCReceived",
rpc_id=rpc_id,
address=address,
payload=binascii.hexlify(payload),
status=status,
response=binascii.hexlify(response)
)
resp_header = struct.pack("<BBBB", status, 0, 0, len(response))
if len(response) > 0:
self._send_rpc_response(
(ReceiveHeaderChar.value_handle, resp_header),
(ReceivePayloadChar.value_handle, response)
)
else:
self._send_rpc_response((ReceiveHeaderChar.value_handle, resp_header)) | [
"Call an RPC given a header and possibly a previously sent payload\n It is executed in the baBLE working thread: should not be blocking.\n\n Args:\n header (bytearray): The RPC header we should call\n "
] |
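A worked example of the 5-byte header format unpacked above, assuming a call to a hypothetical RPC 0x8000 on tile address 8 with a 4-byte payload:
import struct

header = struct.pack("<BBBBB", 4, 0, 0x00, 0x80, 8)
length, _, cmd, feature, address = struct.unpack("<BBBBB", header)
rpc_id = (feature << 8) | cmd
assert rpc_id == 0x8000 and length == 4 and address == 8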
Please provide a description of the function:def _send_notification(self, handle, payload):
self.bable.notify(
connection_handle=self._connection_handle,
attribute_handle=handle,
value=payload
) | [
"Send a notification over BLE\n It is executed in the baBLE working thread: should not be blocking.\n\n Args:\n handle (int): The handle to notify on\n payload (bytearray): The value to notify\n "
] |
Please provide a description of the function:def _send_rpc_response(self, *packets):
if len(packets) == 0:
return
handle, payload = packets[0]
try:
self._send_notification(handle, payload)
except bable_interface.BaBLEException as err:
if err.packet.status == 'Rejected': # If we are streaming too fast, back off and try again
time.sleep(0.05)
self._defer(self._send_rpc_response, list(packets))
else:
self._audit('ErrorSendingRPCResponse')
self._logger.exception("Error while sending RPC response, handle=%s, payload=%s", handle, payload)
return
if len(packets) > 1:
self._defer(self._send_rpc_response, list(packets[1:])) | [
"Send an RPC response.\n It is executed in the baBLE working thread: should not be blocking.\n\n The RPC response is notified in one or two packets depending on whether or not\n response data is included. If there is a temporary error sending one of the packets\n it is retried automatically. If there is a permanent error, it is logged and the response\n is abandoned.\n "
] |
Please provide a description of the function:def _stream_data(self, chunk=None):
# If we failed to transmit a chunk, we will be requeued with an argument
self._stream_sm_running = True
if chunk is None:
chunk = self._next_streaming_chunk(20)
if chunk is None or len(chunk) == 0:
self._stream_sm_running = False
return
try:
self._send_notification(StreamingChar.value_handle, chunk)
self._defer(self._stream_data)
except bable_interface.BaBLEException as err:
if err.packet.status == 'Rejected': # If we are streaming too fast, back off and try again
time.sleep(0.05)
self._defer(self._stream_data, [chunk])
else:
self._audit('ErrorStreamingReport') # If there was an error, stop streaming but don't choke
self._logger.exception("Error while streaming data") | [
"Stream reports to the ble client in 20 byte chunks\n\n Args:\n chunk (bytearray): A chunk that should be sent instead of requesting a\n new chunk from the pending reports.\n "
] |
Please provide a description of the function:def _send_trace(self, chunk=None):
self._trace_sm_running = True
# If we failed to transmit a chunk, we will be requeued with an argument
if chunk is None:
chunk = self._next_tracing_chunk(20)
if chunk is None or len(chunk) == 0:
self._trace_sm_running = False
return
try:
self._send_notification(TracingChar.value_handle, chunk)
self._defer(self._send_trace)
except bable_interface.BaBLEException as err:
if err.packet.status == 'Rejected': # If we are streaming too fast, back off and try again
time.sleep(0.05)
self._defer(self._send_trace, [chunk])
else:
self._audit('ErrorStreamingTrace') # If there was an error, stop streaming but don't choke
self._logger.exception("Error while tracing data") | [
"Stream tracing data to the ble client in 20 byte chunks\n\n Args:\n chunk (bytearray): A chunk that should be sent instead of requesting a\n new chunk from the pending reports.\n "
] |
Please provide a description of the function:def process(self):
super(NativeBLEVirtualInterface, self).process()
if (not self._stream_sm_running) and (not self.reports.empty()):
self._stream_data()
if (not self._trace_sm_running) and (not self.traces.empty()):
self._send_trace() | [
"Periodic nonblocking processes"
] |
Please provide a description of the function:async def _populate_name_map(self):
services = await self.sync_services()
with self._state_lock:
self.services = services
for i, name in enumerate(self.services.keys()):
self._name_map[i] = name | [
"Populate the name map of services as reported by the supervisor"
] |
Please provide a description of the function:def local_service(self, name_or_id):
if not self._loop.inside_loop():
self._state_lock.acquire()
try:
if isinstance(name_or_id, int):
if name_or_id not in self._name_map:
raise ArgumentError("Unknown ID used to look up service", id=name_or_id)
name = self._name_map[name_or_id]
else:
name = name_or_id
if name not in self.services:
raise ArgumentError("Unknown service name", name=name)
return copy(self.services[name])
finally:
if not self._loop.inside_loop():
self._state_lock.release() | [
"Get the locally synced information for a service.\n\n This method is safe to call outside of the background event loop\n without any race condition. Internally it uses a thread-safe mutex to\n protect the local copies of supervisor data and ensure that it cannot\n change while this method is iterating over it.\n\n Args:\n name_or_id (string or int): Either a short name for the service or\n a numeric id.\n\n Returns:\n ServiceState: the current state of the service synced locally\n at the time of the call.\n "
] |
Please provide a description of the function:def local_services(self):
if not self._loop.inside_loop():
self._state_lock.acquire()
try:
return sorted([(index, name) for index, name in self._name_map.items()], key=lambda element: element[0])
finally:
if not self._loop.inside_loop():
self._state_lock.release() | [
"Get a list of id, name pairs for all of the known synced services.\n\n This method is safe to call outside of the background event loop\n without any race condition. Internally it uses a thread-safe mutex to\n protect the local copies of supervisor data and ensure that it cannot\n change while this method is iterating over it.\n\n Returns:\n list (id, name): A list of tuples with id and service name sorted by id\n from low to high\n "
] |
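A usage sketch combining the two thread-safe accessors; client is a hypothetical instance of this class:
for service_id, name in client.local_services():
    state = client.local_service(service_id)  # lookup by short name works the same way
    print(service_id, name, state.state)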
Please provide a description of the function:async def sync_services(self):
services = {}
servs = await self.list_services()
for i, serv in enumerate(servs):
info = await self.service_info(serv)
status = await self.service_status(serv)
messages = await self.get_messages(serv)
headline = await self.get_headline(serv)
services[serv] = states.ServiceState(info['short_name'], info['long_name'], info['preregistered'], i)
services[serv].state = status['numeric_status']
for message in messages:
services[serv].post_message(message.level, message.message, message.count, message.created)
if headline is not None:
services[serv].set_headline(headline.level, headline.message, headline.created)
return services | [
"Poll the current state of all services.\n\n Returns:\n dict: A dictionary mapping service name to service status\n "
] |
Please provide a description of the function:async def get_messages(self, name):
resp = await self.send_command(OPERATIONS.CMD_QUERY_MESSAGES, {'name': name},
MESSAGES.QueryMessagesResponse, timeout=5.0)
return [states.ServiceMessage.FromDictionary(x) for x in resp] | [
"Get stored messages for a service.\n\n Args:\n name (string): The name of the service to get messages from.\n\n Returns:\n list(ServiceMessage): A list of the messages stored for this service\n "
] |
Please provide a description of the function:async def get_headline(self, name):
resp = await self.send_command(OPERATIONS.CMD_QUERY_HEADLINE, {'name': name},
MESSAGES.QueryHeadlineResponse, timeout=5.0)
if resp is not None:
resp = states.ServiceMessage.FromDictionary(resp)
return resp | [
"Get stored messages for a service.\n\n Args:\n name (string): The name of the service to get messages from.\n\n Returns:\n ServiceMessage: the headline or None if no headline has been set\n "
] |
Please provide a description of the function:async def service_info(self, name):
return await self.send_command(OPERATIONS.CMD_QUERY_INFO, {'name': name},
MESSAGES.QueryInfoResponse, timeout=5.0) | [
"Pull descriptive info of a service by name.\n\n Information returned includes the service's user friendly\n name and whether it was preregistered or added dynamically.\n\n Returns:\n dict: A dictionary of service information with the following keys\n set:\n long_name (string): The user friendly name of the service\n preregistered (bool): Whether the service was explicitly\n called out as a preregistered service.\n "
] |
Please provide a description of the function:async def send_heartbeat(self, name):
await self.send_command(OPERATIONS.CMD_HEARTBEAT, {'name': name},
MESSAGES.HeartbeatResponse, timeout=5.0) | [
"Send a heartbeat for a service.\n\n Args:\n name (string): The name of the service to send a heartbeat for\n "
] |
Please provide a description of the function:async def update_state(self, name, state):
await self.send_command(OPERATIONS.CMD_UPDATE_STATE,
{'name': name, 'new_status': state},
MESSAGES.UpdateStateResponse, timeout=5.0) | [
"Update the state for a service.\n\n Args:\n name (string): The name of the service\n state (int): The new state of the service\n "
] |
Please provide a description of the function:def post_headline(self, name, level, message):
self.post_command(OPERATIONS.CMD_SET_HEADLINE,
{'name': name, 'level': level, 'message': message}) | [
"Asynchronously update the sticky headline for a service.\n\n Args:\n name (string): The name of the service\n level (int): A message level in states.*_LEVEL\n message (string): The user facing error message that will be stored\n for the service and can be queried later.\n "
] |
Please provide a description of the function:def post_state(self, name, state):
self.post_command(OPERATIONS.CMD_UPDATE_STATE,
{'name': name, 'new_status': state}) | [
"Asynchronously try to update the state for a service.\n\n If the update fails, nothing is reported because we don't wait for a\n response from the server. This function will return immmediately and\n not block.\n\n Args:\n name (string): The name of the service\n state (int): The new state of the service\n "
] |
Please provide a description of the function:def post_error(self, name, message):
self.post_command(OPERATIONS.CMD_POST_MESSAGE,
_create_message(name, states.ERROR_LEVEL, message)) | [
"Asynchronously post a user facing error message about a service.\n\n Args:\n name (string): The name of the service\n message (string): The user facing error message that will be stored\n for the service and can be queried later.\n "
] |
Please provide a description of the function:def post_warning(self, name, message):
self.post_command(OPERATIONS.CMD_POST_MESSAGE,
_create_message(name, states.WARNING_LEVEL, message)) | [
"Asynchronously post a user facing warning message about a service.\n\n Args:\n name (string): The name of the service\n message (string): The user facing warning message that will be stored\n for the service and can be queried later.\n "
] |
Please provide a description of the function:def post_info(self, name, message):
self.post_command(OPERATIONS.CMD_POST_MESSAGE,
_create_message(name, states.INFO_LEVEL, message)) | [
"Asynchronously post a user facing info message about a service.\n\n Args:\n name (string): The name of the service\n message (string): The user facing info message that will be stored\n for the service and can be queried later.\n "
] |
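A sketch of the fire-and-forget message helpers; the service name and messages are hypothetical:
client.post_info('gateway', 'started polling devices')
client.post_warning('gateway', 'device responding slowly')
client.post_error('gateway', 'lost connection to device')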
Please provide a description of the function:async def service_status(self, name):
return await self.send_command(OPERATIONS.CMD_QUERY_STATUS, {'name': name},
MESSAGES.QueryStatusResponse, timeout=5.0) | [
"Pull the current status of a service by name.\n\n Returns:\n dict: A dictionary of service status\n "
] |
Please provide a description of the function:async def send_rpc(self, name, rpc_id, payload, timeout=1.0):
msg = dict(name=name, rpc_id=rpc_id, payload=payload, timeout=timeout)
try:
resp = await self.send_command(OPERATIONS.CMD_SEND_RPC, msg,
MESSAGES.SendRPCResponse, timeout=timeout + 1)
except asyncio.TimeoutError:
resp = dict(result='timeout', response=b'')
return resp | [
"Send an RPC to a service and synchronously wait for the response.\n\n Args:\n name (str): The short name of the service to send the RPC to\n rpc_id (int): The id of the RPC we want to call\n payload (bytes): Any binary arguments that we want to send\n timeout (float): The number of seconds to wait for the RPC to finish\n before timing out and returning\n\n Returns:\n dict: A response dictionary with 1 or 2 keys set\n 'result': one of 'success', 'service_not_found',\n or 'rpc_not_found', 'timeout'\n 'response': the binary response object if the RPC was successful\n "
] |
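An invocation sketch inside a coroutine; the service name, RPC id and payload are hypothetical:
resp = await client.send_rpc('gateway', 0x8000, b'\x01\x02', timeout=2.0)
if resp['result'] == 'success':
    payload = resp['response']
else:
    handle_rpc_error(resp['result'])  # hypothetical error handler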
Please provide a description of the function:async def register_service(self, short_name, long_name, allow_duplicate=True):
try:
await self.send_command(OPERATIONS.CMD_REGISTER_SERVICE, dict(name=short_name, long_name=long_name),
MESSAGES.RegisterServiceResponse)
except ArgumentError:
if not allow_duplicate:
raise | [
"Register a new service with the service manager.\n\n Args:\n short_name (string): A unique short name for this service that functions\n as an id\n long_name (string): A user facing name for this service\n allow_duplicate (boolean): Don't throw an error if this service is already\n registered. This is important if the service is preregistered for example.\n Raises:\n ArgumentError: if the short_name is already taken\n "
] |
Please provide a description of the function:async def register_agent(self, short_name):
await self.send_command(OPERATIONS.CMD_SET_AGENT, {'name': short_name},
MESSAGES.SetAgentResponse) | [
"Register to act as the RPC agent for this service.\n\n After this call succeeds, all requests to send RPCs to this service\n will be routed through this agent.\n\n Args:\n short_name (str): A unique short name for this service that functions\n as an id\n "
] |
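A registration sketch for a service that also answers its own RPCs; the names are hypothetical:
await client.register_service('sensor', 'Sensor Sampling Service')
await client.register_agent('sensor')  # RPCs sent to 'sensor' now route through this client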
Please provide a description of the function:async def _on_status_change(self, update):
info = update['payload']
new_number = info['new_status']
name = update['service']
if name not in self.services:
return
with self._state_lock:
is_changed = self.services[name].state != new_number
self.services[name].state = new_number
# Notify about this service state change if anyone is listening
if self._on_change_callback and is_changed:
self._on_change_callback(name, self.services[name].id, new_number, False, False) | [
"Update a service that has its status updated."
] |
Please provide a description of the function:async def _on_service_added(self, update):
info = update['payload']
name = info['short_name']
if name in self.services:
return
with self._state_lock:
new_id = len(self.services)
serv = states.ServiceState(name, info['long_name'],
info['preregistered'], new_id)
self.services[name] = serv
self._name_map[new_id] = name
# Notify about this new service if anyone is listening
if self._on_change_callback:
self._on_change_callback(name, new_id, serv.state, True, False) | [
"Add a new service."
] |
Please provide a description of the function:async def _on_heartbeat(self, update):
name = update['service']
if name not in self.services:
return
with self._state_lock:
self.services[name].heartbeat() | [
"Receive a new heartbeat for a service."
] |
Please provide a description of the function:async def _on_message(self, update):
name = update['service']
message_obj = update['payload']
if name not in self.services:
return
with self._state_lock:
self.services[name].post_message(message_obj['level'], message_obj['message']) | [
"Receive a message from a service."
] |
Please provide a description of the function:async def _on_headline(self, update):
name = update['service']
message_obj = update['payload']
new_headline = False
if name not in self.services:
return
with self._state_lock:
self.services[name].set_headline(message_obj['level'], message_obj['message'])
if self.services[name].headline.count == 1:
new_headline = True
# Notify about this service state change if anyone is listening
# headline changes are only reported if they are not duplicates
if self._on_change_callback and new_headline:
self._on_change_callback(name, self.services[name].id, self.services[name].state, False, True) | [
"Receive a headline from a service."
] |
Please provide a description of the function:async def _on_rpc_command(self, event):
payload = event['payload']
rpc_id = payload['rpc_id']
tag = payload['response_uuid']
args = payload['payload']
result = 'success'
response = b''
if self._rpc_dispatcher is None or not self._rpc_dispatcher.has_rpc(rpc_id):
result = 'rpc_not_found'
else:
try:
response = self._rpc_dispatcher.call_rpc(rpc_id, args)
if inspect.iscoroutine(response):
response = await response
except RPCInvalidArgumentsError:
result = 'invalid_arguments'
except RPCInvalidReturnValueError:
result = 'invalid_response'
except Exception: #pylint:disable=broad-except;We are being called in a background task
self._logger.exception("Exception handling RPC 0x%04X", rpc_id)
result = 'execution_exception'
message = dict(response_uuid=tag, result=result, response=response)
try:
await self.send_command(OPERATIONS.CMD_RESPOND_RPC, message,
MESSAGES.RespondRPCResponse)
except: #pylint:disable=bare-except;We are being called in a background worker
self._logger.exception("Error sending response to RPC 0x%04X", rpc_id) | [
"Received an RPC command that we should execute."
] |
Please provide a description of the function:def get_messages(self, name):
return self._loop.run_coroutine(self._client.get_messages(name)) | [
"Get stored messages for a service.\n\n Args:\n name (string): The name of the service to get messages from.\n\n Returns:\n list(ServiceMessage): A list of the messages stored for this service\n "
] |
Please provide a description of the function:def get_headline(self, name):
return self._loop.run_coroutine(self._client.get_headline(name)) | [
"Get stored messages for a service.\n\n Args:\n name (string): The name of the service to get messages from.\n\n Returns:\n ServiceMessage: the headline or None if no headline has been set\n "
] |
Please provide a description of the function:def send_heartbeat(self, name):
return self._loop.run_coroutine(self._client.send_heartbeat(name)) | [
"Send a heartbeat for a service.\n\n Args:\n name (string): The name of the service to send a heartbeat for\n "
] |
Please provide a description of the function:def service_info(self, name):
return self._loop.run_coroutine(self._client.service_info(name)) | [
"Pull descriptive info of a service by name.\n\n Information returned includes the service's user friendly\n name and whether it was preregistered or added dynamically.\n\n Returns:\n dict: A dictionary of service information with the following keys\n set:\n long_name (string): The user friendly name of the service\n preregistered (bool): Whether the service was explicitly\n called out as a preregistered service.\n "
] |
Please provide a description of the function:def update_state(self, name, state):
self._loop.run_coroutine(self._client.update_state(name, state)) | [
"Update the state for a service.\n\n Args:\n name (string): The name of the service\n state (int): The new state of the service\n "
] |
Please provide a description of the function:def post_headline(self, name, level, message):
self._client.post_headline(name, level, message) | [
"Asynchronously update the sticky headline for a service.\n\n Args:\n name (string): The name of the service\n level (int): A message level in states.*_LEVEL\n message (string): The user facing error message that will be stored\n for the service and can be queried later.\n "
] |
Please provide a description of the function:def service_status(self, name):
return self._loop.run_coroutine(self._client.service_status(name)) | [
"Pull the current status of a service by name.\n\n Returns:\n dict: A dictionary of service status\n "
] |