Please provide a description of the function:def FromDictionary(cls, msg_dict):
level = msg_dict.get('level')
msg = msg_dict.get('message')
now = msg_dict.get('now_time')
created = msg_dict.get('created_time')
count = msg_dict.get('count', 1)
msg_id = msg_dict.get('id', 0)
new_msg = ServiceMessage(level, msg, msg_id, created, now)
if count > 1:
new_msg.count = count
return new_msg | [
"Create from a dictionary with kv pairs.\n\n Args:\n msg_dict (dict): A dictionary with information as created by to_dict()\n\n Returns:\n ServiceMessage: the converted message\n "
] |
Please provide a description of the function:def to_dict(self):
msg_dict = {}
msg_dict['level'] = self.level
msg_dict['message'] = self.message
msg_dict['now_time'] = monotonic()
msg_dict['created_time'] = self.created
msg_dict['id'] = self.id
msg_dict['count'] = self.count
return msg_dict | [
"Create a dictionary with the information in this message.\n\n Returns:\n dict: The dictionary with information\n "
] |
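
A quick round-trip sketch of the two methods above (field values are hypothetical; assumes ServiceMessage and monotonic are importable from this module):

    # Serialize a message and rebuild it; the fields should survive the trip.
    original = ServiceMessage(2, 'service degraded', 7, 100.0, monotonic())
    original.count = 3
    restored = ServiceMessage.FromDictionary(original.to_dict())
    assert restored.count == 3
    assert restored.message == 'service degraded'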
Please provide a description of the function:def get_message(self, message_id):
for message in self.messages:
if message.id == message_id:
return message
raise ArgumentError("Message ID not found", message_id=message_id) | [
"Get a message by its persistent id.\n\n Args:\n message_id (int): The id of the message that we're looking for\n "
] |
Please provide a description of the function:def post_message(self, level, message, count=1, timestamp=None, now_reference=None):
if len(self.messages) > 0 and self.messages[-1].message == message:
self.messages[-1].count += count  # honor the caller-supplied repeat count
else:
msg_object = ServiceMessage(level, message, self._last_message_id, timestamp, now_reference)
msg_object.count = count
self.messages.append(msg_object)
self._last_message_id += 1
return self.messages[-1] | [
"Post a new message for service.\n\n Args:\n level (int): The level of the message (info, warning, error)\n message (string): The message contents\n count (int): The number of times the message has been repeated\n timestamp (float): An optional monotonic value in seconds for when the message was created\n now_reference (float): If timestamp is not relative to monotonic() as called from this\n module then this should be now() as seen by whoever created the timestamp.\n\n Returns:\n ServiceMessage: The posted message\n "
] |
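
Starting from an empty message list, posting the same text twice coalesces it into one entry. A minimal sketch (assuming svc is an instance of the service-state class these methods belong to):

    svc.post_message(1, 'sensor offline')
    svc.post_message(1, 'sensor offline')  # coalesced into the previous entry
    assert len(svc.messages) == 1 and svc.messages[-1].count == 2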
Please provide a description of the function:def set_headline(self, level, message, timestamp=None, now_reference=None):
if self.headline is not None and self.headline.message == message:
self.headline.created = monotonic()
self.headline.count += 1
return
msg_object = ServiceMessage(level, message, self._last_message_id, timestamp, now_reference)
self.headline = msg_object
self._last_message_id += 1 | [
"Set the persistent headline message for this service.\n\n Args:\n level (int): The level of the message (info, warning, error)\n message (string): The message contents\n timestamp (float): An optional monotonic value in seconds for when the message was created\n now_reference (float): If timestamp is not relative to monotonic() as called from this\n module then this should be now() as seen by whoever created the timestamp.\n "
] |
Please provide a description of the function:def generate_doxygen_file(output_path, iotile):
mapping = {}
mapping['short_name'] = iotile.short_name
mapping['full_name'] = iotile.full_name
mapping['authors'] = iotile.authors
mapping['version'] = iotile.version
render_template('doxygen.txt.tpl', mapping, out_path=output_path) | [
"Fill in our default doxygen template file with info from an IOTile\n\n This populates things like name, version, etc.\n\n Arguments:\n output_path (str): a string path for where the filled template should go\n iotile (IOTile): An IOTile object that can be queried for information\n "
] |
Please provide a description of the function:def pull(name, version, force=False):
chain = DependencyResolverChain()
ver = SemanticVersionRange.FromString(version)
chain.pull_release(name, ver, force=force) | [
"Pull a released IOTile component into the current working directory\n\n The component is found using whatever DependencyResolvers are installed and registered\n as part of the default DependencyResolverChain. This is the same mechanism used in\n iotile depends update, so any component that can be updated using iotile depends update\n can be found and pulled using this method.\n "
] |
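
A usage sketch (the component name is hypothetical and '^1.0.0' is assumed to be an accepted SemanticVersionRange string):

    # Pull the newest 1.x release of a component into the current directory.
    pull('example_org/example_component', '^1.0.0', force=True)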
Please provide a description of the function:def add_callback(self, name, func):
if name == 'on_scan':
events = ['device_seen']
def callback(_conn_string, _conn_id, _name, event):
func(self.id, event, event.get('validity_period', 60))
elif name == 'on_report':
events = ['report', 'broadcast']
def callback(_conn_string, conn_id, _name, event):
func(conn_id, event)
elif name == 'on_trace':
events = ['trace']
def callback(_conn_string, conn_id, _name, event):
func(conn_id, event)
elif name == 'on_disconnect':
events = ['disconnection']
def callback(_conn_string, conn_id, _name, _event):
func(self.id, conn_id)
else:
raise ArgumentError("Unknown callback type {}".format(name))
self._adapter.register_monitor([None], events, callback) | [
"Add a callback when device events happen.\n\n Args:\n name (str): currently support 'on_scan' and 'on_disconnect'\n func (callable): the function that should be called\n "
] |
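
A registration sketch (assuming shim is an instance of this adapter shim; per the code above, 'on_scan' callbacks receive the adapter id, the scan event and its validity period in seconds):

    def handle_scan(adapter_id, info, expiration_time):
        print('saw device:', info, 'valid for', expiration_time, 's')

    shim.add_callback('on_scan', handle_scan)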
Please provide a description of the function:def connect_async(self, conn_id, connection_string, callback):
future = self._loop.launch_coroutine(self._adapter.connect(conn_id, connection_string))
future.add_done_callback(lambda x: self._callback_future(conn_id, x, callback)) | [
"Asynchronously connect to a device."
] |
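
The same completion-callback pattern is used by the *_async shims below. A calling-convention sketch (the connection string is hypothetical and the (conn_id, adapter_id, success, failure_reason) callback signature is assumed from the legacy DeviceAdapter convention):

    def on_connect(conn_id, adapter_id, success, failure_reason):
        if not success:
            print('connect failed:', failure_reason)

    shim.connect_async(1, 'device/0x1234', on_connect)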
Please provide a description of the function:def disconnect_async(self, conn_id, callback):
future = self._loop.launch_coroutine(self._adapter.disconnect(conn_id))
future.add_done_callback(lambda x: self._callback_future(conn_id, x, callback)) | [
"Asynchronously disconnect from a device."
] |
Please provide a description of the function:def open_interface_async(self, conn_id, interface, callback, connection_string=None):
future = self._loop.launch_coroutine(self._adapter.open_interface(conn_id, interface))
future.add_done_callback(lambda x: self._callback_future(conn_id, x, callback)) | [
"Asynchronously connect to a device."
] |
Please provide a description of the function:def probe_async(self, callback):
future = self._loop.launch_coroutine(self._adapter.probe())
future.add_done_callback(lambda x: self._callback_future(None, x, callback)) | [
"Asynchronously connect to a device."
] |
Please provide a description of the function:def send_rpc_async(self, conn_id, address, rpc_id, payload, timeout, callback):
future = self._loop.launch_coroutine(self._adapter.send_rpc(conn_id, address, rpc_id, payload, timeout))
def format_response(future):
payload = None
exception = future.exception()
rpc_status = None
rpc_response = b''
failure = None
success = True
if exception is None:
payload = future.result()
rpc_status, rpc_response = pack_rpc_response(payload, exception)
elif isinstance(exception, (RPCInvalidIDError, TileNotFoundError, RPCNotFoundError,
RPCErrorCode, BusyRPCResponse)):
rpc_status, rpc_response = pack_rpc_response(payload, exception)
else:
success = False
failure = str(exception)
callback(conn_id, self.id, success, failure, rpc_status, rpc_response)
future.add_done_callback(format_response) | [
"Asynchronously send an RPC to this IOTile device."
] |
Please provide a description of the function:def debug_async(self, conn_id, cmd_name, cmd_args, progress_callback, callback):
def monitor_callback(_conn_string, _conn_id, _event_name, event):
if event.get('operation') != 'debug':
return
progress_callback(event.get('finished'), event.get('total'))
async def _install_monitor():
try:
conn_string = self._adapter._get_property(conn_id, 'connection_string')
return self._adapter.register_monitor([conn_string], ['progress'], monitor_callback)
except: #pylint:disable=bare-except;This is a legacy shim that must always ensure it doesn't raise.
self._logger.exception("Error installing debug progress monitor")
return None
monitor_id = self._loop.run_coroutine(_install_monitor())
if monitor_id is None:
callback(conn_id, self.id, False, None, 'could not install progress monitor')
return
future = self._loop.launch_coroutine(self._adapter.debug(conn_id, cmd_name, cmd_args))
def format_response(future):
ret_val = None
success = True
failure = None
if future.exception() is None:
ret_val = future.result()
else:
success = False
failure = str(future.exception())
self._adapter.remove_monitor(monitor_id)
callback(conn_id, self.id, success, ret_val, failure)
future.add_done_callback(format_response) | [
"Asynchronously complete a named debug command.\n\n The command name and arguments are passed to the underlying device adapter\n and interpreted there. If the command is long running, progress_callback\n may be used to provide status updates. Callback is called when the command\n has finished.\n\n Args:\n conn_id (int): A unique identifier that will refer to this connection\n cmd_name (string): the name of the debug command we want to invoke\n cmd_args (dict): any arguments that we want to send with this command.\n progress_callback (callable): A function to be called with status on our progress, called as:\n progress_callback(done_count, total_count)\n callback (callable): The callback that should be called when finished.\n "
] |
Please provide a description of the function:def send_script_async(self, conn_id, data, progress_callback, callback):
def monitor_callback(_conn_string, _conn_id, _event_name, event):
if event.get('operation') != 'script':
return
progress_callback(event.get('finished'), event.get('total'))
async def _install_monitor():
try:
conn_string = self._adapter._get_property(conn_id, 'connection_string')
return self._adapter.register_monitor([conn_string], ['progress'], monitor_callback)
except: #pylint:disable=bare-except;This is a legacy shim that must always ensure it doesn't raise.
self._logger.exception("Error installing script progress monitor")
return None
monitor_id = self._loop.run_coroutine(_install_monitor())
if monitor_id is None:
callback(conn_id, self.id, False, 'could not install progress monitor')
return
future = self._loop.launch_coroutine(self._adapter.send_script(conn_id, data))
future.add_done_callback(lambda x: self._callback_future(conn_id, x, callback, monitors=[monitor_id])) | [
"Asynchronously send a script to the device."
] |
Please provide a description of the function:def lock(self, key, client):
self.key = key
self.client = client | [
"Set the key that will be used to ensure messages come from one party\n\n Args:\n key (string): The key used to validate future messages\n client (string): A string that will be returned to indicate who\n locked this device.\n "
] |
Please provide a description of the function:def track_change(self, tile, property_name, value, formatter=None):
if not self.tracking:
return
if len(self._whitelist) > 0 and (tile, property_name) not in self._whitelist:
return
if formatter is None:
formatter = str
change = StateChange(monotonic(), tile, property_name, value, formatter(value))
with self._lock:
self.changes.append(change) | [
"Record that a change happened on a given tile's property.\n\n This will as a StateChange object to our list of changes if we\n are recording changes, otherwise, it will drop the change.\n\n Args:\n tile (int): The address of the tile that the change happened on.\n property_name (str): The name of the property that changed.\n value (object): The new value assigned to the property.\n formatter (callable): Optional function to convert value to a\n string. This function will only be called if track_changes()\n is enabled and `name` is on the whitelist for properties that\n should be tracked. If `formatter` is not passed or is None,\n it will default to `str`.\n "
] |
Please provide a description of the function:def dump(self, out_path, header=True):
# See https://stackoverflow.com/a/3348664/9739119 for why this is necessary
if sys.version_info[0] < 3:
mode = "wb"
else:
mode = "w"
with open(out_path, mode) as outfile:
writer = csv.writer(outfile, quoting=csv.QUOTE_MINIMAL)
if header:
writer.writerow(["Timestamp", "Tile Address", "Property Name", "Value"])
for entry in self.changes:
writer.writerow([entry.time, entry.tile, entry.property, entry.string_value]) | [
"Save this list of changes as a csv file at out_path.\n\n The format of the output file will be a CSV with 4 columns:\n timestamp, tile address, property, string_value\n\n There will be a single header row starting the CSV output unless\n header=False is passed.\n\n Args:\n out_path (str): The path where we should save our current list of\n changes.\n header (bool): Whether we should include a header row in the csv\n file. Defaults to True.\n "
] |
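
A combined sketch of the two methods above (assuming tracker is the state-tracking object with tracking enabled and an empty whitelist):

    tracker.track_change(8, 'app_state', 'streaming')  # formatter defaults to str
    tracker.dump('changes.csv')  # writes one row per recorded change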
Please provide a description of the function:def PDFTeXLaTeXFunction(target = None, source= None, env=None):
basedir = os.path.split(str(source[0]))[0]
abspath = os.path.abspath(basedir)
if SCons.Tool.tex.is_LaTeX(source,env,abspath):
result = PDFLaTeXAuxAction(target,source,env)
if result != 0:
SCons.Tool.tex.check_file_error_message(env['PDFLATEX'])
else:
result = PDFTeXAction(target,source,env)
if result != 0:
SCons.Tool.tex.check_file_error_message(env['PDFTEX'])
return result | [
"A builder for TeX and LaTeX that scans the source file to\n decide the \"flavor\" of the source and then executes the appropriate\n program."
] |
Please provide a description of the function:def generate(env):
global PDFTeXAction
if PDFTeXAction is None:
PDFTeXAction = SCons.Action.Action('$PDFTEXCOM', '$PDFTEXCOMSTR')
global PDFLaTeXAction
if PDFLaTeXAction is None:
PDFLaTeXAction = SCons.Action.Action("$PDFLATEXCOM", "$PDFLATEXCOMSTR")
global PDFTeXLaTeXAction
if PDFTeXLaTeXAction is None:
PDFTeXLaTeXAction = SCons.Action.Action(PDFTeXLaTeXFunction,
strfunction=SCons.Tool.tex.TeXLaTeXStrFunction)
env.AppendUnique(LATEXSUFFIXES=SCons.Tool.LaTeXSuffixes)
from . import pdf
pdf.generate(env)
bld = env['BUILDERS']['PDF']
bld.add_action('.tex', PDFTeXLaTeXAction)
bld.add_emitter('.tex', SCons.Tool.tex.tex_pdf_emitter)
# Add the epstopdf builder after the pdftex builder
# so pdftex is the default for no source suffix
pdf.generate2(env)
SCons.Tool.tex.generate_common(env) | [
"Add Builders and construction variables for pdftex to an Environment."
] |
Please provide a description of the function:def start(self, channel):
super(TileBasedVirtualDevice, self).start(channel)
for tile in self._tiles.values():
tile.start(channel=channel) | [
"Start running this virtual device including any necessary worker threads.\n\n Args:\n channel (IOTilePushChannel): the channel with a stream and trace\n routine for streaming and tracing data through a VirtualInterface\n "
] |
Please provide a description of the function:def stop(self):
for tile in self._tiles.values():
tile.signal_stop()
for tile in self._tiles.values():
tile.wait_stopped()
super(TileBasedVirtualDevice, self).stop() | [
"Stop running this virtual device including any worker threads."
] |
Please provide a description of the function:def SetCacheMode(mode):
global cache_mode
if mode == "auto":
cache_mode = AUTO
elif mode == "force":
cache_mode = FORCE
elif mode == "cache":
cache_mode = CACHE
else:
raise ValueError("SCons.SConf.SetCacheMode: Unknown mode " + mode) | [
"Set the Configure cache mode. mode must be one of \"auto\", \"force\",\n or \"cache\"."
] |
Please provide a description of the function:def CreateConfigHBuilder(env):
action = SCons.Action.Action(_createConfigH,
_stringConfigH)
sconfigHBld = SCons.Builder.Builder(action=action)
env.Append( BUILDERS={'SConfigHBuilder':sconfigHBld} )
for k in list(_ac_config_hs.keys()):
env.SConfigHBuilder(k, env.Value(_ac_config_hs[k])) | [
"Called if necessary just before the building targets phase begins."
] |
Please provide a description of the function:def CheckHeader(context, header, include_quotes = '<>', language = None):
prog_prefix, hdr_to_check = \
createIncludesFromHeaders(header, 1, include_quotes)
res = SCons.Conftest.CheckHeader(context, hdr_to_check, prog_prefix,
language = language,
include_quotes = include_quotes)
context.did_show_result = 1
return not res | [
"\n A test for a C or C++ header file.\n "
] |
Please provide a description of the function:def CheckLib(context, library = None, symbol = "main",
header = None, language = None, autoadd = 1):
if library == []:
library = [None]
if not SCons.Util.is_List(library):
library = [library]
# ToDo: accept path for the library
res = SCons.Conftest.CheckLib(context, library, symbol, header = header,
language = language, autoadd = autoadd)
context.did_show_result = 1
return not res | [
"\n A test for a library. See also CheckLibWithHeader.\n Note that library may also be None to test whether the given symbol\n compiles without flags.\n "
] |
Please provide a description of the function:def CheckLibWithHeader(context, libs, header, language,
call = None, autoadd = 1):
# ToDo: accept path for library. Support system header files.
prog_prefix, dummy = \
createIncludesFromHeaders(header, 0)
if libs == []:
libs = [None]
if not SCons.Util.is_List(libs):
libs = [libs]
res = SCons.Conftest.CheckLib(context, libs, None, prog_prefix,
call = call, language = language, autoadd = autoadd)
context.did_show_result = 1
return not res | [
"\n Another (more sophisticated) test for a library.\n Checks, if library and header is available for language (may be 'C'\n or 'CXX'). Call maybe be a valid expression _with_ a trailing ';'.\n As in CheckLib, we support library=None, to test if the call compiles\n without extra link flags.\n "
] |
Please provide a description of the function:def CheckProg(context, prog_name):
res = SCons.Conftest.CheckProg(context, prog_name)
context.did_show_result = 1
return res | [
"Simple check if a program exists in the path. Returns the path\n for the application, or None if not found.\n "
] |
Please provide a description of the function:def display_cached_string(self, bi):
if not isinstance(bi, SConfBuildInfo):
SCons.Warnings.warn(SConfWarning,
"The stored build information has an unexpected class: %s" % bi.__class__)
else:
self.display("The original builder output was:\n" +
(" |" + str(bi.string)).replace("\n", "\n |")) | [
"\n Logs the original builder messages, given the SConfBuildInfo instance\n bi.\n "
] |
Please provide a description of the function:def Define(self, name, value = None, comment = None):
lines = []
if comment:
comment_str = "/* %s */" % comment
lines.append(comment_str)
if value is not None:
define_str = "#define %s %s" % (name, value)
else:
define_str = "#define %s" % name
lines.append(define_str)
lines.append('')
self.config_h_text = self.config_h_text + '\n'.join(lines) | [
"\n Define a pre processor symbol name, with the optional given value in the\n current config header.\n\n If value is None (default), then #define name is written. If value is not\n none, then #define name value is written.\n\n comment is a string which will be put as a C comment in the header, to explain the meaning of the value\n (appropriate C comments will be added automatically).\n "
] |
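
For example, a call like the following (sketch) appends this fragment to config_h_text:

    conf.Define('HAVE_ZLIB', 1, 'Set to 1 if zlib is available')
    # /* Set to 1 if zlib is available */
    # #define HAVE_ZLIB 1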
Please provide a description of the function:def BuildNodes(self, nodes):
if self.logstream is not None:
# override stdout / stderr to write in log file
oldStdout = sys.stdout
sys.stdout = self.logstream
oldStderr = sys.stderr
sys.stderr = self.logstream
# the engine assumes the current path is the SConstruct directory ...
old_fs_dir = SConfFS.getcwd()
old_os_dir = os.getcwd()
SConfFS.chdir(SConfFS.Top, change_os_dir=1)
# Because we take responsibility here for writing out our
# own .sconsign info (see SConfBuildTask.execute(), above),
# we override the store_info() method with a null place-holder
# so we really control how it gets written.
for n in nodes:
n.store_info = 0
if not hasattr(n, 'attributes'):
n.attributes = SCons.Node.Node.Attrs()
n.attributes.keep_targetinfo = 1
ret = 1
try:
# ToDo: use user options for calc
save_max_drift = SConfFS.get_max_drift()
SConfFS.set_max_drift(0)
tm = SCons.Taskmaster.Taskmaster(nodes, SConfBuildTask)
# we don't want to build tests in parallel
jobs = SCons.Job.Jobs(1, tm )
jobs.run()
for n in nodes:
state = n.get_state()
if (state != SCons.Node.executed and
state != SCons.Node.up_to_date):
# the node could not be built. we return 0 in this case
ret = 0
finally:
SConfFS.set_max_drift(save_max_drift)
os.chdir(old_os_dir)
SConfFS.chdir(old_fs_dir, change_os_dir=0)
if self.logstream is not None:
# restore stdout / stderr
sys.stdout = oldStdout
sys.stderr = oldStderr
return ret | [
"\n Tries to build the given nodes immediately. Returns 1 on success,\n 0 on error.\n "
] |
Please provide a description of the function:def pspawn_wrapper(self, sh, escape, cmd, args, env):
return self.pspawn(sh, escape, cmd, args, env, self.logstream, self.logstream) | [
"Wrapper function for handling piped spawns.\n\n This looks to the calling interface (in Action.py) like a \"normal\"\n spawn, but associates the call with the PSPAWN variable from\n the construction environment and with the streams to which we\n want the output logged. This gets slid into the construction\n environment as the SPAWN variable so Action.py doesn't have to\n know or care whether it's spawning a piped command or not.\n "
] |
Please provide a description of the function:def TryBuild(self, builder, text = None, extension = ""):
global _ac_build_counter
# Make sure we have a PSPAWN value, and save the current
# SPAWN value.
try:
self.pspawn = self.env['PSPAWN']
except KeyError:
raise SCons.Errors.UserError('Missing PSPAWN construction variable.')
try:
save_spawn = self.env['SPAWN']
except KeyError:
raise SCons.Errors.UserError('Missing SPAWN construction variable.')
nodesToBeBuilt = []
f = "conftest_" + str(_ac_build_counter)
pref = self.env.subst( builder.builder.prefix )
suff = self.env.subst( builder.builder.suffix )
target = self.confdir.File(pref + f + suff)
try:
# Slide our wrapper into the construction environment as
# the SPAWN function.
self.env['SPAWN'] = self.pspawn_wrapper
sourcetext = self.env.Value(text)
if text is not None:
textFile = self.confdir.File(f + extension)
textFileNode = self.env.SConfSourceBuilder(target=textFile,
source=sourcetext)
nodesToBeBuilt.extend(textFileNode)
source = textFileNode
else:
source = None
nodes = builder(target = target, source = source)
if not SCons.Util.is_List(nodes):
nodes = [nodes]
nodesToBeBuilt.extend(nodes)
result = self.BuildNodes(nodesToBeBuilt)
finally:
self.env['SPAWN'] = save_spawn
_ac_build_counter = _ac_build_counter + 1
if result:
self.lastTarget = nodes[0]
else:
self.lastTarget = None
return result | [
"Low level TryBuild implementation. Normally you don't need to\n call that - you can use TryCompile / TryLink / TryRun instead\n "
] |
Please provide a description of the function:def TryAction(self, action, text = None, extension = ""):
builder = SCons.Builder.Builder(action=action)
self.env.Append( BUILDERS = {'SConfActionBuilder' : builder} )
ok = self.TryBuild(self.env.SConfActionBuilder, text, extension)
del self.env['BUILDERS']['SConfActionBuilder']
if ok:
outputStr = self.lastTarget.get_contents().decode()
return (1, outputStr)
return (0, "") | [
"Tries to execute the given action with optional source file\n contents <text> and optional source file extension <extension>,\n Returns the status (0 : failed, 1 : ok) and the contents of the\n output file.\n "
] |
Please provide a description of the function:def TryCompile( self, text, extension):
return self.TryBuild(self.env.Object, text, extension) | [
"Compiles the program given in text to an env.Object, using extension\n as file extension (e.g. '.c'). Returns 1, if compilation was\n successful, 0 otherwise. The target is saved in self.lastTarget (for\n further processing).\n "
] |
Please provide a description of the function:def TryLink( self, text, extension ):
return self.TryBuild(self.env.Program, text, extension ) | [
"Compiles the program given in text to an executable env.Program,\n using extension as file extension (e.g. '.c'). Returns 1, if\n compilation was successful, 0 otherwise. The target is saved in\n self.lastTarget (for further processing).\n "
] |
Please provide a description of the function:def TryRun(self, text, extension ):
ok = self.TryLink(text, extension)
if ok:
prog = self.lastTarget
pname = prog.get_internal_path()
output = self.confdir.File(os.path.basename(pname)+'.out')
node = self.env.Command(output, prog, [ [ pname, ">", "${TARGET}"] ])
ok = self.BuildNodes(node)
if ok:
outputStr = SCons.Util.to_str(output.get_contents())
return( 1, outputStr)
return (0, "") | [
"Compiles and runs the program given in text, using extension\n as file extension (e.g. '.c'). Returns (1, outputStr) on success,\n (0, '') otherwise. The target (a file containing the program's stdout)\n is saved in self.lastTarget (for further processing).\n "
] |
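
Typical SConstruct-level usage of the Try* family (a sketch; the probe program is illustrative):

    env = Environment()
    conf = Configure(env)
    ok, out = conf.TryRun('#include <stdio.h>\nint main(void) { printf("42"); return 0; }\n', '.c')
    if ok and out == '42':
        conf.Define('HAVE_ANSWER', 1)
    env = conf.Finish()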
Please provide a description of the function:def _shutdown(self):
global sconf_global, _ac_config_hs
if not self.active:
raise SCons.Errors.UserError("Finish may be called only once!")
if self.logstream is not None and not dryrun:
self.logstream.write("\n")
self.logstream.close()
self.logstream = None
# remove the SConfSourceBuilder from the environment
blds = self.env['BUILDERS']
del blds['SConfSourceBuilder']
self.env.Replace( BUILDERS=blds )
self.active = 0
sconf_global = None
if self.config_h is not None:
_ac_config_hs[self.config_h] = self.config_h_text
self.env.fs = self.lastEnvFs | [
"Private method. Reset to non-piped spawn"
] |
Please provide a description of the function:def Message(self, text):
self.Display(text)
self.sconf.cached = 1
self.did_show_result = 0 | [
"Inform about what we are doing right now, e.g.\n 'Checking for SOMETHING ... '\n "
] |
Please provide a description of the function:def Result(self, res):
if isinstance(res, str):
text = res
elif res:
text = "yes"
else:
text = "no"
if self.did_show_result == 0:
# Didn't show result yet, do it now.
self.Display(text + "\n")
self.did_show_result = 1 | [
"Inform about the result of the test. If res is not a string, displays\n 'yes' or 'no' depending on whether res is evaluated as true or false.\n The result is only displayed when self.did_show_result is not set.\n "
] |
Please provide a description of the function:def linux_ver_normalize(vstr):
# Check for version number like 9.1.026: return 91.026
# XXX needs to be updated for 2011+ versions (like 2011.11.344 which is compiler v12.1.5)
m = re.match(r'([0-9]+)\.([0-9]+)\.([0-9]+)', vstr)
if m:
vmaj,vmin,build = m.groups()
return float(vmaj) * 10. + float(vmin) + float(build) / 1000.
else:
f = float(vstr)
if is_windows:
return f
else:
if f < 60: return f * 10.0
else: return f | [
"Normalize a Linux compiler version number.\n Intel changed from \"80\" to \"9.0\" in 2005, so we assume if the number\n is greater than 60 it's an old-style number and otherwise new-style.\n Always returns an old-style float like 80 or 90 for compatibility with Windows.\n Shades of Y2K!"
] |
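
Worked examples of the normalization on Linux:

    linux_ver_normalize('9.1.026')  # -> 91.026 (9*10 + 1 + 26/1000)
    linux_ver_normalize('80')       # -> 80.0   (old-style number, >= 60, returned as-is)
    linux_ver_normalize('9.0')      # -> 90.0   (new-style number, < 60, scaled by 10)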
Please provide a description of the function:def check_abi(abi):
if not abi:
return None
abi = abi.lower()
# valid_abis maps input name to canonical name
if is_windows:
valid_abis = {'ia32' : 'ia32',
'x86' : 'ia32',
'ia64' : 'ia64',
'em64t' : 'em64t',
'amd64' : 'em64t'}
if is_linux:
valid_abis = {'ia32' : 'ia32',
'x86' : 'ia32',
'x86_64' : 'x86_64',
'em64t' : 'x86_64',
'amd64' : 'x86_64'}
if is_mac:
valid_abis = {'ia32' : 'ia32',
'x86' : 'ia32',
'x86_64' : 'x86_64',
'em64t' : 'x86_64'}
try:
abi = valid_abis[abi]
except KeyError:
raise SCons.Errors.UserError("Intel compiler: Invalid ABI %s, valid values are %s"% \
(abi, list(valid_abis.keys())))
return abi | [
"Check for valid ABI (application binary interface) name,\n and map into canonical one"
] |
Please provide a description of the function:def get_version_from_list(v, vlist):
if is_windows:
# Simple case, just find it in the list
if v in vlist: return v
else: return None
else:
# Fuzzy match: normalize version number first, but still return
# original non-normalized form.
fuzz = 0.001
for vi in vlist:
if math.fabs(linux_ver_normalize(vi) - linux_ver_normalize(v)) < fuzz:
return vi
# Not found
return None | [
"See if we can match v (string) in vlist (list of strings)\n Linux has to match in a fuzzy way."
] |
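
Because matching is fuzzy on Linux, either spelling of a version finds the installed one, and the original list entry is returned:

    get_version_from_list('90', ['8.0', '9.0'])  # -> '9.0' ('90' and '9.0' both normalize to 90.0)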
Please provide a description of the function:def get_intel_registry_value(valuename, version=None, abi=None):
# Open the key:
if is_win64:
K = 'Software\\Wow6432Node\\Intel\\Compilers\\C++\\' + version + '\\'+abi.upper()
else:
K = 'Software\\Intel\\Compilers\\C++\\' + version + '\\'+abi.upper()
try:
k = SCons.Util.RegOpenKeyEx(SCons.Util.HKEY_LOCAL_MACHINE, K)
except SCons.Util.RegError:
# For version 13 and later, check UUID subkeys for valuename
if is_win64:
K = 'Software\\Wow6432Node\\Intel\\Suites\\' + version + "\\Defaults\\C++\\" + abi.upper()
else:
K = 'Software\\Intel\\Suites\\' + version + "\\Defaults\\C++\\" + abi.upper()
try:
k = SCons.Util.RegOpenKeyEx(SCons.Util.HKEY_LOCAL_MACHINE, K)
uuid = SCons.Util.RegQueryValueEx(k, 'SubKey')[0]
if is_win64:
K = 'Software\\Wow6432Node\\Intel\\Suites\\' + version + "\\" + uuid + "\\C++"
else:
K = 'Software\\Intel\\Suites\\' + version + "\\" + uuid + "\\C++"
k = SCons.Util.RegOpenKeyEx(SCons.Util.HKEY_LOCAL_MACHINE, K)
try:
v = SCons.Util.RegQueryValueEx(k, valuename)[0]
return v # or v.encode('iso-8859-1', 'replace') to remove unicode?
except SCons.Util.RegError:
if abi.upper() == 'EM64T':
abi = 'em64t_native'
if is_win64:
K = 'Software\\Wow6432Node\\Intel\\Suites\\' + version + "\\" + uuid + "\\C++\\" + abi.upper()
else:
K = 'Software\\Intel\\Suites\\' + version + "\\" + uuid + "\\C++\\" + abi.upper()
k = SCons.Util.RegOpenKeyEx(SCons.Util.HKEY_LOCAL_MACHINE, K)
try:
v = SCons.Util.RegQueryValueEx(k, valuename)[0]
return v # or v.encode('iso-8859-1', 'replace') to remove unicode?
except SCons.Util.RegError:
raise MissingRegistryError("%s was not found in the registry, for Intel compiler version %s, abi='%s'"%(K, version,abi))
except SCons.Util.RegError:
raise MissingRegistryError("%s was not found in the registry, for Intel compiler version %s, abi='%s'"%(K, version,abi))
except SCons.Util.WinError:
raise MissingRegistryError("%s was not found in the registry, for Intel compiler version %s, abi='%s'"%(K, version,abi))
# Get the value:
try:
v = SCons.Util.RegQueryValueEx(k, valuename)[0]
return v # or v.encode('iso-8859-1', 'replace') to remove unicode?
except SCons.Util.RegError:
raise MissingRegistryError("%s\\%s was not found in the registry."%(K, valuename)) | [
"\n Return a value from the Intel compiler registry tree. (Windows only)\n "
] |
Please provide a description of the function:def get_all_compiler_versions():
versions=[]
if is_windows:
if is_win64:
keyname = 'Software\\WoW6432Node\\Intel\\Compilers\\C++'
else:
keyname = 'Software\\Intel\\Compilers\\C++'
try:
k = SCons.Util.RegOpenKeyEx(SCons.Util.HKEY_LOCAL_MACHINE,
keyname)
except SCons.Util.WinError:
# For version 13 or later, check for default instance UUID
if is_win64:
keyname = 'Software\\WoW6432Node\\Intel\\Suites'
else:
keyname = 'Software\\Intel\\Suites'
try:
k = SCons.Util.RegOpenKeyEx(SCons.Util.HKEY_LOCAL_MACHINE,
keyname)
except SCons.Util.WinError:
return []
i = 0
versions = []
try:
while i < 100:
subkey = SCons.Util.RegEnumKey(k, i) # raises EnvironmentError
# Check that this refers to an existing dir.
# This is not 100% perfect but should catch common
# installation issues like when the compiler was installed
# and then the install directory deleted or moved (rather
# than uninstalling properly), so the registry values
# are still there.
if subkey == 'Defaults': # Ignore default instances
i = i + 1
continue
ok = False
for try_abi in ('IA32', 'IA32e', 'IA64', 'EM64T'):
try:
d = get_intel_registry_value('ProductDir', subkey, try_abi)
except MissingRegistryError:
continue # not found in reg, keep going
if os.path.exists(d): ok = True
if ok:
versions.append(subkey)
else:
try:
# Registry points to nonexistent dir. Ignore this
# version.
value = get_intel_registry_value('ProductDir', subkey, 'IA32')
except MissingRegistryError as e:
# Registry key is left dangling (potentially
# after uninstalling).
print("scons: *** Ignoring the registry key for the Intel compiler version %s.\n" \
"scons: *** It seems that the compiler was uninstalled and that the registry\n" \
"scons: *** was not cleaned up properly.\n" % subkey)
else:
print("scons: *** Ignoring "+str(value))
i = i + 1
except EnvironmentError:
# no more subkeys
pass
elif is_linux or is_mac:
for d in glob.glob('/opt/intel_cc_*'):
# Typical dir here is /opt/intel_cc_80.
m = re.search(r'cc_(.*)$', d)
if m:
versions.append(m.group(1))
for d in glob.glob('/opt/intel/cc*/*'):
# Typical dir here is /opt/intel/cc/9.0 for IA32,
# /opt/intel/cce/9.0 for EMT64 (AMD64)
m = re.search(r'([0-9][0-9.]*)$', d)
if m:
versions.append(m.group(1))
for d in glob.glob('/opt/intel/Compiler/*'):
# Typical dir here is /opt/intel/Compiler/11.1
m = re.search(r'([0-9][0-9.]*)$', d)
if m:
versions.append(m.group(1))
for d in glob.glob('/opt/intel/composerxe-*'):
# Typical dir here is /opt/intel/composerxe-2011.4.184
m = re.search(r'([0-9][0-9.]*)$', d)
if m:
versions.append(m.group(1))
for d in glob.glob('/opt/intel/composer_xe_*'):
# Typical dir here is /opt/intel/composer_xe_2011_sp1.11.344
# The _sp1 is useless, the installers are named 2011.9.x, 2011.10.x, 2011.11.x
m = re.search(r'([0-9]{0,4})(?:_sp\d*)?\.([0-9][0-9.]*)$', d)
if m:
versions.append("%s.%s"%(m.group(1), m.group(2)))
for d in glob.glob('/opt/intel/compilers_and_libraries_*'):
# JPA: For the new version of Intel compiler 2016.1.
m = re.search(r'([0-9]{0,4})(?:_sp\d*)?\.([0-9][0-9.]*)$', d)
if m:
versions.append("%s.%s"%(m.group(1), m.group(2)))
def keyfunc(version_string):
return [int(x) for x in version_string.split('.')]
# split into ints, sort, then remove dups
return sorted(SCons.Util.unique(versions), key=keyfunc, reverse=True) | [
"Returns a sorted list of strings, like \"70\" or \"80\" or \"9.0\"\n with most recent compiler version first.\n ",
"Given a dot-separated version string, return a tuple of ints representing it."
] |
Please provide a description of the function:def get_intel_compiler_top(version, abi):
if is_windows:
if not SCons.Util.can_read_reg:
raise NoRegistryModuleError("No Windows registry module was found")
top = get_intel_registry_value('ProductDir', version, abi)
archdir={'x86_64': 'intel64',
'amd64' : 'intel64',
'em64t' : 'intel64',
'x86' : 'ia32',
'i386' : 'ia32',
'ia32' : 'ia32'
}[abi] # for v11 and greater
# pre-11, icl was in Bin. 11 and later, it's in Bin/<abi> apparently.
if not os.path.exists(os.path.join(top, "Bin", "icl.exe")) \
and not os.path.exists(os.path.join(top, "Bin", abi, "icl.exe")) \
and not os.path.exists(os.path.join(top, "Bin", archdir, "icl.exe")):
raise MissingDirError("Can't find Intel compiler in %s"%(top))
elif is_mac or is_linux:
def find_in_2008style_dir(version):
# first dir is new (>=9.0) style, second is old (8.0) style.
dirs=('/opt/intel/cc/%s', '/opt/intel_cc_%s')
if abi == 'x86_64':
dirs=('/opt/intel/cce/%s',) # 'e' stands for 'em64t', aka x86_64 aka amd64
top=None
for d in dirs:
if os.path.exists(os.path.join(d%version, "bin", "icc")):
top = d%version
break
return top
def find_in_2010style_dir(version):
dirs=('/opt/intel/Compiler/%s/*'%version)
# typically /opt/intel/Compiler/11.1/064 (then bin/intel64/icc)
dirs=glob.glob(dirs)
# find highest sub-version number by reverse sorting and picking first existing one.
dirs.sort()
dirs.reverse()
top=None
for d in dirs:
if (os.path.exists(os.path.join(d, "bin", "ia32", "icc")) or
os.path.exists(os.path.join(d, "bin", "intel64", "icc"))):
top = d
break
return top
def find_in_2011style_dir(version):
# The 2011 (compiler v12) dirs are inconsistent, so just redo the search from
# get_all_compiler_versions and look for a match (search the newest form first)
top=None
for d in glob.glob('/opt/intel/composer_xe_*'):
# Typical dir here is /opt/intel/composer_xe_2011_sp1.11.344
# The _sp1 is useless, the installers are named 2011.9.x, 2011.10.x, 2011.11.x
m = re.search(r'([0-9]{0,4})(?:_sp\d*)?\.([0-9][0-9.]*)$', d)
if m:
cur_ver = "%s.%s"%(m.group(1), m.group(2))
if cur_ver == version and \
(os.path.exists(os.path.join(d, "bin", "ia32", "icc")) or
os.path.exists(os.path.join(d, "bin", "intel64", "icc"))):
top = d
break
if not top:
for d in glob.glob('/opt/intel/composerxe-*'):
# Typical dir here is /opt/intel/composerxe-2011.4.184
m = re.search(r'([0-9][0-9.]*)$', d)
if m and m.group(1) == version and \
(os.path.exists(os.path.join(d, "bin", "ia32", "icc")) or
os.path.exists(os.path.join(d, "bin", "intel64", "icc"))):
top = d
break
return top
def find_in_2016style_dir(version):
# The 2016 (compiler v16) dirs are inconsistent from previous.
top = None
for d in glob.glob('/opt/intel/compilers_and_libraries_%s/linux'%version):
if os.path.exists(os.path.join(d, "bin", "ia32", "icc")) or os.path.exists(os.path.join(d, "bin", "intel64", "icc")):
top = d
break
return top
top = find_in_2016style_dir(version) or find_in_2011style_dir(version) or find_in_2010style_dir(version) or find_in_2008style_dir(version)
# print "INTELC: top=",top
if not top:
raise MissingDirError("Can't find version %s Intel compiler in %s (abi='%s')"%(version,top, abi))
return top | [
"\n Return the main path to the top-level dir of the Intel compiler,\n using the given version.\n The compiler will be in <top>/bin/icl.exe (icc on linux),\n the include dir is <top>/include, etc.\n "
] |
Please provide a description of the function:def generate(env, version=None, abi=None, topdir=None, verbose=0):
if not (is_mac or is_linux or is_windows):
# can't handle this platform
return
if is_windows:
SCons.Tool.msvc.generate(env)
elif is_linux:
SCons.Tool.gcc.generate(env)
elif is_mac:
SCons.Tool.gcc.generate(env)
# if version is unspecified, use latest
vlist = get_all_compiler_versions()
if not version:
if vlist:
version = vlist[0]
else:
# User may have specified '90' but we need to get actual dirname '9.0'.
# get_version_from_list does that mapping.
v = get_version_from_list(version, vlist)
if not v:
raise SCons.Errors.UserError("Invalid Intel compiler version %s: "%version + \
"installed versions are %s"%(', '.join(vlist)))
version = v
# if abi is unspecified, use ia32
# alternatives are ia64 for Itanium, or amd64 or em64t or x86_64 (all synonyms here)
abi = check_abi(abi)
if abi is None:
if is_mac or is_linux:
# Check if we are on 64-bit linux, default to 64 then.
uname_m = os.uname()[4]
if uname_m == 'x86_64':
abi = 'x86_64'
else:
abi = 'ia32'
else:
if is_win64:
abi = 'em64t'
else:
abi = 'ia32'
if version and not topdir:
try:
topdir = get_intel_compiler_top(version, abi)
except (SCons.Util.RegError, IntelCError):
topdir = None
if not topdir:
# Normally this is an error, but it might not be if the compiler is
# on $PATH and the user is importing their env.
class ICLTopDirWarning(SCons.Warnings.Warning):
pass
if (is_mac or is_linux) and not env.Detect('icc') or \
is_windows and not env.Detect('icl'):
SCons.Warnings.enableWarningClass(ICLTopDirWarning)
SCons.Warnings.warn(ICLTopDirWarning,
"Failed to find Intel compiler for version='%s', abi='%s'"%
(str(version), str(abi)))
else:
# should be cleaned up to say what this other version is
# since in this case we have some other Intel compiler installed
SCons.Warnings.enableWarningClass(ICLTopDirWarning)
SCons.Warnings.warn(ICLTopDirWarning,
"Can't find Intel compiler top dir for version='%s', abi='%s'"%
(str(version), str(abi)))
if topdir:
archdir={'x86_64': 'intel64',
'amd64' : 'intel64',
'em64t' : 'intel64',
'x86' : 'ia32',
'i386' : 'ia32',
'ia32' : 'ia32'
}[abi] # for v11 and greater
if os.path.exists(os.path.join(topdir, 'bin', archdir)):
bindir="bin/%s"%archdir
libdir="lib/%s"%archdir
else:
bindir="bin"
libdir="lib"
if verbose:
print("Intel C compiler: using version %s (%g), abi %s, in '%s/%s'"%\
(repr(version), linux_ver_normalize(version),abi,topdir,bindir))
if is_linux:
# Show the actual compiler version by running the compiler.
os.system('%s/%s/icc --version'%(topdir,bindir))
if is_mac:
# Show the actual compiler version by running the compiler.
os.system('%s/%s/icc --version'%(topdir,bindir))
env['INTEL_C_COMPILER_TOP'] = topdir
if is_linux:
paths={'INCLUDE' : 'include',
'LIB' : libdir,
'PATH' : bindir,
'LD_LIBRARY_PATH' : libdir}
for p in list(paths.keys()):
env.PrependENVPath(p, os.path.join(topdir, paths[p]))
if is_mac:
paths={'INCLUDE' : 'include',
'LIB' : libdir,
'PATH' : bindir,
'LD_LIBRARY_PATH' : libdir}
for p in list(paths.keys()):
env.PrependENVPath(p, os.path.join(topdir, paths[p]))
if is_windows:
# env key reg valname default subdir of top
paths=(('INCLUDE', 'IncludeDir', 'Include'),
('LIB' , 'LibDir', 'Lib'),
('PATH' , 'BinDir', 'Bin'))
# We are supposed to ignore version if topdir is set, so set
# it to the empty string if it's not already set.
if version is None:
version = ''
# Each path has a registry entry, use that or default to subdir
for p in paths:
try:
path=get_intel_registry_value(p[1], version, abi)
# These paths may have $(ICInstallDir)
# which needs to be substituted with the topdir.
path=path.replace('$(ICInstallDir)', topdir + os.sep)
except IntelCError:
# Couldn't get it from registry: use default subdir of topdir
env.PrependENVPath(p[0], os.path.join(topdir, p[2]))
else:
env.PrependENVPath(p[0], path.split(os.pathsep))
# print "ICL %s: %s, final=%s"%(p[0], path, str(env['ENV'][p[0]]))
if is_windows:
env['CC'] = 'icl'
env['CXX'] = 'icl'
env['LINK'] = 'xilink'
else:
env['CC'] = 'icc'
env['CXX'] = 'icpc'
# Don't reset LINK here;
# use smart_link which should already be here from link.py.
#env['LINK'] = '$CC'
env['AR'] = 'xiar'
env['LD'] = 'xild' # not used by default
# This is not the exact (detailed) compiler version,
# just the major version as determined above or specified
# by the user. It is a float like 80 or 90, in normalized form for Linux
# (i.e. even for Linux 9.0 compiler, still returns 90 rather than 9.0)
if version:
env['INTEL_C_COMPILER_VERSION']=linux_ver_normalize(version)
if is_windows:
# Look for license file dir
# in system environment, registry, and default location.
envlicdir = os.environ.get("INTEL_LICENSE_FILE", '')
K = r'SOFTWARE\Intel\Licenses'
try:
k = SCons.Util.RegOpenKeyEx(SCons.Util.HKEY_LOCAL_MACHINE, K)
reglicdir = SCons.Util.RegQueryValueEx(k, "w_cpp")[0]
except (AttributeError, SCons.Util.RegError):
reglicdir = ""
defaultlicdir = r'C:\Program Files\Common Files\Intel\Licenses'
licdir = None
for ld in [envlicdir, reglicdir]:
# If the string contains an '@', then assume it's a network
# license (port@system) and good by definition.
if ld and (ld.find('@') != -1 or os.path.exists(ld)):
licdir = ld
break
if not licdir:
licdir = defaultlicdir
if not os.path.exists(licdir):
class ICLLicenseDirWarning(SCons.Warnings.Warning):
pass
SCons.Warnings.enableWarningClass(ICLLicenseDirWarning)
SCons.Warnings.warn(ICLLicenseDirWarning,
"Intel license dir was not found."
" Tried using the INTEL_LICENSE_FILE environment variable (%s), the registry (%s) and the default path (%s)."
" Using the default path as a last resort."
% (envlicdir, reglicdir, defaultlicdir))
env['ENV']['INTEL_LICENSE_FILE'] = licdir | [
"Add Builders and construction variables for Intel C/C++ compiler\n to an Environment.\n args:\n version: (string) compiler version to use, like \"80\"\n abi: (string) 'win32' or whatever Itanium version wants\n topdir: (string) compiler top dir, like\n \"c:\\Program Files\\Intel\\Compiler70\"\n If topdir is used, version and abi are ignored.\n verbose: (int) if >0, prints compiler version used.\n "
] |
Please provide a description of the function:def parse_node_descriptor(desc, model):
try:
data = graph_node.parseString(desc)
except ParseException:
raise # TODO: Fix this to properly encapsulate the parse error
stream_desc = u' '.join(data['node'])
stream = DataStream.FromString(stream_desc)
node = SGNode(stream, model)
inputs = []
if 'input_a' in data:
input_a = data['input_a']
stream_a = DataStreamSelector.FromString(u' '.join(input_a['input_stream']))
trigger_a = None
if 'type' in input_a:
trigger_a = InputTrigger(input_a['type'], input_a['op'], int(input_a['reference'], 0))
inputs.append((stream_a, trigger_a))
if 'input_b' in data:
input_b = data['input_b']
stream_b = DataStreamSelector.FromString(u' '.join(input_b['input_stream']))
trigger_b = None
if 'type' in input_b:
trigger_b = InputTrigger(input_b['type'], input_b['op'], int(input_b['reference'], 0))
inputs.append((stream_b, trigger_b))
if 'combiner' in data and str(data['combiner']) == u'||':
node.trigger_combiner = SGNode.OrTriggerCombiner
else:
node.trigger_combiner = SGNode.AndTriggerCombiner
processing = data['processor']
return node, inputs, processing | [
"Parse a string node descriptor.\n\n The function creates an SGNode object without connecting its inputs and outputs\n and returns a 3-tuple:\n\n SGNode, [(input X, trigger X)], <processing function name>\n\n Args:\n desc (str): A description of the node to be created.\n model (str): A device model for the node to be created that sets any\n device specific limits on how the node is set up.\n "
] |
Please provide a description of the function:def create_binary_descriptor(descriptor):
func_names = {0: 'copy_latest_a', 1: 'average_a',
2: 'copy_all_a', 3: 'sum_a',
4: 'copy_count_a', 5: 'trigger_streamer',
6: 'call_rpc', 7: 'subtract_afromb'}
func_codes = {y: x for x, y in func_names.items()}
node, inputs, processing = parse_node_descriptor(descriptor, DeviceModel())
func_code = func_codes.get(processing)
if func_code is None:
raise ArgumentError("Unknown processing function", function=processing)
stream_a, trigger_a = inputs[0]
stream_a = stream_a.encode()
if len(inputs) == 2:
stream_b, trigger_b = inputs[1]
stream_b = stream_b.encode()
else:
stream_b, trigger_b = 0xFFFF, None
if trigger_a is None:
trigger_a = TrueTrigger()
if trigger_b is None:
trigger_b = TrueTrigger()
ref_a = 0
if isinstance(trigger_a, InputTrigger):
ref_a = trigger_a.reference
ref_b = 0
if isinstance(trigger_b, InputTrigger):
ref_b = trigger_b.reference
trigger_a = _create_binary_trigger(trigger_a)
trigger_b = _create_binary_trigger(trigger_b)
combiner = node.trigger_combiner
bin_desc = struct.pack("<LLHHHBBBB2x", ref_a, ref_b, node.stream.encode(), stream_a, stream_b, func_code, trigger_a, trigger_b, combiner)
return bin_desc | [
"Convert a string node descriptor into a 20-byte binary descriptor.\n\n This is the inverse operation of parse_binary_descriptor and composing\n the two operations is a noop.\n\n Args:\n descriptor (str): A string node descriptor\n\n Returns:\n bytes: A 20-byte binary node descriptor.\n "
] |
Please provide a description of the function:def parse_binary_descriptor(bindata):
func_names = {0: 'copy_latest_a', 1: 'average_a',
2: 'copy_all_a', 3: 'sum_a',
4: 'copy_count_a', 5: 'trigger_streamer',
6: 'call_rpc', 7: 'subtract_afromb'}
if len(bindata) != 20:
raise ArgumentError("Invalid binary node descriptor with incorrect size", size=len(bindata), expected=20, bindata=bindata)
a_trig, b_trig, stream_id, a_id, b_id, proc, a_cond, b_cond, trig_combiner = struct.unpack("<LLHHHBBBB2x", bindata)
node_stream = DataStream.FromEncoded(stream_id)
if a_id == 0xFFFF:
raise ArgumentError("Invalid binary node descriptor with invalid first input", input_selector=a_id)
a_selector = DataStreamSelector.FromEncoded(a_id)
a_trigger = _process_binary_trigger(a_trig, a_cond)
b_selector = None
b_trigger = None
if b_id != 0xFFFF:
b_selector = DataStreamSelector.FromEncoded(b_id)
b_trigger = _process_binary_trigger(b_trig, b_cond)
if trig_combiner == SGNode.AndTriggerCombiner:
comb = '&&'
elif trig_combiner == SGNode.OrTriggerCombiner:
comb = '||'
else:
raise ArgumentError("Invalid trigger combiner in binary node descriptor", combiner=trig_combiner)
if proc not in func_names:
raise ArgumentError("Unknown processing function", function_id=proc, known_functions=func_names)
func_name = func_names[proc]
# Handle one input nodes
if b_selector is None:
return '({} {}) => {} using {}'.format(a_selector, a_trigger, node_stream, func_name)
return '({} {} {} {} {}) => {} using {}'.format(a_selector, a_trigger, comb,
b_selector, b_trigger,
node_stream, func_name) | [
"Convert a binary node descriptor into a string descriptor.\n\n Binary node descriptor are 20-byte binary structures that encode all\n information needed to create a graph node. They are used to communicate\n that information to an embedded device in an efficent format. This\n function exists to turn such a compressed node description back into\n an understandable string.\n\n Args:\n bindata (bytes): The raw binary structure that contains the node\n description.\n\n Returns:\n str: The corresponding string description of the same sensor_graph node\n "
] |
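
A round-trip sketch of the two descriptor functions above (the descriptor uses the same shape parse_binary_descriptor emits; the stream names are illustrative):

    binary = create_binary_descriptor('(input 1 always) => output 1 using copy_latest_a')
    assert len(binary) == 20
    parse_binary_descriptor(binary)  # -> '(input 1 always) => output 1 using copy_latest_a'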
Please provide a description of the function:def _process_binary_trigger(trigger_value, condition):
ops = {
0: ">",
1: "<",
2: ">=",
3: "<=",
4: "==",
5: 'always'
}
sources = {
0: 'value',
1: 'count'
}
encoded_source = condition & 0b1
encoded_op = condition >> 1
oper = ops.get(encoded_op, None)
source = sources.get(encoded_source, None)
if oper is None:
raise ArgumentError("Unknown operation in binary trigger", condition=condition, operation=encoded_op, known_ops=ops)
if source is None:
raise ArgumentError("Unknown value source in binary trigger", source=source, known_sources=sources)
if oper == 'always':
return TrueTrigger()
return InputTrigger(source, oper, trigger_value) | [
"Create an InputTrigger object."
] |
Please provide a description of the function:def _create_binary_trigger(trigger):
ops = {
0: ">",
1: "<",
2: ">=",
3: "<=",
4: "==",
5: 'always'
}
op_codes = {y: x for x, y in ops.items()}
source = 0
if isinstance(trigger, TrueTrigger):
op_code = op_codes['always']
elif isinstance(trigger, FalseTrigger):
raise ArgumentError("Cannot express a never trigger in binary descriptor", trigger=trigger)
else:
op_code = op_codes[trigger.comp_string]
if trigger.use_count:
source = 1
return (op_code << 1) | source | [
"Create an 8-bit binary trigger from an InputTrigger, TrueTrigger, FalseTrigger."
] |
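
The encoded condition byte is (op_code << 1) | source, where source 0 means value and 1 means count. Worked examples (sketch):

    # 'always'          -> op code 5, value source: (5 << 1) | 0 == 0b1010 == 10
    # 'when count >= 1' -> op code 2, count source: (2 << 1) | 1 == 0b0101 == 5
    _create_binary_trigger(TrueTrigger())  # -> 10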
Please provide a description of the function:def _try_assign_utc_time(self, raw_time, time_base):
# Check if the raw time is encoded UTC since y2k or just uptime
if raw_time != IOTileEvent.InvalidRawTime and (raw_time & (1 << 31)):
y2k_offset = raw_time ^ (1 << 31)  # clear the UTC flag bit to recover seconds since y2k
return self._Y2KReference + datetime.timedelta(seconds=y2k_offset)
if time_base is not None:
return time_base + datetime.timedelta(seconds=raw_time)
return None | [
"Try to assign a UTC time to this reading."
] |
Please provide a description of the function:def asdict(self):
timestamp_str = None
if self.reading_time is not None:
timestamp_str = self.reading_time.isoformat()
return {
'stream': self.stream,
'device_timestamp': self.raw_time,
'streamer_local_id': self.reading_id,
'timestamp': timestamp_str,
'value': self.value
} | [
"Encode the data in this reading into a dictionary.\n\n Returns:\n dict: A dictionary containing the information from this reading.\n "
] |
Please provide a description of the function:def asdict(self):
return {
'stream': self.stream,
'device_timestamp': self.raw_time,
'streamer_local_id': self.reading_id,
'timestamp': self.reading_time,
'extra_data': self.summary_data,
'data': self.raw_data
} | [
"Encode the data in this event into a dictionary.\n\n The dictionary returned from this method is a reference to the data\n stored in the IOTileEvent, not a copy. It should be treated as read\n only.\n\n Returns:\n dict: A dictionary containing the information from this event.\n "
] |
Please provide a description of the function:def FromDict(cls, obj):
timestamp = obj.get('timestamp')
if timestamp is not None:
import dateutil.parser
timestamp = dateutil.parser.parse(timestamp)
return IOTileEvent(obj.get('device_timestamp'), obj.get('stream'), obj.get('extra_data'),
obj.get('data'), reading_id=obj.get('streamer_local_id'), reading_time=timestamp) | [
"Create an IOTileEvent from the result of a previous call to asdict().\n\n Args:\n obj (dict): A dictionary produced by a call to IOTileEvent.asdict()\n\n Returns:\n IOTileEvent: The converted IOTileEvent object.\n "
] |
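
A round-trip sketch of asdict()/FromDict() (field values are hypothetical; the positional arguments are (raw_time, stream, summary_data, raw_data) as used above):

    event = IOTileEvent(0, 0x5020, {'max': 3.1}, None, reading_id=1)
    restored = IOTileEvent.FromDict(event.asdict())
    assert restored.stream == event.stream and restored.reading_id == 1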
Please provide a description of the function:def save(self, path):
data = self.encode()
with open(path, "wb") as out:
out.write(data) | [
"Save a binary copy of this report\n\n Args:\n path (string): The path where we should save the binary copy of the report\n "
] |
Please provide a description of the function:def serialize(self):
info = {}
info['received_time'] = self.received_time
info['encoded_report'] = bytes(self.encode())
# Handle python 2 / python 3 differences
report_format = info['encoded_report'][0]
if not isinstance(report_format, int):
report_format = ord(report_format)
info['report_format'] = report_format # Report format is the first byte of the encoded report
info['origin'] = self.origin
return info | [
"Turn this report into a dictionary that encodes all information including received timestamp"
] |
Please provide a description of the function:def get_contents(self):
childsigs = [n.get_csig() for n in self.children()]
return ''.join(childsigs) | [
"The contents of an alias is the concatenation\n of the content signatures of all its sources."
] |
Please provide a description of the function:def get_csig(self):
try:
return self.ninfo.csig
except AttributeError:
pass
contents = self.get_contents()
csig = SCons.Util.MD5signature(contents)
self.get_ninfo().csig = csig
return csig | [
"\n Generate a node's content signature, the digested signature\n of its content.\n\n node - the node\n cache - alternate node to use for the signature cache\n returns - the content signature\n "
] |
Please provide a description of the function:def generate(env):
import SCons.Tool
import SCons.Tool.cc
static_obj, shared_obj = SCons.Tool.createObjBuilders(env)
for suffix in CXXSuffixes:
static_obj.add_action(suffix, SCons.Defaults.CXXAction)
shared_obj.add_action(suffix, SCons.Defaults.ShCXXAction)
static_obj.add_emitter(suffix, SCons.Defaults.StaticObjectEmitter)
shared_obj.add_emitter(suffix, SCons.Defaults.SharedObjectEmitter)
SCons.Tool.cc.add_common_cc_variables(env)
if 'CXX' not in env:
env['CXX'] = env.Detect(compilers) or compilers[0]
env['CXXFLAGS'] = SCons.Util.CLVar('')
env['CXXCOM'] = '$CXX -o $TARGET -c $CXXFLAGS $CCFLAGS $_CCCOMCOM $SOURCES'
env['SHCXX'] = '$CXX'
env['SHCXXFLAGS'] = SCons.Util.CLVar('$CXXFLAGS')
env['SHCXXCOM'] = '$SHCXX -o $TARGET -c $SHCXXFLAGS $SHCCFLAGS $_CCCOMCOM $SOURCES'
env['CPPDEFPREFIX'] = '-D'
env['CPPDEFSUFFIX'] = ''
env['INCPREFIX'] = '-I'
env['INCSUFFIX'] = ''
env['SHOBJSUFFIX'] = '.os'
env['OBJSUFFIX'] = '.o'
env['STATIC_AND_SHARED_OBJECTS_ARE_THE_SAME'] = 0
env['CXXFILESUFFIX'] = '.cc' | [
"\n Add Builders and construction variables for Visual Age C++ compilers\n to an Environment.\n "
] |
Please provide a description of the function:def link_to_storage(self, sensor_log):
if self.walker is not None:
self._sensor_log.destroy_walker(self.walker)
self.walker = None
self.walker = sensor_log.create_walker(self.selector)
self._sensor_log = sensor_log | [
"Attach this DataStreamer to an underlying SensorLog.\n\n Calling this method is required if you want to use this DataStreamer\n to generate reports from the underlying data in the SensorLog.\n\n You can call it multiple times and it will unlink itself from any\n previous SensorLog each time.\n\n Args:\n sensor_log (SensorLog): Actually create a StreamWalker to go along with this\n streamer so that we can check if it's triggered.\n "
] |
Please provide a description of the function:def triggered(self, manual=False):
if self.walker is None:
raise InternalError("You can only check if a streamer is triggered if you create it with a SensorLog")
if not self.automatic and not manual:
return False
return self.has_data() | [
"Check if this streamer should generate a report.\n\n Streamers can be triggered automatically whenever they have data\n or they can be triggered manually. This method returns True if the\n streamer is currented triggered.\n\n A streamer is triggered if it:\n - (has data AND is automatic) OR\n - (has data AND is manually triggered)\n\n Args:\n manual (bool): Indicate that the streamer has been manually triggered.\n\n Returns:\n bool: Whether the streamer can generate a report right now.\n "
] |
Please provide a description of the function:def build_report(self, device_id, max_size=None, device_uptime=0, report_id=None, auth_chain=None):
if self.walker is None or self.index is None:
raise InternalError("You can only build a report with a DataStreamer if you create it with a SensorLog and a streamer index")
if self.requires_signing() and auth_chain is None:
raise ArgumentError("You must pass an auth chain to sign this report.")
if self.requires_id() and report_id is None:
raise ArgumentError("You must pass a report_id to serialize this report")
if self.format == 'individual':
reading = self.walker.pop()
highest_id = reading.reading_id
if self.report_type == 'telegram':
return StreamerReport(IndividualReadingReport.FromReadings(device_id, [reading]), 1, highest_id)
elif self.report_type == 'broadcast':
return StreamerReport(BroadcastReport.FromReadings(device_id, [reading], device_uptime), 1, highest_id)
elif self.format == 'hashedlist':
max_readings = (max_size - 20 - 24) // 16
if max_readings <= 0:
raise InternalError("max_size is too small to hold even a single reading", max_size=max_size)
readings = []
highest_id = 0
try:
while len(readings) < max_readings:
reading = self.walker.pop()
readings.append(reading)
if reading.reading_id > highest_id:
highest_id = reading.reading_id
except StreamEmptyError:
if len(readings) == 0:
raise
return StreamerReport(SignedListReport.FromReadings(device_id, readings, report_id=report_id, selector=self.selector.encode(),
streamer=self.index, sent_timestamp=device_uptime), len(readings), highest_id)
raise InternalError("Streamer report format or type is not supported currently", report_format=self.format, report_type=self.report_type) | [
"Build a report with all of the readings in this streamer.\n\n This method will produce an IOTileReport subclass and, if necessary,\n sign it using the passed authentication chain.\n\n Args:\n device_id (int): The UUID of the device to generate a report for.\n max_size (int): Optional maximum number of bytes that the report can be\n device_uptime (int): The device's uptime to use as the sent timestamp of the report\n report_id (int): The report id to use if the report type require serialization.\n auth_chain (AuthChain): An auth chain class to use to sign the report if the report\n type requires signing.\n\n Returns:\n StreamerReport: The report, its highest id and the number of readings in it.\n\n The highest reading id and number of readings are returned\n separately from the report itself because, depending on the format\n of the report (such as whether it is encrypted or does not contain\n reading ids), these details may not be recoverable from the report\n itself.\n\n Raises:\n InternalError: If there was no SensorLog passed when this streamer was created.\n StreamEmptyError: If there is no data to generate a report from. This can only happen\n if a call to triggered() returned False.\n ArgumentError: If the report requires additional metadata that was not passed like a\n signing key or report_id.\n "
] |
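For the 'hashedlist' branch, the overhead constants in the code (20-byte header, 24-byte footer, 16 bytes per reading) give the capacity formula below; note that this branch effectively requires max_size to be passed, since the default of None would fail in the subtraction. A quick check of the arithmetic:

# Worked example of the hashedlist capacity calculation from build_report().
max_size = 200
max_readings = (max_size - 20 - 24) // 16
assert max_readings == 9  # 156 // 16 == 9 readings fit in a 200-byte report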
Please provide a description of the function:def matches(self, address, name=None):
if self.controller:
return address == 8
return self.address == address | [
"Check if this slot identifier matches the given tile.\n\n Matching can happen either by address or by module name (not currently implemented).\n\n Returns:\n bool: True if there is a match, otherwise False.\n "
] |
Please provide a description of the function:def FromString(cls, desc):
desc = str(desc)
if desc == u'controller':
return SlotIdentifier(controller=True)
words = desc.split()
if len(words) != 2 or words[0] != u'slot':
raise ArgumentError(u"Illegal slot identifier", descriptor=desc)
try:
slot_id = int(words[1], 0)
except ValueError:
raise ArgumentError(u"Could not convert slot identifier to number", descriptor=desc, number=words[1])
return SlotIdentifier(slot=slot_id) | [
"Create a slot identifier from a string description.\n\n The string needs to be either:\n\n controller\n OR\n slot <X> where X is an integer that can be converted with int(X, 0)\n\n Args:\n desc (str): The string description of the slot\n\n Returns:\n SlotIdentifier\n "
] |
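The parser accepts exactly two forms; the behavior implied by the code above:

assert SlotIdentifier.FromString('controller').controller
assert SlotIdentifier.FromString('slot 3').slot == 3
assert SlotIdentifier.FromString('slot 0x0a').slot == 10  # int(x, 0) also parses hex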
Please provide a description of the function:def FromEncoded(cls, bindata):
if len(bindata) != 8:
raise ArgumentError("Invalid binary slot descriptor with invalid length", length=len(bindata), expected=8, data=bindata)
slot, match_op = struct.unpack("<B6xB", bindata)
match_name = cls.KNOWN_MATCH_CODES.get(match_op)
if match_name is None:
raise ArgumentError("Unknown match operation specified in binary slot descriptor", operation=match_op, known_match_ops=cls.KNOWN_MATCH_CODES)
if match_name == 'match_controller':
return SlotIdentifier(controller=True)
if match_name == 'match_slot':
return SlotIdentifier(slot=slot)
raise ArgumentError("Unsupported match operation in binary slot descriptor", match_op=match_name) | [
"Create a slot identifier from an encoded binary descriptor.\n\n These binary descriptors are used to communicate slot targeting\n to an embedded device. They are exactly 8 bytes in length.\n\n Args:\n bindata (bytes): The 8-byte binary descriptor.\n\n Returns:\n SlotIdentifier\n "
] |
Please provide a description of the function:def encode(self):
slot = 0
match_op = self.KNOWN_MATCH_NAMES['match_controller']
if not self.controller:
slot = self.slot
match_op = self.KNOWN_MATCH_NAMES['match_slot']
return struct.pack("<B6xB", slot, match_op) | [
"Encode this slot identifier into a binary descriptor.\n\n Returns:\n bytes: The 8-byte encoded slot identifier\n "
] |
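The "<B6xB" layout is one slot byte, six pad bytes, and one match-op byte, so encode() and FromEncoded() should round-trip:

ident = SlotIdentifier(slot=3)
binary = ident.encode()
assert len(binary) == 8
assert SlotIdentifier.FromEncoded(binary).slot == 3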
Please provide a description of the function:def _scons_syntax_error(e):
etype, value, tb = sys.exc_info()
lines = traceback.format_exception_only(etype, value)
for line in lines:
sys.stderr.write(line+'\n')
sys.exit(2) | [
"Handle syntax errors. Print out a message and show where the error\n occurred.\n "
] |
Please provide a description of the function:def find_deepest_user_frame(tb):
tb.reverse()
# find the deepest traceback frame that is not part
# of SCons:
for frame in tb:
filename = frame[0]
if filename.find(os.sep+'SCons'+os.sep) == -1:
return frame
return tb[0] | [
"\n Find the deepest stack frame that is not part of SCons.\n\n Input is a \"pre-processed\" stack trace in the form\n returned by traceback.extract_tb() or traceback.extract_stack()\n "
] |
Please provide a description of the function:def _scons_user_error(e):
global print_stacktrace
etype, value, tb = sys.exc_info()
if print_stacktrace:
traceback.print_exception(etype, value, tb)
filename, lineno, routine, dummy = find_deepest_user_frame(traceback.extract_tb(tb))
sys.stderr.write("\nscons: *** %s\n" % value)
sys.stderr.write('File "%s", line %d, in %s\n' % (filename, lineno, routine))
sys.exit(2) | [
"Handle user errors. Print out a message and a description of the\n error, along with the line number and routine where it occured.\n The file and line number will be the deepest stack frame that is\n not part of SCons itself.\n "
] |
Please provide a description of the function:def _scons_user_warning(e):
etype, value, tb = sys.exc_info()
filename, lineno, routine, dummy = find_deepest_user_frame(traceback.extract_tb(tb))
sys.stderr.write("\nscons: warning: %s\n" % e)
sys.stderr.write('File "%s", line %d, in %s\n' % (filename, lineno, routine)) | [
"Handle user warnings. Print out a message and a description of\n the warning, along with the line number and routine where it occured.\n The file and line number will be the deepest stack frame that is\n not part of SCons itself.\n "
] |
Please provide a description of the function:def _scons_internal_warning(e):
filename, lineno, routine, dummy = find_deepest_user_frame(traceback.extract_stack())
sys.stderr.write("\nscons: warning: %s\n" % e.args[0])
sys.stderr.write('File "%s", line %d, in %s\n' % (filename, lineno, routine)) | [
"Slightly different from _scons_user_warning in that we use the\n *current call stack* rather than sys.exc_info() to get our stack trace.\n This is used by the warnings framework to print warnings."
] |
Please provide a description of the function:def _SConstruct_exists(dirname='', repositories=None, filelist=None):
if repositories is None:
repositories = []
if not filelist:
filelist = ['SConstruct', 'Sconstruct', 'sconstruct']
for file in filelist:
sfile = os.path.join(dirname, file)
if os.path.isfile(sfile):
return sfile
if not os.path.isabs(sfile):
for rep in repositories:
if os.path.isfile(os.path.join(rep, sfile)):
return sfile
return None | [
"This function checks that an SConstruct file exists in a directory.\n If so, it returns the path of the file. By default, it checks the\n current directory.\n "
] |
Please provide a description of the function:def _load_site_scons_dir(topdir, site_dir_name=None):
if site_dir_name:
err_if_not_found = True # user specified: err if missing
else:
site_dir_name = "site_scons"
err_if_not_found = False
site_dir = os.path.join(topdir, site_dir_name)
if not os.path.exists(site_dir):
if err_if_not_found:
raise SCons.Errors.UserError("site dir %s not found."%site_dir)
return
site_init_filename = "site_init.py"
site_init_modname = "site_init"
site_tools_dirname = "site_tools"
# prepend to sys.path
sys.path = [os.path.abspath(site_dir)] + sys.path
site_init_file = os.path.join(site_dir, site_init_filename)
site_tools_dir = os.path.join(site_dir, site_tools_dirname)
if os.path.exists(site_init_file):
import imp, re
fp = None  # ensure 'fp' is bound for the finally block even if find_module raises
try:
try:
fp, pathname, description = imp.find_module(site_init_modname,
[site_dir])
# Load the file into SCons.Script namespace. This is
# opaque and clever; m is the module object for the
# SCons.Script module, and the exec ... in call executes a
# file (or string containing code) in the context of the
# module's dictionary, so anything that code defines ends
# up adding to that module. This is really short, but all
# the error checking makes it longer.
try:
m = sys.modules['SCons.Script']
except Exception as e:
fmt = 'cannot import site_init.py: missing SCons.Script module %s'
raise SCons.Errors.InternalError(fmt % repr(e))
try:
sfx = description[0]
modname = os.path.basename(pathname)[:-len(sfx)]
site_m = {"__file__": pathname, "__name__": modname, "__doc__": None}
re_special = re.compile("__[^_]+__")
for k in list(m.__dict__.keys()):
if not re_special.match(k):
site_m[k] = m.__dict__[k]
# This is the magic.
exec(compile(fp.read(), fp.name, 'exec'), site_m)
except KeyboardInterrupt:
raise
except Exception as e:
fmt = '*** Error loading site_init file %s:\n'
sys.stderr.write(fmt % repr(site_init_file))
raise
else:
for k in site_m:
if not re_special.match(k):
m.__dict__[k] = site_m[k]
except KeyboardInterrupt:
raise
except ImportError as e:
fmt = '*** cannot import site init file %s:\n'
sys.stderr.write(fmt % repr(site_init_file))
raise
finally:
if fp:
fp.close()
if os.path.exists(site_tools_dir):
# prepend to DefaultToolpath
SCons.Tool.DefaultToolpath.insert(0, os.path.abspath(site_tools_dir)) | [
"Load the site_scons dir under topdir.\n Prepends site_scons to sys.path, imports site_scons/site_init.py,\n and prepends site_scons/site_tools to default toolpath."
] |
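The imp module used above has been deprecated since Python 3.4 and removed in 3.12. A sketch of the equivalent find/load step with importlib (an illustration, not how SCons itself ports this):

import importlib.util

def load_site_init(modname, pathname):
    # importlib equivalent of the imp.find_module / exec sequence above
    spec = importlib.util.spec_from_file_location(modname, pathname)
    module = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(module)
    return module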
Please provide a description of the function:def _load_all_site_scons_dirs(topdir, verbose=None):
platform = SCons.Platform.platform_default()
def homedir(d):
return os.path.expanduser('~/'+d)
if platform == 'win32' or platform == 'cygwin':
# Note we use $ here instead of %...% because older
# pythons (prior to 2.6?) didn't expand %...% on Windows.
# This set of dirs should work on XP, Vista, 7 and later.
sysdirs=[
os.path.expandvars('$ALLUSERSPROFILE\\Application Data\\scons'),
os.path.expandvars('$USERPROFILE\\Local Settings\\Application Data\\scons')]
appdatadir = os.path.expandvars('$APPDATA\\scons')
if appdatadir not in sysdirs:
sysdirs.append(appdatadir)
sysdirs.append(homedir('.scons'))
elif platform == 'darwin': # MacOS X
sysdirs=['/Library/Application Support/SCons',
'/opt/local/share/scons', # (for MacPorts)
'/sw/share/scons', # (for Fink)
homedir('Library/Application Support/SCons'),
homedir('.scons')]
elif platform == 'sunos': # Solaris
sysdirs=['/opt/sfw/scons',
'/usr/share/scons',
homedir('.scons')]
else: # Linux, HPUX, etc.
# assume posix-like, i.e. platform == 'posix'
sysdirs=['/usr/share/scons',
homedir('.scons')]
dirs=sysdirs + [topdir]
for d in dirs:
if verbose: # this is used by unit tests.
print("Loading site dir ", d)
_load_site_scons_dir(d) | [
"Load all of the predefined site_scons dir.\n Order is significant; we load them in order from most generic\n (machine-wide) to most specific (topdir).\n The verbose argument is only for testing.\n "
] |
Please provide a description of the function:def make_ready(self):
SCons.Taskmaster.OutOfDateTask.make_ready(self)
if self.out_of_date and self.options.debug_explain:
explanation = self.out_of_date[0].explain()
if explanation:
sys.stdout.write("scons: " + explanation) | [
"Make a task ready for execution"
] |
Please provide a description of the function:def encode(self):
begin_payload = struct.pack("<H8s", self.config_id, self.target.encode())
start_record = SendErrorCheckingRPCRecord(8, self.BEGIN_CONFIG_RPC, begin_payload, 4)
end_record = SendErrorCheckingRPCRecord(8, self.END_CONFIG_RPC, bytearray(), 4)
push_records = []
for i in range(0, len(self.data), 20):
chunk = self.data[i:i+20]
push_record = SendErrorCheckingRPCRecord(8, self.PUSH_CONFIG_RPC, chunk, 4)
push_records.append(push_record)
out_blob = bytearray()
out_blob += start_record.encode()
for push_record in push_records:
out_blob += push_record.encode()
out_blob += end_record.encode()
return out_blob | [
"Encode this record into binary, suitable for embedded into an update script.\n\n This function will create multiple records that correspond to the actual\n underlying rpcs that SetConfigRecord turns into.\n\n Returns:\n bytearary: The binary version of the record that could be parsed via\n a call to UpdateRecord.FromBinary()\n "
] |
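The push loop slices the config payload into 20-byte RPC chunks, so an N-byte value becomes ceil(N/20) push records between the begin and end records:

# Chunking behavior of the encode() loop above.
data = bytes(45)
chunks = [data[i:i + 20] for i in range(0, len(data), 20)]
assert [len(c) for c in chunks] == [20, 20, 5]  # 45 bytes -> 3 push RPCs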
Please provide a description of the function:def MatchQuality(cls, record_data, record_count=1):
if record_count == 1:
cmd, address, _resp_length, _payload = SendErrorCheckingRPCRecord._parse_rpc_info(record_data)
if cmd == cls.BEGIN_CONFIG_RPC and address == 8:
return MatchQuality.DeferMatch
return MatchQuality.NoMatch
# To see if this is a set_config variable record set, we need to decode all of
# the records and make sure each is an error checking rpc with the right rpc id
try:
rpcs = SendErrorCheckingRPCRecord.parse_multiple_rpcs(record_data)
push_commands = rpcs[1:-1]
for cmd in push_commands:
cmd_id, addr = cmd[:2]
if cmd_id != cls.PUSH_CONFIG_RPC or addr != 8:
return MatchQuality.NoMatch
last_cmd, last_addr = rpcs[-1][:2]
if last_cmd == cls.END_CONFIG_RPC and last_addr == 8:
return MatchQuality.PerfectMatch
except ArgumentError:
return MatchQuality.NoMatch
return MatchQuality.DeferMatch | [
"Check how well this record matches the given binary data.\n\n This function will only be called if the record matches the type code\n given by calling MatchType() and this functon should check how well\n this record matches and return a quality score between 0 and 100, with\n higher quality matches having higher scores. The default value should\n be MatchQuality.GenericMatch which is 50. If this record does not\n match at all, it should return MatchQuality.NoMatch.\n\n Many times, only a single record type will match a given binary record\n but there are times when multiple different logical records produce\n the same type of record in a script, such as set_version and\n set_userkey both producing a call_rpc record with different RPC\n values. The MatchQuality method is used to allow for rich decoding\n of such scripts back to the best possible record that created them.\n\n Args:\n record_data (bytearay): The raw record that we should check for\n a match.\n record_count (int): The number of binary records that are included\n in record_data.\n\n Returns:\n int: The match quality between 0 and 100. You should use the\n constants defined in MatchQuality as much as possible.\n "
] |
Please provide a description of the function:def FromBinary(cls, record_data, record_count=1):
rpcs = SendErrorCheckingRPCRecord.parse_multiple_rpcs(record_data)
start_rpc = rpcs[0]
push_rpcs = rpcs[1:-1]
try:
config_id, raw_target = struct.unpack("<H8s", start_rpc.payload)
target = SlotIdentifier.FromEncoded(raw_target)
except ValueError:
raise ArgumentError("Could not parse payload on begin config rpc", payload=start_rpc.payload)
payload = bytearray()
for rpc in push_rpcs:
payload += rpc.payload
return SetConfigRecord(target, config_id, payload) | [
"Create an UpdateRecord subclass from binary record data.\n\n This is a multi-action record that matches a pattern of error checking\n RPC calls:\n begin config\n push config data\n <possibly multiple>\n end config\n\n Args:\n record_data (bytearray): The raw record data that we wish to parse.\n record_count (int): The number of records included in record_data.\n\n Raises:\n ArgumentError: If the record_data is malformed and cannot be parsed.\n\n Returns:\n SetConfigRecord: The decoded tile records.\n "
] |
Please provide a description of the function:def _unpack_version(tag_data):
tag = tag_data & ((1 << 20) - 1)
version_data = tag_data >> 20
major = (version_data >> 6) & ((1 << 6) - 1)
minor = (version_data >> 0) & ((1 << 6) - 1)
return (tag, "{}.{}".format(major, minor)) | [
"Parse a packed version info struct into tag and major.minor version.\n\n The tag and version are parsed out according to 20 bits for tag and\n 6 bits each for major and minor. The more interesting part is the\n blacklisting performed for tags that are known to be untrustworthy.\n\n In particular, the following applies to tags.\n\n - tags < 1024 are reserved for development and have only locally defined\n meaning. They are not for use in production.\n - tags in [1024, 2048) are production tags but were used inconsistently\n in the early days of Arch and hence cannot be trusted to correspond with\n an actual device model.\n - tags >= 2048 are reserved for supported production device variants.\n - the tag and version 0 (0.0) is reserved for an unknown wildcard that\n does not convey any information except that the tag and version are\n not known.\n "
] |
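The layout is a 20-bit tag in the low bits with 6-bit minor and 6-bit major fields above it. A sketch of the inverse operation (the real _pack_version used by controller_info below is presumably along these lines, but this version is an assumption):

def pack_version(tag, major, minor):
    # Assumed inverse of _unpack_version: tag in bits 0-19, minor in 20-25, major in 26-31.
    assert tag < (1 << 20) and major < (1 << 6) and minor < (1 << 6)
    return tag | (minor << 20) | (major << 26)

assert _unpack_version(pack_version(2048, 1, 2)) == (2048, "1.2")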
Please provide a description of the function:def _handle_reset(self):
self._logger.info("Resetting controller")
self._device.reset_count += 1
super(ReferenceController, self)._handle_reset()
# Load in all default values into our config variables before streaming
# updated data into them.
self.reset_config_variables() | [
"Reset this controller tile.\n\n This process will call _handle_reset() for all of the controller\n subsystem mixins in order to make sure they all return to their proper\n reset state.\n\n It will then reset all of the peripheral tiles to emulate the behavior\n of a physical POD where the controller tile cuts power to all\n peripheral tiles on reset for a clean boot.\n\n This will clear all subsystems of this controller to their reset\n states.\n\n The order of these calls is important to guarantee that everything is\n in the correct state before resetting the next subsystem.\n\n The behavior of this function is different depending on whether\n deferred is True or False. If it's true, this function will only\n clear the config database and then queue all of the config streaming\n rpcs to itself to load in all of our config variables. Once these\n have been sent, it will reset the rest of the controller subsystems.\n "
] |
Please provide a description of the function:async def _reset_vector(self):
# Send ourselves all of our config variable assignments
config_rpcs = self.config_database.stream_matching(8, self.name)
for rpc in config_rpcs:
await self._device.emulator.await_rpc(*rpc)
config_assignments = self.latch_config_variables()
self._logger.info("Latched config variables at reset for controller: %s", config_assignments)
for system in self._post_config_subsystems:
try:
system.clear_to_reset(config_assignments)
await asyncio.wait_for(system.initialize(), timeout=2.0)
except Exception:
self._logger.exception("Error initializing %s", system)
raise
self._logger.info("Finished clearing controller to reset condition")
# Now reset all of the tiles
for address, _ in self._device.iter_tiles(include_controller=False):
self._logger.info("Sending reset signal to tile at address %d", address)
try:
await self._device.emulator.await_rpc(address, rpcs.RESET)
except TileNotFoundError:
pass
except Exception:
self._logger.exception("Error sending reset signal to tile at address %d", address)
raise
self.initialized.set() | [
"Initialize the controller's subsystems inside the emulation thread."
] |
Please provide a description of the function:def dump_state(self):
superstate = super(ReferenceController, self).dump_state()
superstate.update({
'state_name': self.STATE_NAME,
'state_version': self.STATE_VERSION,
'app_info': self.app_info,
'os_info': self.os_info,
# Dump all of the subsystems
'remote_bridge': self.remote_bridge.dump(),
'tile_manager': self.tile_manager.dump(),
'config_database': self.config_database.dump(),
'sensor_log': self.sensor_log.dump()
})
return superstate | [
"Dump the current state of this emulated object as a dictionary.\n\n Returns:\n dict: The current state of the object that could be passed to load_state.\n "
] |
Please provide a description of the function:def restore_state(self, state):
super(ReferenceController, self).restore_state(state)
state_name = state.get('state_name')
state_version = state.get('state_version')
if state_name != self.STATE_NAME or state_version != self.STATE_VERSION:
raise ArgumentError("Invalid emulated device state name or version", found=(state_name, state_version),
expected=(self.STATE_NAME, self.STATE_VERSION))
self.app_info = state.get('app_info', (0, "0.0"))
self.os_info = state.get('os_info', (0, "0.0"))
# Notify all subsystems of our intent to restore in case they need to prepare
self.sensor_log.prepare_for_restore()
# Restore all of the subsystems
self.remote_bridge.restore(state.get('remote_bridge', {}))
self.tile_manager.restore(state.get('tile_manager', {}))
self.config_database.restore(state.get('config_database', {}))
self.sensor_log.restore(state.get('sensor_log', {})) | [
"Restore the current state of this emulated object.\n\n Args:\n state (dict): A previously dumped state produced by dump_state.\n "
] |
Please provide a description of the function:def hardware_version(self):
hardware_string = self.hardware_string
if not isinstance(hardware_string, bytes):
hardware_string = self.hardware_string.encode('utf-8')
if len(hardware_string) > 10:
self._logger.warning("Truncating hardware string that was longer than 10 bytes: %s", self.hardware_string)
hardware_string = hardware_string[:10]  # actually truncate, as the log message states
if len(hardware_string) < 10:
hardware_string += b'\0'*(10 - len(hardware_string))
return [hardware_string] | [
"Get a hardware identification string."
] |
Please provide a description of the function:def controller_info(self):
return [self._device.iotile_id, _pack_version(*self.os_info), _pack_version(*self.app_info)] | [
"Get the controller UUID, app tag and os tag."
] |
Please provide a description of the function:def set_app_os_tag(self, os_tag, app_tag, update_os, update_app):
update_os = bool(update_os)
update_app = bool(update_app)
if update_os:
self.os_info = _unpack_version(os_tag)
if update_app:
self.app_info = _unpack_version(app_tag)
return [Error.NO_ERROR] | [
"Update the app and/or os tags."
] |
Please provide a description of the function:def load_sgf(self, sgf_data):
if '\n' not in sgf_data:
with open(sgf_data, "r") as infile:
sgf_data = infile.read()
model = DeviceModel()
parser = SensorGraphFileParser()
parser.parse_file(data=sgf_data)
parser.compile(model)
opt = SensorGraphOptimizer()
opt.optimize(parser.sensor_graph, model=model)
sensor_graph = parser.sensor_graph
self._logger.info("Loading sensor_graph with %d nodes, %d streamers and %d configs",
len(sensor_graph.nodes), len(sensor_graph.streamers), len(sensor_graph.config_database))
# Directly load the sensor_graph into our persisted storage
self.sensor_graph.persisted_nodes = sensor_graph.dump_nodes()
self.sensor_graph.persisted_streamers = sensor_graph.dump_streamers()
self.sensor_graph.persisted_constants = []
for stream, value in sorted(sensor_graph.constant_database.items(), key=lambda x: x[0].encode()):
reading = IOTileReading(stream.encode(), 0, value)
self.sensor_graph.persisted_constants.append((stream, reading))
self.sensor_graph.persisted_exists = True
# Clear all config variables and load in those from this sgf file
self.config_database.clear()
for slot in sorted(sensor_graph.config_database, key=lambda x: x.encode()):
for conf_var, (conf_type, conf_val) in sorted(sensor_graph.config_database[slot].items()):
self.config_database.add_direct(slot, conf_var, conf_type, conf_val)
# If we have an app tag and version set program them in
app_tag = sensor_graph.metadata_database.get('app_tag')
app_version = sensor_graph.metadata_database.get('app_version')
if app_tag is not None:
if app_version is None:
app_version = "0.0"
self.app_info = (app_tag, app_version) | [
"Load, persist a sensor_graph file.\n\n The data passed in `sgf_data` can either be a path or the already\n loaded sgf lines as a string. It is determined to be sgf lines if\n there is a '\\n' character in the data, otherwise it is interpreted as\n a path.\n\n Note that this scenario just loads the sensor_graph directly into the\n persisted sensor_graph inside the device. You will still need to\n reset the device for the sensor_graph to enabled and run.\n\n Args:\n sgf_data (str): Either the path to an sgf file or its contents\n as a string.\n "
] |
Please provide a description of the function:def _parse_file(self):
# We need to set the CPU type to pull in the right register definitions
# only preprocess the file (-E) and get rid of gcc extensions that aren't
# supported in ISO C.
args = utilities.build_includes(self.arch.includes())
# args.append('-mcpu=%s' % self.arch.property('chip'))
args.append('-E')
args.append('-D__attribute__(x)=')
args.append('-D__extension__=')
self.ast = parse_file(self.filepath, use_cpp=True, cpp_path='arm-none-eabi-gcc', cpp_args=args) | [
"Preprocess and parse C file into an AST"
] |
Please provide a description of the function:def _clear_queue(to_clear):
while not to_clear.empty():
try:
to_clear.get(False)
to_clear.task_done()
except queue.Empty:
continue | [
"Clear all items from a queue safely."
] |
Please provide a description of the function:def finish(self, status, response):
self.response = binascii.hexlify(response).decode('utf-8')
self.status = status
self.runtime = monotonic() - self._start_time | [
"Mark the end of a recorded RPC."
] |
Please provide a description of the function:def serialize(self):
return "{},{: <26},{:2d},{:#06x},{:#04x},{:5.0f},{: <40},{: <40},{}".\
format(self.connection, self.start_stamp.isoformat(), self.address, self.rpc_id,
self.status, self.runtime * 1000, self.call, self.response, self.error) | [
"Convert this recorded RPC into a string."
] |
Please provide a description of the function:def scan(self, wait=None):
min_scan = self.adapter.get_config('minimum_scan_time', 0.0)
probe_required = self.adapter.get_config('probe_required', False)
# Figure out how long and if we need to wait before returning our scan results
wait_time = None
elapsed = monotonic() - self._start_time
if elapsed < min_scan:
wait_time = min_scan - elapsed
# If we need to probe for devices rather than letting them just bubble up, start the probe
# and then use our min_scan_time to wait for them to arrive via the normal _on_scan event
if probe_required:
self._loop.run_coroutine(self.adapter.probe())
wait_time = min_scan
# If an explicit wait is specified that overrides everything else
if wait is not None:
wait_time = wait
if wait_time is not None:
sleep(wait_time)
to_remove = set()
now = monotonic()
with self._scan_lock:
for name, value in self._scanned_devices.items():
if value['expiration_time'] < now:
to_remove.add(name)
for name in to_remove:
del self._scanned_devices[name]
devices = sorted(self._scanned_devices.values(), key=lambda x: x['uuid'])
return devices | [
"Return the devices that have been found for this device adapter.\n\n If the adapter indicates that we need to explicitly tell it to probe for devices, probe now.\n By default we return the list of seen devices immediately, however there are two cases where\n we will sleep here for a fixed period of time to let devices show up in our result list:\n\n - If we are probing then we wait for 'minimum_scan_time'\n - If we are told an explicit wait time that overrides everything and we wait that long\n "
] |
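Typical call patterns implied by the wait logic above (the stream object name is illustrative):

devices = stream.scan()          # may sleep until minimum_scan_time has elapsed
devices = stream.scan(wait=2.0)  # an explicit wait overrides the computed wait time
for dev in devices:
    print(dev['uuid'], dev['connection_string'])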
Please provide a description of the function:def connect(self, uuid_value, wait=None):
if self.connected:
raise HardwareError("Cannot connect when we are already connected")
if uuid_value not in self._scanned_devices:
self.scan(wait=wait)
with self._scan_lock:
if uuid_value not in self._scanned_devices:
raise HardwareError("Could not find device to connect to by UUID", uuid=uuid_value)
connstring = self._scanned_devices[uuid_value]['connection_string']
self.connect_direct(connstring) | [
"Connect to a specific device by its uuid\n\n Attempt to connect to a device that we have previously scanned using its UUID.\n If wait is not None, then it is used in the same was a scan(wait) to override\n default wait times with an explicit value.\n\n Args:\n uuid_value (int): The unique id of the device that we would like to connect to.\n wait (float): Optional amount of time to force the device adapter to wait before\n attempting to connect.\n "
] |
Please provide a description of the function:def connect_direct(self, connection_string, no_rpc=False, force=False):
if not force and self.connected:
raise HardwareError("Cannot connect when we are already connected to '%s'" % self.connection_string)
self._loop.run_coroutine(self.adapter.connect(0, connection_string))
try:
if no_rpc:
self._logger.info("Not opening RPC interface on device %s", self.connection_string)
else:
self._loop.run_coroutine(self.adapter.open_interface(0, 'rpc'))
except HardwareError as exc:
self._logger.exception("Error opening RPC interface on device %s", connection_string)
self._loop.run_coroutine(self.adapter.disconnect(0))
raise exc
except Exception as exc:
self._logger.exception("Error opening RPC interface on device %s", connection_string)
self._loop.run_coroutine(self.adapter.disconnect(0))
raise HardwareError("Could not open RPC interface on device due to an exception: %s" % str(exc)) from exc
self.connected = True
self.connection_string = connection_string
self.connection_interrupted = False | [
"Directly connect to a device using its stream specific connection string.\n\n Normally, all connections to a device include opening the RPC\n interface to send RPCs. However, there are certain, very specific,\n circumstances when you would not want to or be able to open the RPC\n interface (such as when you are using the debug interface on a bare\n MCU that has not been programmed yet). In those cases you can pass\n no_rpc=True to not attempt to open the RPC interface. If you do not\n open the RPC interface at connection time, there is no public\n interface to open it later, so you must disconnect and reconnect to\n the device in order to open the interface.\n\n Args:\n connection_string (str): The connection string that identifies the desired device.\n no_rpc (bool): Do not open the RPC interface on the device (default=False).\n force (bool): Whether to force another connection even if we think we are currently\n connected. This is for internal use and not designed to be set externally.\n "
] |
Please provide a description of the function:def disconnect(self):
if not self.connected:
raise HardwareError("Cannot disconnect when we are not connected")
# Close the streaming and tracing interfaces when we disconnect
self._reports = None
self._traces = None
self._loop.run_coroutine(self.adapter.disconnect(0))
self.connected = False
self.connection_interrupted = False
self.connection_string = None | [
"Disconnect from the device that we are currently connected to."
] |
Please provide a description of the function:def _try_reconnect(self):
try:
if self.connection_interrupted:
self.connect_direct(self.connection_string, force=True)
self.connection_interrupted = False
self.connected = True
# Reenable streaming interface if that was open before as well
if self._reports is not None:
self._loop.run_coroutine(self.adapter.open_interface(0, 'streaming'))
# Reenable tracing interface if that was open before as well
if self._traces is not None:
self._loop.run_coroutine(self.adapter.open_interface(0, 'tracing'))
except HardwareError as exc:
self._logger.exception("Error reconnecting to device after an unexpected disconnect")
raise HardwareError("Device disconnected unexpectedly and we could not reconnect", reconnect_error=exc) from exc | [
"Try to recover an interrupted connection."
] |
Please provide a description of the function:def send_rpc(self, address, rpc_id, call_payload, timeout=3.0):
if not self.connected:
raise HardwareError("Cannot send an RPC if we are not in a connected state")
if timeout is None:
timeout = 3.0
status = -1
payload = b''
recording = None
if self.connection_interrupted:
self._try_reconnect()
if self._record is not None:
recording = _RecordedRPC(self.connection_string, address, rpc_id, call_payload)
recording.start()
try:
payload = self._loop.run_coroutine(self.adapter.send_rpc(0, address, rpc_id, call_payload, timeout))
status, payload = pack_rpc_response(payload, None)
except VALID_RPC_EXCEPTIONS as exc:
status, payload = pack_rpc_response(payload, exc)
if self._record is not None:
recording.finish(status, payload)
self._recording.append(recording)
if self.connection_interrupted:
self._try_reconnect()
return unpack_rpc_response(status, payload, rpc_id, address) | [
"Send an rpc to our connected device.\n\n The device must already be connected and the rpc interface open. This\n method will synchronously send an RPC and wait for the response. Any\n RPC errors will be raised as exceptions and if there were no errors, the\n RPC's response payload will be returned as a binary bytearray.\n\n See :meth:`AbstractDeviceAdapter.send_rpc` for documentation of the possible\n exceptions that can be raised here.\n\n Args:\n address (int): The tile address containing the RPC\n rpc_id (int): The ID of the RPC that we wish to call.\n call_payload (bytes): The payload containing encoded arguments for the\n RPC.\n timeout (float): The maximum number of seconds to wait for the RPC to\n finish. Defaults to 3s.\n\n Returns:\n bytearray: The RPC's response payload.\n "
] |
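A hedged usage sketch; the tile address is the controller's (8), but the RPC id and payload formats here are invented for illustration and are not real RPCs from this codebase:

import struct

payload = struct.pack("<H", 42)                          # encode one uint16 argument
response = hw.send_rpc(8, 0x8000, payload, timeout=5.0)  # hypothetical RPC 0x8000 on tile 8
value, = struct.unpack("<L", response)                   # assumed uint32 response format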
Please provide a description of the function:def send_highspeed(self, data, progress_callback):
if not self.connected:
raise HardwareError("Cannot send a script if we are not in a connected state")
if isinstance(data, str):
raise ArgumentError("You must send bytes or bytearray to _send_highspeed", type=type(data))
if not isinstance(data, bytes):
data = bytes(data)
try:
self._on_progress = progress_callback
self._loop.run_coroutine(self.adapter.send_script(0, data))
finally:
self._on_progress = None | [
"Send a script to a device at highspeed, reporting progress.\n\n This method takes a binary blob and downloads it to the device as fast\n as possible, calling the passed progress_callback periodically with\n updates on how far it has gotten.\n\n Args:\n data (bytes): The binary blob that should be sent to the device at highspeed.\n progress_callback (callable): A function that will be called periodically to\n report progress. The signature must be callback(done_count, total_count)\n where done_count and total_count will be passed as integers.\n "
] |
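The progress callback contract is callback(done_count, total_count) with integer arguments; a minimal example (script_bytes is a stand-in for the blob to download):

def print_progress(done_count, total_count):
    # Invoked periodically while the script streams down to the device.
    print("%d/%d chunks sent" % (done_count, total_count))

hw.send_highspeed(script_bytes, print_progress)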