Code | Summary
---|---|
Please provide a description of the function:def scan_storage(self, area_name, callable, start=0, stop=None):
if area_name == u'storage':
data = self.storage_data
elif area_name == u'streaming':
data = self.streaming_data
else:
raise ArgumentError("Unknown area name in scan_storage (%s) should be storage or streaming" % area_name)
if len(data) == 0:
return 0
if stop is None:
stop = len(data) - 1
elif stop >= len(data):
raise ArgumentError("Given stop offset is greater than the highest offset supported", length=len(data), stop_offset=stop)
scanned = 0
for i in range(start, stop + 1):
scanned += 1
should_break = callable(i, data[i])
if should_break is True:
break
return scanned | [
"Iterate over streaming or storage areas, calling callable.\n\n Args:\n area_name (str): Either 'storage' or 'streaming' to indicate which\n storage area to scan.\n callable (callable): A function that will be called as (offset, reading)\n for each reading between start_offset and end_offset (inclusive). If\n the scan function wants to stop early it can return True. If it returns\n anything else (including False or None), scanning will continue.\n start (int): Optional offset to start at (included in scan).\n stop (int): Optional offset to end at (included in scan).\n\n Returns:\n int: The number of entries scanned.\n "
] |
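For illustration, a minimal sketch of stopping a scan early; engine is a hypothetical initialized instance of this storage engine class:

def stop_at_zero(offset, reading):
    # Returning True ends the scan early; False or None continues it
    return reading.value == 0

count = engine.scan_storage(u'storage', stop_at_zero)  # default start=0, stop=end of data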
Please provide a description of the function:def push(self, value):
stream = DataStream.FromEncoded(value.stream)
if stream.stream_type == DataStream.OutputType:
if len(self.streaming_data) == self.streaming_length:
raise StorageFullError('Streaming buffer full')
self.streaming_data.append(value)
else:
if len(self.storage_data) == self.storage_length:
raise StorageFullError('Storage buffer full')
self.storage_data.append(value) | [
"Store a new value for the given stream.\n\n Args:\n value (IOTileReading): The value to store. The stream\n parameter must have the correct value\n "
] |
Please provide a description of the function:def get(self, buffer_type, offset):
if buffer_type == u'streaming':
chosen_buffer = self.streaming_data
else:
chosen_buffer = self.storage_data
if offset >= len(chosen_buffer):
raise StreamEmptyError("Invalid index given in get command", requested=offset, stored=len(chosen_buffer), buffer=buffer_type)
return chosen_buffer[offset] | [
"Get a reading from the buffer at offset.\n\n Offset is specified relative to the start of the data buffer.\n This means that if the buffer rolls over, the offset for a given\n item will appear to change. Anyone holding an offset outside of this\n engine object will need to be notified when rollovers happen (i.e.\n popn is called so that they can update their offset indices)\n\n Args:\n buffer_type (str): The buffer to pop from (either u\"storage\" or u\"streaming\")\n offset (int): The offset of the reading to get\n "
] |
Please provide a description of the function:def popn(self, buffer_type, count):
buffer_type = str(buffer_type)
if buffer_type == u'streaming':
chosen_buffer = self.streaming_data
else:
chosen_buffer = self.storage_data
if count > len(chosen_buffer):
raise StreamEmptyError("Not enough data in buffer for popn command", requested=count, stored=len(chosen_buffer), buffer=buffer_type)
popped = chosen_buffer[:count]
remaining = chosen_buffer[count:]
if buffer_type == u'streaming':
self.streaming_data = remaining
else:
self.storage_data = remaining
return popped | [
"Remove and return the oldest count values from the named buffer\n\n Args:\n buffer_type (str): The buffer to pop from (either u\"storage\" or u\"streaming\")\n count (int): The number of readings to pop\n\n Returns:\n list(IOTileReading): The values popped from the buffer\n "
] |
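A short usage sketch tying push, get and popn together; engine is a hypothetical initialized instance and the IOTileReading constructor arguments shown are assumptions:

reading = IOTileReading(0, 0x5001, 42)  # assumed (raw_time, stream, value) argument order
engine.push(reading)
oldest = engine.get(u'storage', 0)      # read the oldest entry by offset without removing it
popped = engine.popn(u'storage', 1)     # remove and return the oldest readings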
Please provide a description of the function:async def connect(self, conn_id, connection_string):
self._ensure_connection(conn_id, False)
msg = dict(connection_string=connection_string)
await self._send_command(OPERATIONS.CONNECT, msg, COMMANDS.ConnectResponse)
self._setup_connection(conn_id, connection_string) | [
"Connect to a device.\n\n See :meth:`AbstractDeviceAdapter.connect`.\n "
] |
Please provide a description of the function:async def disconnect(self, conn_id):
self._ensure_connection(conn_id, True)
msg = dict(connection_string=self._get_property(conn_id, "connection_string"))
try:
await self._send_command(OPERATIONS.DISCONNECT, msg, COMMANDS.DisconnectResponse)
finally:
self._teardown_connection(conn_id) | [
"Disconnect from a connected device.\n\n See :meth:`AbstractDeviceAdapter.disconnect`.\n "
] |
Please provide a description of the function:async def open_interface(self, conn_id, interface):
self._ensure_connection(conn_id, True)
connection_string = self._get_property(conn_id, "connection_string")
msg = dict(interface=interface, connection_string=connection_string)
await self._send_command(OPERATIONS.OPEN_INTERFACE, msg, COMMANDS.OpenInterfaceResponse) | [
"Open an interface on an IOTile device.\n\n See :meth:`AbstractDeviceAdapter.open_interface`.\n "
] |
Please provide a description of the function:async def close_interface(self, conn_id, interface):
self._ensure_connection(conn_id, True)
connection_string = self._get_property(conn_id, "connection_string")
msg = dict(interface=interface, connection_string=connection_string)
await self._send_command(OPERATIONS.CLOSE_INTERFACE, msg, COMMANDS.CloseInterfaceResponse) | [
"Close an interface on this IOTile device.\n\n See :meth:`AbstractDeviceAdapter.close_interface`.\n "
] |
Please provide a description of the function:async def send_rpc(self, conn_id, address, rpc_id, payload, timeout):
self._ensure_connection(conn_id, True)
connection_string = self._get_property(conn_id, "connection_string")
msg = dict(address=address, rpc_id=rpc_id, payload=base64.b64encode(payload),
timeout=timeout, connection_string=connection_string)
response = await self._send_command(OPERATIONS.SEND_RPC, msg, COMMANDS.SendRPCResponse,
timeout=timeout)
return unpack_rpc_response(response.get('status'), response.get('payload'),
rpc_id=rpc_id, address=address) | [
"Send an RPC to a device.\n\n See :meth:`AbstractDeviceAdapter.send_rpc`.\n "
] |
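A hedged sketch of a full connect/RPC/disconnect cycle against an adapter like the one above; the connection id, connection string and RPC id are placeholders:

import asyncio

async def query_device(adapter):
    await adapter.connect(1, 'placeholder-connection-string')
    response = await adapter.send_rpc(1, address=8, rpc_id=0x0004, payload=b'', timeout=1.0)
    await adapter.disconnect(1)
    return response

# asyncio.run(query_device(adapter))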
Please provide a description of the function:async def send_script(self, conn_id, data):
self._ensure_connection(conn_id, True)
connection_string = self._get_property(conn_id, "connection_string")
msg = dict(connection_string=connection_string, fragment_count=1, fragment_index=0,
script=base64.b64encode(data))
await self._send_command(OPERATIONS.SEND_SCRIPT, msg, COMMANDS.SendScriptResponse) | [
"Send a a script to this IOTile device\n\n Args:\n conn_id (int): A unique identifier that will refer to this connection\n data (bytes): the script to send to the device\n "
] |
Please provide a description of the function:async def _on_report_notification(self, event):
conn_string = event.get('connection_string')
report = self._report_parser.deserialize_report(event.get('serialized_report'))
self.notify_event(conn_string, 'report', report) | [
"Callback function called when a report event is received.\n\n Args:\n event (dict): The report_event\n "
] |
Please provide a description of the function:async def _on_trace_notification(self, trace_event):
conn_string = trace_event.get('connection_string')
payload = trace_event.get('payload')
await self.notify_event(conn_string, 'trace', payload) | [
"Callback function called when a trace chunk is received.\n\n Args:\n trace_chunk (dict): The received trace chunk information\n "
] |
Please provide a description of the function:async def _on_progress_notification(self, progress):
conn_string = progress.get('connection_string')
done = progress.get('done_count')
total = progress.get('total_count')
operation = progress.get('operation')
await self.notify_progress(conn_string, operation, done, total, wait=True) | [
"Callback function called when a progress notification is received.\n\n Args:\n progress (dict): The received notification containing the progress information\n "
] |
Please provide a description of the function:async def _on_websocket_disconnect(self, _event):
self.logger.info('Forcibly disconnected from the WebSocket server')
conns = self._connections.copy()
for conn_id in conns:
conn_string = self._get_property(conn_id, 'connection_string')
self._teardown_connection(conn_id)
self.notify_event(conn_string, 'disconnect', "Websocket connection closed") | [
"Callback function called when we have been disconnected from the server (by error or not).\n Allows to clean all if the disconnection was unexpected."
] |
Please provide a description of the function:def _complete_parameters(param, variables):
if isinstance(param, list):
return [_complete_parameters(x, variables) for x in param]
elif isinstance(param, dict):
return {key: _complete_parameters(value, variables) for key, value in param.items()}
elif isinstance(param, str):
try:
return Template(param).substitute(variables)
except KeyError as exc:
raise RecipeVariableNotPassed("Variable undefined in recipe", undefined_variable=exc.args[0])
return param | [
"Replace any parameters passed as {} in the yaml file with the variable names that are passed in\n\n Only strings, lists of strings, and dictionaries of strings can have\n replaceable values at the moment.\n\n "
] |
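For example, because substitution uses string.Template, nested structures are filled in recursively; the names here are illustrative:

params = {'path': 'firmware_$version.bin', 'slots': ['slot_$slot']}
variables = {'version': '1.2', 'slot': '1'}
assert _complete_parameters(params, variables) == {'path': 'firmware_1.2.bin', 'slots': ['slot_1']}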
Please provide a description of the function:def _extract_variables(param):
variables = set()
if isinstance(param, list):
variables.update(*[_extract_variables(x) for x in param])
elif isinstance(param, dict):
variables.update(*[_extract_variables(x) for x in param.values()])
elif isinstance(param, str):
for match in re.finditer(TEMPLATE_REGEX, param):
if match.group('short_id') is not None:
variables.add(match.group('short_id'))
else:
variables.add(match.group('long_id'))
return variables | [
"Find all template variables in args."
] |
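Assuming TEMPLATE_REGEX matches both the $short and ${long} template forms, extraction behaves roughly like this:

assert _extract_variables('flash_$slot with ${firmware_image}') == {'slot', 'firmware_image'}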
Please provide a description of the function:def _run_step(step_obj, step_declaration, initialized_resources):
start_time = time.time()
# Open any resources that need to be opened before we run this step
for res_name in step_declaration.resources.opened:
initialized_resources[res_name].open()
# Create a dictionary of all of the resources that are required for this step
used_resources = {local_name: initialized_resources[global_name] for local_name, global_name in step_declaration.resources.used.items()}
# Allow steps with no resources to not need a resources keyword parameter
if len(used_resources) > 0:
out = step_obj.run(resources=used_resources)
else:
out = step_obj.run()
# Close any resources that need to be closed before we run this step
for res_name in step_declaration.resources.closed:
initialized_resources[res_name].close()
end_time = time.time()
return (end_time - start_time, out) | [
"Actually run a step."
] |
Please provide a description of the function:def archive(self, output_path):
if self.path is None:
raise ArgumentError("Cannot archive a recipe yet without a reference to its original yaml file in self.path")
outfile = zipfile.ZipFile(output_path, 'w', zipfile.ZIP_DEFLATED)
outfile.write(self.path, arcname="recipe_script.yaml")
written_files = set()
for _factory, args, _resources, files in self.steps:
for arg_name in files:
file_path = args[arg_name]
if file_path in written_files:
continue
if os.path.basename(file_path) != file_path:
raise ArgumentError("Cannot archive a recipe yet that references file not in the same directory as the recipe")
full_path = os.path.join(os.path.dirname(self.path), file_path)
outfile.write(full_path, arcname=file_path)
written_files.add(file_path) | [
"Archive this recipe and all associated files into a .ship archive.\n\n Args:\n output_path (str): The path where the .ship file should be saved.\n "
] |
Please provide a description of the function:def FromArchive(cls, path, actions_dict, resources_dict, temp_dir=None):
if not path.endswith(".ship"):
raise ArgumentError("Attempted to unpack a recipe archive from a file that did not end in .ship", path=path)
name = os.path.basename(path)[:-5]
if temp_dir is None:
temp_dir = tempfile.mkdtemp()
extract_path = os.path.join(temp_dir, name)
archive = zipfile.ZipFile(path, "r")
archive.extractall(extract_path)
recipe_yaml = os.path.join(extract_path, 'recipe_script.yaml')
return cls.FromFile(recipe_yaml, actions_dict, resources_dict, name=name) | [
"Create a RecipeObject from a .ship archive.\n\n This archive should have been generated from a previous call to\n iotile-ship -a <path to yaml file>\n\n or via iotile-build using autobuild_shiparchive().\n\n Args:\n path (str): The path to the recipe file that we wish to load\n actions_dict (dict): A dictionary of named RecipeActionObject\n types that is used to look up all of the steps listed in\n the recipe file.\n resources_dict (dict): A dictionary of named RecipeResource types\n that is used to look up all of the shared resources listed in\n the recipe file.\n file_format (str): The file format of the recipe file. Currently\n we only support yaml.\n temp_dir (str): An optional temporary directory where this archive\n should be unpacked. Otherwise a system wide temporary directory\n is used.\n "
] |
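A hedged round-trip sketch pairing archive() with FromArchive(); actions_dict and resources_dict stand in for the registered action and resource plugin maps:

recipe.archive('factory_flash.ship')
restored = RecipeObject.FromArchive('factory_flash.ship', actions_dict, resources_dict)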
Please provide a description of the function:def FromFile(cls, path, actions_dict, resources_dict, file_format="yaml", name=None):
format_map = {
"yaml": cls._process_yaml
}
format_handler = format_map.get(file_format)
if format_handler is None:
raise ArgumentError("Unknown file format or file extension", file_format=file_format, \
known_formats=[x for x in format_map if format_map[x] is not None])
recipe_info = format_handler(path)
if name is None:
name, _ext = os.path.splitext(os.path.basename(path))
# Validate that the recipe file is correctly formatted
try:
recipe_info = RecipeSchema.verify(recipe_info)
except ValidationError as exc:
raise RecipeFileInvalid("Recipe file does not match expected schema", file=path, error_message=exc.msg, **exc.params)
description = recipe_info.get('description')
# Parse out global default and shared resource information
try:
resources = cls._parse_resource_declarations(recipe_info.get('resources', []), resources_dict)
defaults = cls._parse_variable_defaults(recipe_info.get("defaults", []))
steps = []
for i, action in enumerate(recipe_info.get('actions', [])):
action_name = action.pop('name')
if action_name is None:
raise RecipeFileInvalid("Action is missing required name parameter", \
parameters=action, path=path)
action_class = actions_dict.get(action_name)
if action_class is None:
raise UnknownRecipeActionType("Unknown step specified in recipe", \
action=action_name, step=i + 1, path=path)
# Parse out any resource usage in this step and make sure we only
# use named resources
step_resources = cls._parse_resource_usage(action, declarations=resources)
fixed_files, _variable_files = cls._parse_file_usage(action_class, action)
step = RecipeStep(action_class, action, step_resources, fixed_files)
steps.append(step)
return RecipeObject(name, description, steps, resources, defaults, path)
except RecipeFileInvalid as exc:
cls._future_raise(RecipeFileInvalid, RecipeFileInvalid(exc.msg, recipe=name, **exc.params),
sys.exc_info()[2]) | [
"Create a RecipeObject from a file.\n\n The file should be a specially constructed yaml file that describes\n the recipe as well as the actions that it performs.\n\n Args:\n path (str): The path to the recipe file that we wish to load\n actions_dict (dict): A dictionary of named RecipeActionObject\n types that is used to look up all of the steps listed in\n the recipe file.\n resources_dict (dict): A dictionary of named RecipeResource types\n that is used to look up all of the shared resources listed in\n the recipe file.\n file_format (str): The file format of the recipe file. Currently\n we only support yaml.\n name (str): The name of this recipe if we created it originally from an\n archive.\n "
] |
Please provide a description of the function:def _parse_file_usage(cls, action_class, args):
fixed_files = {}
variable_files = []
if not hasattr(action_class, 'FILES'):
return fixed_files, variable_files
for file_arg in action_class.FILES:
arg_value = args.get(file_arg)
if arg_value is None:
raise RecipeFileInvalid("Action lists a file argument but none was given", declared_argument=file_arg, passed_arguments=args)
variables = _extract_variables(arg_value)
if len(variables) == 0:
fixed_files[file_arg] = arg_value
else:
variable_files.append(arg_value)
return fixed_files, variable_files | [
"Find all external files referenced by an action."
] |
Please provide a description of the function:def _parse_resource_declarations(cls, declarations, resource_map):
resources = {}
for decl in declarations:
name = decl.pop('name')
typename = decl.pop('type')
desc = decl.pop('description', None)
autocreate = decl.pop('autocreate', False)
args = decl
res_type = resource_map.get(typename)
if res_type is None:
raise UnknownRecipeResourceType("Could not find shared resource type", type=typename, name=name)
# If the resource defines an argument schema, make sure we enforce it.
if hasattr(res_type, "ARG_SCHEMA"):
try:
args = res_type.ARG_SCHEMA.verify(args)
except ValidationError as exc:
raise RecipeFileInvalid("Recipe file resource declarttion has invalid parameters", resource=name, error_message=exc.msg, **exc.params)
if name in resources:
raise RecipeFileInvalid("Attempted to add two shared resources with the same name", name=name)
res = ResourceDeclaration(name, resource_map.get(typename), args, autocreate, desc, typename)
resources[name] = res
return resources | [
"Parse out what resources are declared as shared for this recipe."
] |
Please provide a description of the function:def _parse_variable_defaults(cls, defaults):
default_dict = {}
for item in defaults:
key = next(iter(item))
value = item[key]
if key in default_dict:
raise RecipeFileInvalid("Default variable value specified twice", name=key, old_value=default_dict[key], new_value=value)
default_dict[key] = value
return default_dict | [
"Parse out all of the variable defaults."
] |
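The defaults section is a list of single-key mappings, so parsing behaves roughly as follows:

defaults = [{'version': '1.0'}, {'slot': 1}]
# _parse_variable_defaults(defaults) -> {'version': '1.0', 'slot': 1}
# A repeated key, e.g. a second {'slot': 2} entry, raises RecipeFileInvalid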
Please provide a description of the function:def _parse_resource_usage(cls, action_dict, declarations):
raw_used = action_dict.pop('use', [])
opened = [x.strip() for x in action_dict.pop('open_before', [])]
closed = [x.strip() for x in action_dict.pop('close_after', [])]
used = {}
for resource in raw_used:
if ' as ' in resource:
global_name, _, local_name = resource.partition(' as ')
global_name = global_name.strip()
local_name = local_name.strip()
if len(global_name) == 0 or len(local_name) == 0:
raise RecipeFileInvalid("Resource usage specified in action with invalid name using 'as' statement", global_name=global_name, local_name=local_name, statement=resource)
else:
global_name = resource.strip()
local_name = global_name
if local_name in used:
raise RecipeFileInvalid("Resource specified twice for action", args=action_dict, resource=local_name, used_resources=used)
used[local_name] = global_name
# Make sure we only use, open and close declared resources
for name in (x for x in used.values() if x not in declarations):
raise RecipeFileInvalid("Action makes use of non-declared shared resource", name=name)
for name in (x for x in opened if x not in declarations):
raise RecipeFileInvalid("Action specified a non-declared shared resource in open_before", name=name)
for name in (x for x in closed if x not in declarations):
raise RecipeFileInvalid("Action specified a non-declared shared resource in close_after", name=name)
return ResourceUsage(used, opened, closed) | [
"Parse out what resources are used, opened and closed in an action step."
] |
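For reference, the action dictionary consumed here corresponds to yaml keys like these (resource names illustrative):

action = {
    'use': ['hw_manager as hw'],   # local name 'hw' maps to declared resource 'hw_manager'
    'open_before': ['hw_manager'],
    'close_after': ['hw_manager']
}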
Please provide a description of the function:def prepare(self, variables):
initializedsteps = []
if variables is None:
variables = dict()
for step, params, _resources, _files in self.steps:
new_params = _complete_parameters(params, variables)
initializedsteps.append(step(new_params))
return initializedsteps | [
"Initialize all steps in this recipe using their parameters.\n\n Args:\n variables (dict): A dictionary of global variable definitions\n that may be used to replace or augment the parameters given\n to each step.\n\n Returns:\n list of RecipeActionObject like instances: The list of instantiated\n steps that can be used to execute this recipe.\n "
] |
Please provide a description of the function:def _prepare_resources(self, variables, overrides=None):
if overrides is None:
overrides = {}
res_map = {}
own_map = {}
for decl in self.resources.values():
resource = overrides.get(decl.name)
if resource is None:
args = _complete_parameters(decl.args, variables)
resource = decl.type(args)
own_map[decl.name] = resource
if decl.autocreate:
resource.open()
res_map[decl.name] = resource
return res_map, own_map | [
"Create and optionally open all shared resources."
] |
Please provide a description of the function:def _cleanup_resources(self, initialized_resources):
cleanup_errors = []
# Make sure we clean up all resources that we can and don't error out at the
# first one.
for name, res in initialized_resources.items():
try:
if res.opened:
res.close()
except Exception:
_type, value, traceback = sys.exc_info()
cleanup_errors.append((name, value, traceback))
if len(cleanup_errors) > 0:
raise RecipeResourceManagementError(operation="resource cleanup", errors=cleanup_errors) | [
"Cleanup all resources that we own that are open."
] |
Please provide a description of the function:def run(self, variables=None, overrides=None):
old_dir = os.getcwd()
try:
os.chdir(self.run_directory)
initialized_steps = self.prepare(variables)
owned_resources = {}
try:
print("Running in %s" % self.run_directory)
initialized_resources, owned_resources = self._prepare_resources(variables, overrides)
for i, (step, decl) in enumerate(zip(initialized_steps, self.steps)):
print("===> Step %d: %s\t Description: %s" % (i+1, self.steps[i][0].__name__, \
self.steps[i][1].get('description', '')))
runtime, out = _run_step(step, decl, initialized_resources)
print("======> Time Elapsed: %.2f seconds" % runtime)
if out is not None:
print(out[1])
finally:
self._cleanup_resources(owned_resources)
finally:
os.chdir(old_dir) | [
"Initialize and run this recipe.\n\n By default all necessary shared resources are created and destroyed in\n this function unless you pass them preinitizlied in overrides, in\n which case they are used as is. The overrides parameter is designed\n to allow testability of iotile-ship recipes by inspecting the shared\n resources after the recipe has finished to ensure that it was properly\n set up.\n\n Args:\n variables (dict): An optional dictionary of variable assignments.\n There must be a single assignment for all free variables that\n do not have a default value, otherwise the recipe will not\n run.\n overrides (dict): An optional dictionary of shared resource\n objects that should be used instead of creating that resource\n and destroying it inside this function.\n "
] |
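An end-to-end sketch, assuming the recipe declares a $version variable and uses the plugin dictionaries from the earlier examples:

recipe = RecipeObject.FromFile('recipe_script.yaml', actions_dict, resources_dict)
recipe.run(variables={'version': '1.2'})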
Please provide a description of the function:def generate(env):
c_file, cxx_file = SCons.Tool.createCFileBuilders(env)
# C
c_file.add_action('.y', YaccAction)
c_file.add_emitter('.y', yEmitter)
c_file.add_action('.yacc', YaccAction)
c_file.add_emitter('.yacc', yEmitter)
# Objective-C
c_file.add_action('.ym', YaccAction)
c_file.add_emitter('.ym', ymEmitter)
# C++
cxx_file.add_action('.yy', YaccAction)
cxx_file.add_emitter('.yy', yyEmitter)
env['YACC'] = env.Detect('bison') or 'yacc'
env['YACCFLAGS'] = SCons.Util.CLVar('')
env['YACCCOM'] = '$YACC $YACCFLAGS -o $TARGET $SOURCES'
env['YACCHFILESUFFIX'] = '.h'
env['YACCHXXFILESUFFIX'] = '.hpp'
env['YACCVCGFILESUFFIX'] = '.vcg' | [
"Add Builders and construction variables for yacc to an Environment."
] |
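Once loaded, the tool is driven through the standard C file builder; a minimal SConstruct sketch:

env = Environment(tools=['default', 'yacc'])
env.Append(YACCFLAGS='-d')  # commonly passed so yacc/bison also emits the header tracked by the emitter
env.CFile(target='parser.c', source='parser.y')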
Please provide a description of the function:def generate(env):
SCons.Tool.createSharedLibBuilder(env)
SCons.Tool.createProgBuilder(env)
env['LINK'] = '$CC'
env['LINKFLAGS'] = SCons.Util.CLVar('')
env['LINKCOM'] = '$LINK -q $LINKFLAGS -e$TARGET $SOURCES $LIBS'
env['LIBDIRPREFIX']=''
env['LIBDIRSUFFIX']=''
env['LIBLINKPREFIX']=''
env['LIBLINKSUFFIX']='$LIBSUFFIX' | [
"Add Builders and construction variables for Borland ilink to an\n Environment."
] |
Please provide a description of the function:def find_sdk_dir(self):
if not SCons.Util.can_read_reg:
debug('find_sdk_dir(): can not read registry')
return None
hkey = self.HKEY_FMT % self.hkey_data
debug('find_sdk_dir(): checking registry:{}'.format(hkey))
try:
sdk_dir = common.read_reg(hkey)
except SCons.Util.WinError as e:
debug('find_sdk_dir(): no SDK registry key {}'.format(repr(hkey)))
return None
debug('find_sdk_dir(): Trying SDK Dir: {}'.format(sdk_dir))
if not os.path.exists(sdk_dir):
debug('find_sdk_dir(): {} not on file system'.format(sdk_dir))
return None
ftc = os.path.join(sdk_dir, self.sanity_check_file)
if not os.path.exists(ftc):
debug("find_sdk_dir(): sanity check {} not found".format(ftc))
return None
return sdk_dir | [
"Try to find the MS SDK from the registry.\n\n Return None if failed or the directory does not exist.\n "
] |
Please provide a description of the function:def get_sdk_dir(self):
try:
return self._sdk_dir
except AttributeError:
sdk_dir = self.find_sdk_dir()
self._sdk_dir = sdk_dir
return sdk_dir | [
"Return the MSSSDK given the version string."
] |
Please provide a description of the function:def get_sdk_vc_script(self,host_arch, target_arch):
if (host_arch == 'amd64' and target_arch == 'x86'):
# No cross tools needed compiling 32 bits on 64 bit machine
host_arch=target_arch
arch_string=target_arch
if (host_arch != target_arch):
arch_string='%s_%s'%(host_arch,target_arch)
debug("sdk.py: get_sdk_vc_script():arch_string:%s host_arch:%s target_arch:%s"%(arch_string,
host_arch,
target_arch))
file=self.vc_setup_scripts.get(arch_string,None)
debug("sdk.py: get_sdk_vc_script():file:%s"%file)
return file | [
" Return the script to initialize the VC compiler installed by SDK\n "
] |
Please provide a description of the function:def execute(self, sensor_graph, scope_stack):
parent = scope_stack[-1]
try:
slot = parent.resolve_identifier('current_slot', SlotIdentifier)
except UnresolvedIdentifierError:
raise SensorGraphSemanticError("set config statement used outside of config block")
if self.explicit_type is None or not isinstance(self.identifier, int):
raise SensorGraphSemanticError("Config variable type definitions are not yet supported")
if isinstance(self.value, (bytes, bytearray)) and not self.explicit_type == 'binary':
raise SensorGraphSemanticError("You must pass the binary variable type when using encoded binary data")
if not isinstance(self.value, (bytes, bytearray)) and self.explicit_type == 'binary':
raise SensorGraphSemanticError("You must pass an encoded binary value with binary type config variables")
sensor_graph.add_config(slot, self.identifier, self.explicit_type, self.value) | [
"Execute this statement on the sensor_graph given the current scope tree.\n\n This adds a single config variable assignment to the current sensor graph\n\n Args:\n sensor_graph (SensorGraph): The sensor graph that we are building or\n modifying\n scope_stack (list(Scope)): A stack of nested scopes that may influence\n how this statement allocates clocks or other stream resources.\n "
] |
Please provide a description of the function:def format_rpc(data):
address, rpc_id, args, resp, _status = data
name = rpc_name(rpc_id)
if isinstance(args, (bytes, bytearray)):
arg_str = hexlify(args)
else:
arg_str = repr(args)
if isinstance(resp, (bytes, bytearray)):
resp_str = hexlify(resp)
else:
resp_str = repr(resp)
#FIXME: Check and print status as well
return "%s called on address %d, payload=%s, response=%s" % (name, address, arg_str, resp_str) | [
"Format an RPC call and response.\n\n Args:\n data (tuple): A tuple containing the address, rpc_id, argument and\n response payloads and any error code.\n\n Returns:\n str: The formated RPC string.\n "
] |
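A small sketch of the five-element tuple this function expects; the address, RPC id and payloads are arbitrary:

data = (8, 0x8000, b'\x01\x02', b'\x03\x04', 0)  # (address, rpc_id, args, response, status)
print(format_rpc(data))
# "<looked-up rpc name> called on address 8, payload=b'0102', response=b'0304'"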
Please provide a description of the function:def execute(self, sensor_graph, scope_stack):
self.execute_before(sensor_graph, scope_stack)
for child in self.children:
child.execute(sensor_graph, scope_stack)
self.execute_after(sensor_graph, scope_stack) | [
"Execute this statement on the sensor_graph given the current scope tree.\n\n This function will likely modify the sensor_graph and will possibly\n also add to or remove from the scope_tree. If there are children nodes\n they will be called after execute_before and before execute_after,\n allowing block statements to sandwich their children in setup and teardown\n functions.\n\n Args:\n sensor_graph (SensorGraph): The sensor graph that we are building or\n modifying\n scope_stack (list(Scope)): A stack of nested scopes that may influence\n how this statement allocates clocks or other stream resources.\n "
] |
Please provide a description of the function:async def _cleanup_old_connections(self):
retval = await self._command_task.future_command(['_query_systemstate'])
for conn in retval['active_connections']:
self._logger.info("Forcible disconnecting connection %d", conn)
await self._command_task.future_command(['_disconnect', conn]) | [
"Remove all active connections and query the maximum number of supported connections\n "
] |
Please provide a description of the function:async def start(self):
self._command_task.start()
try:
await self._cleanup_old_connections()
except Exception:
await self.stop()
raise
#FIXME: This is a temporary hack, get the actual device we are serving.
iotile_id = next(iter(self.adapter.devices))
self.device = self.adapter.devices[iotile_id]
self._logger.info("Serving device 0x%04X over BLED112", iotile_id)
await self._update_advertisement()
self.setup_client(self.CLIENT_ID, scan=False, broadcast=True) | [
"Start serving access to devices over bluetooth."
] |
Please provide a description of the function:async def stop(self):
await self._command_task.future_command(['_set_mode', 0, 0]) # Disable advertising
await self._cleanup_old_connections()
self._command_task.stop()
self._stream.stop()
self._serial_port.close()
await super(BLED112Server, self).stop() | [
"Safely shut down this interface"
] |
Please provide a description of the function:async def _call_rpc(self, header):
length, _, cmd, feature, address = struct.unpack("<BBBBB", bytes(header))
rpc_id = (feature << 8) | cmd
payload = self.rpc_payload[:length]
self._logger.debug("Calling RPC %d:%04X with %s", address, rpc_id, binascii.hexlify(payload))
exception = None
response = None
try:
response = await self.send_rpc(self.CLIENT_ID, str(self.device.iotile_id), address, rpc_id, bytes(payload), timeout=30.0)
except VALID_RPC_EXCEPTIONS as err:
exception = err
except Exception as err:
self._logger.exception("Error calling RPC %d:%04X", address, rpc_id)
exception = err
status, response = pack_rpc_response(response, exception)
resp_header = struct.pack("<BBBB", status, 0, 0, len(response))
await self._send_notification(self.ReceiveHeaderHandle, resp_header)
if len(response) > 0:
await self._send_notification(self.ReceivePayloadHandle, response) | [
"Call an RPC given a header and possibly a previously sent payload\n\n Args:\n header (bytearray): The RPC header we should call\n "
] |
Please provide a description of the function:def format_script(sensor_graph):
records = []
records.append(SetGraphOnlineRecord(False, address=8))
records.append(ClearDataRecord(address=8))
records.append(ResetGraphRecord(address=8))
for node in sensor_graph.nodes:
records.append(AddNodeRecord(str(node), address=8))
for streamer in sensor_graph.streamers:
records.append(AddStreamerRecord(streamer, address=8))
for stream, value in sorted(sensor_graph.constant_database.items(), key=lambda x: x[0].encode()):
records.append(SetConstantRecord(stream, value, address=8))
records.append(PersistGraphRecord(address=8))
records.append(ClearConfigVariablesRecord())
for slot in sorted(sensor_graph.config_database, key=lambda x: x.encode()):
for config_id in sorted(sensor_graph.config_database[slot]):
config_type, value = sensor_graph.config_database[slot][config_id]
byte_value = _convert_to_bytes(config_type, value)
records.append(SetConfigRecord(slot, config_id, byte_value))
# If we have an app tag and version set program them in
app_tag = sensor_graph.metadata_database.get('app_tag')
app_version = sensor_graph.metadata_database.get('app_version')
if app_tag is not None:
records.append(SetDeviceTagRecord(app_tag=app_tag, app_version=app_version))
script = UpdateScript(records)
return script.encode() | [
"Create a binary script containing this sensor graph.\n\n This function produces a repeatable script by applying a known sorting\n order to all constants and config variables when iterating over those\n dictionaries.\n\n Args:\n sensor_graph (SensorGraph): the sensor graph that we want to format\n\n Returns:\n bytearray: The binary script data.\n\n "
] |
Please provide a description of the function:def dump(self):
walkers = {}
walkers.update({str(walker.selector): walker.dump() for walker in self._queue_walkers})
walkers.update({str(walker.selector): walker.dump() for walker in self._virtual_walkers})
return {
u'engine': self._engine.dump(),
u'rollover_storage': self._rollover_storage,
u'rollover_streaming': self._rollover_streaming,
u'last_values': {str(stream): reading.asdict() for stream, reading in self._last_values.items()},
u'walkers': walkers
} | [
"Dump the state of this SensorLog.\n\n The purpose of this method is to be able to restore the same state\n later. However there are links in the SensorLog for stream walkers.\n\n So the dump process saves the state of each stream walker and upon\n restore, it looks through the current set of stream walkers and\n restores each one that existed when dump() was called to its state.\n\n Returns:\n dict: The serialized state of this SensorLog.\n "
] |
Please provide a description of the function:def restore(self, state, permissive=False):
self._engine.restore(state.get(u'engine'))
self._last_values = {DataStream.FromString(stream): IOTileReading.FromDict(reading) for
stream, reading in state.get(u"last_values", {}).items()}
self._rollover_storage = state.get(u'rollover_storage', True)
self._rollover_streaming = state.get(u'rollover_streaming', True)
old_walkers = {DataStreamSelector.FromString(selector): dump for selector, dump in
state.get(u"walkers").items()}
for walker in self._virtual_walkers:
if walker.selector in old_walkers:
walker.restore(old_walkers[walker.selector])
elif not permissive:
raise ArgumentError("Cannot restore SensorLog, walker %s exists in restored log but did not exist before" % str(walker.selector))
for walker in self._queue_walkers:
if walker.selector in old_walkers:
walker.restore(old_walkers[walker.selector])
elif not permissive:
raise ArgumentError("Cannot restore SensorLog, walker %s exists in restored log but did not exist before" % str(walker.selector)) | [
"Restore a state previously dumped by a call to dump().\n\n The purpose of this method is to be able to restore a previously\n dumped state. However there are links in the SensorLog for stream\n walkers.\n\n So the restore process looks through the current set of stream walkers\n and restores each one that existed when dump() was called to its\n state. If there are walkers allocated that were not present when\n dump() was called, an exception is raised unless permissive=True,\n in which case they are ignored.\n\n Args:\n state (dict): The previous state to restore, from a prior call\n to dump().\n permissive (bool): Whether to raise an exception is new stream\n walkers are present that do not have dumped contents().\n\n Raises:\n ArgumentError: There are new stream walkers present in the current\n SensorLog and permissive==False.\n "
] |
Please provide a description of the function:def set_rollover(self, area, enabled):
if area == u'streaming':
self._rollover_streaming = enabled
elif area == u'storage':
self._rollover_storage = enabled
else:
raise ArgumentError("You must pass one of 'storage' or 'streaming' to set_rollover", area=area) | [
"Configure whether rollover is enabled for streaming or storage streams.\n\n Normally a SensorLog is used in ring-buffer mode which means that old\n readings are automatically overwritten as needed when new data is saved.\n\n However, you can configure it into fill-stop mode by using:\n set_rollover(\"streaming\"|\"storage\", True|False)\n\n By default rollover is set to True for both streaming and storage and can\n be controlled individually for each one.\n\n Args:\n area (str): Either streaming or storage.\n enabled (bool): Whether to enable or disable rollover.\n "
] |
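For example, to put the storage area into fill-stop mode so a StorageFullError propagates instead of old readings being erased (sensor_log being an initialized SensorLog):

sensor_log.set_rollover(u'storage', False)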
Please provide a description of the function:def dump_constants(self):
constants = []
for walker in self._virtual_walkers:
if not walker.selector.inexhaustible:
continue
constants.append((walker.selector.as_stream(), walker.reading))
return constants | [
"Dump (stream, value) pairs for all constant streams.\n\n This method walks the internal list of defined stream walkers and\n dumps the current value for all constant streams.\n\n Returns:\n list of (DataStream, IOTileReading): A list of all of the defined constants.\n "
] |
Please provide a description of the function:def watch(self, selector, callback):
if selector not in self._monitors:
self._monitors[selector] = set()
self._monitors[selector].add(callback) | [
"Call a function whenever a stream changes.\n\n Args:\n selector (DataStreamSelector): The selector to watch.\n If this is None, it is treated as a wildcard selector\n that matches every stream.\n callback (callable): The function to call when a new\n reading is pushed. Callback is called as:\n callback(stream, value)\n "
] |
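A short sketch of registering a monitor callback; the selector string follows the DataStreamSelector.FromString format used elsewhere in this module:

selector = DataStreamSelector.FromString(u'output 1')
sensor_log.watch(selector, lambda stream, reading: print(stream, reading.value))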
Please provide a description of the function:def create_walker(self, selector, skip_all=True):
if selector.buffered:
walker = BufferedStreamWalker(selector, self._engine, skip_all=skip_all)
self._queue_walkers.append(walker)
return walker
if selector.match_type == DataStream.CounterType:
walker = CounterStreamWalker(selector)
else:
walker = VirtualStreamWalker(selector)
self._virtual_walkers.append(walker)
return walker | [
"Create a stream walker based on the given selector.\n\n This function returns a StreamWalker subclass that will\n remain up to date and allow iterating over and popping readings\n from the stream(s) specified by the selector.\n\n When the stream walker is done, it should be passed to\n destroy_walker so that it is removed from internal lists that\n are used to always keep it in sync.\n\n Args:\n selector (DataStreamSelector): The selector describing the\n streams that we want to iterate over.\n skip_all (bool): Whether to start at the beginning of the data\n or to skip everything and start at the end. Defaults\n to skipping everything. This parameter only has any\n effect on buffered stream selectors.\n\n Returns:\n StreamWalker: A properly updating stream walker with the given selector.\n "
] |
Please provide a description of the function:def destroy_walker(self, walker):
if walker.buffered:
self._queue_walkers.remove(walker)
else:
self._virtual_walkers.remove(walker) | [
"Destroy a previously created stream walker.\n\n Args:\n walker (StreamWalker): The walker to remove from internal updating\n lists.\n "
] |
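The typical walker lifecycle, sketched under the same assumptions about selector strings:

walker = sensor_log.create_walker(DataStreamSelector.FromString(u'buffered 1'), skip_all=False)
# ... iterate over or pop readings from walker ...
sensor_log.destroy_walker(walker)  # stop keeping the walker in sync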
Please provide a description of the function:def restore_walker(self, dumped_state):
selector_string = dumped_state.get(u'selector')
if selector_string is None:
raise ArgumentError("Invalid stream walker state in restore_walker, missing 'selector' key", state=dumped_state)
selector = DataStreamSelector.FromString(selector_string)
walker = self.create_walker(selector)
walker.restore(dumped_state)
return walker | [
"Restore a stream walker that was previously serialized.\n\n Since stream walkers need to be tracked in an internal list for\n notification purposes, we need to be careful with how we restore\n them to make sure they remain part of the right list.\n\n Args:\n dumped_state (dict): The dumped state of a stream walker\n from a previous call to StreamWalker.dump()\n\n Returns:\n StreamWalker: The correctly restored StreamWalker subclass.\n "
] |
Please provide a description of the function:def clear(self):
for walker in self._virtual_walkers:
walker.skip_all()
self._engine.clear()
for walker in self._queue_walkers:
walker.skip_all()
self._last_values = {} | [
"Clear all data from this sensor_log.\n\n All readings in all walkers are skipped and buffered data is\n destroyed.\n "
] |
Please provide a description of the function:def push(self, stream, reading):
# Make sure the stream is correct
reading = copy.copy(reading)
reading.stream = stream.encode()
if stream.buffered:
output_buffer = stream.output
if self.id_assigner is not None:
reading.reading_id = self.id_assigner(stream, reading)
try:
self._engine.push(reading)
except StorageFullError:
# If we are in fill-stop mode, don't auto erase old data.
if (stream.output and not self._rollover_streaming) or (not stream.output and not self._rollover_storage):
raise
self._erase_buffer(stream.output)
self._engine.push(reading)
for walker in self._queue_walkers:
# Only notify the walkers that are on this queue
if walker.selector.output == output_buffer:
walker.notify_added(stream)
# Activate any monitors we have for this stream
for selector in self._monitors:
if selector is None or selector.matches(stream):
for callback in self._monitors[selector]:
callback(stream, reading)
# Virtual streams live only in their walkers, so update each walker
# that contains this stream.
for walker in self._virtual_walkers:
if walker.matches(stream):
walker.push(stream, reading)
self._last_values[stream] = reading | [
"Push a reading into a stream, updating any associated stream walkers.\n\n Args:\n stream (DataStream): the stream to push the reading into\n reading (IOTileReading): the reading to push\n "
] |
Please provide a description of the function:def _erase_buffer(self, output_buffer):
erase_size = self._model.get(u'buffer_erase_size')
buffer_type = u'storage'
if output_buffer:
buffer_type = u'streaming'
old_readings = self._engine.popn(buffer_type, erase_size)
# Now go through all of our walkers that could match and
# update their availability counts and data buffer pointers
for reading in old_readings:
stream = DataStream.FromEncoded(reading.stream)
for walker in self._queue_walkers:
# Only notify the walkers that are on this queue
if walker.selector.output == output_buffer:
walker.notify_rollover(stream) | [
"Erase readings in the specified buffer to make space."
] |
Please provide a description of the function:def inspect_last(self, stream, only_allocated=False):
if only_allocated:
found = False
for walker in self._virtual_walkers:
if walker.matches(stream):
found = True
break
if not found:
raise UnresolvedIdentifierError("inspect_last could not find an allocated virtual streamer for the desired stream", stream=stream)
if stream in self._last_values:
return self._last_values[stream]
raise StreamEmptyError(u"inspect_last called on stream that has never been written to", stream=stream) | [
"Return the last value pushed into a stream.\n\n This function works even if the stream is virtual and no\n virtual walker has been created for it. It is primarily\n useful to aid in debugging sensor graphs.\n\n Args:\n stream (DataStream): The stream to inspect.\n only_allocated (bool): Optional parameter to only allow inspection\n of allocated virtual streams. This is useful for mimicking the\n behavior of an embedded device that does not have a _last_values\n array.\n\n Returns:\n IOTileReading: The data in the stream\n\n Raises:\n StreamEmptyError: if there has never been data written to\n the stream.\n UnresolvedIdentifierError: if only_allocated is True and there has not\n been a virtual stream walker allocated to listen to this stream.\n "
] |
Please provide a description of the function:def _run_exitfuncs():
while _exithandlers:
func, targs, kargs = _exithandlers.pop()
func(*targs, **kargs) | [
"run any registered exit functions\n\n _exithandlers is traversed in reverse order so functions are executed\n last in, first out.\n "
] |
Please provide a description of the function:def generate(env):
link.generate(env)
if env['PLATFORM'] == 'hpux':
env['SHLINKFLAGS'] = SCons.Util.CLVar('$LINKFLAGS -shared -fPIC')
# __RPATH is set to $_RPATH in the platform specification if that
# platform supports it.
env['RPATHPREFIX'] = '-Wl,-rpath='
env['RPATHSUFFIX'] = ''
env['_RPATH'] = '${_concat(RPATHPREFIX, RPATH, RPATHSUFFIX, __env__)}'
# OpenBSD doesn't usually use SONAME for libraries
use_soname = not sys.platform.startswith('openbsd')
link._setup_versioned_lib_variables(env, tool = 'gnulink', use_soname = use_soname)
env['LINKCALLBACKS'] = link._versioned_lib_callbacks()
# For backward-compatibility with older SCons versions
env['SHLIBVERSIONFLAGS'] = SCons.Util.CLVar('-Wl,-Bsymbolic') | [
"Add Builders and construction variables for gnulink to an Environment."
] |
Please provide a description of the function:def _windowsLdmodTargets(target, source, env, for_signature):
return _dllTargets(target, source, env, for_signature, 'LDMODULE') | [
"Get targets for loadable modules."
] |
Please provide a description of the function:def _windowsLdmodSources(target, source, env, for_signature):
return _dllSources(target, source, env, for_signature, 'LDMODULE') | [
"Get sources for loadable modules."
] |
Please provide a description of the function:def _dllEmitter(target, source, env, paramtp):
SCons.Tool.msvc.validate_vars(env)
extratargets = []
extrasources = []
dll = env.FindIxes(target, '%sPREFIX' % paramtp, '%sSUFFIX' % paramtp)
no_import_lib = env.get('no_import_lib', 0)
if not dll:
raise SCons.Errors.UserError('A shared library should have exactly one target with the suffix: %s' % env.subst('$%sSUFFIX' % paramtp))
insert_def = env.subst("$WINDOWS_INSERT_DEF")
if not insert_def in ['', '0', 0] and \
not env.FindIxes(source, "WINDOWSDEFPREFIX", "WINDOWSDEFSUFFIX"):
# append a def file to the list of sources
extrasources.append(
env.ReplaceIxes(dll,
'%sPREFIX' % paramtp, '%sSUFFIX' % paramtp,
"WINDOWSDEFPREFIX", "WINDOWSDEFSUFFIX"))
version_num, suite = SCons.Tool.msvs.msvs_parse_version(env.get('MSVS_VERSION', '6.0'))
if version_num >= 8.0 and \
(env.get('WINDOWS_INSERT_MANIFEST', 0) or env.get('WINDOWS_EMBED_MANIFEST', 0)):
# MSVC 8 and above automatically generate .manifest files that must be installed
extratargets.append(
env.ReplaceIxes(dll,
'%sPREFIX' % paramtp, '%sSUFFIX' % paramtp,
"WINDOWSSHLIBMANIFESTPREFIX", "WINDOWSSHLIBMANIFESTSUFFIX"))
if 'PDB' in env and env['PDB']:
pdb = env.arg2nodes('$PDB', target=target, source=source)[0]
extratargets.append(pdb)
target[0].attributes.pdb = pdb
if version_num >= 11.0 and env.get('PCH', 0):
# MSVC 11 and above need the PCH object file to be added to the link line,
# otherwise you get link error LNK2011.
pchobj = SCons.Util.splitext(str(env['PCH']))[0] + '.obj'
# print "prog_emitter, version %s, appending pchobj %s"%(version_num, pchobj)
if pchobj not in extrasources:
extrasources.append(pchobj)
if not no_import_lib and \
not env.FindIxes(target, "LIBPREFIX", "LIBSUFFIX"):
# Append an import library to the list of targets.
extratargets.append(
env.ReplaceIxes(dll,
'%sPREFIX' % paramtp, '%sSUFFIX' % paramtp,
"LIBPREFIX", "LIBSUFFIX"))
# and .exp file is created if there are exports from a DLL
extratargets.append(
env.ReplaceIxes(dll,
'%sPREFIX' % paramtp, '%sSUFFIX' % paramtp,
"WINDOWSEXPPREFIX", "WINDOWSEXPSUFFIX"))
return (target+extratargets, source+extrasources) | [
"Common implementation of dll emitter."
] |
Please provide a description of the function:def embedManifestDllCheck(target, source, env):
if env.get('WINDOWS_EMBED_MANIFEST', 0):
manifestSrc = target[0].get_abspath() + '.manifest'
if os.path.exists(manifestSrc):
ret = (embedManifestDllAction) ([target[0]],None,env)
if ret:
raise SCons.Errors.UserError("Unable to embed manifest into %s" % (target[0]))
return ret
else:
print('(embed: no %s.manifest found; not embedding.)'%str(target[0]))
return 0 | [
"Function run by embedManifestDllCheckAction to check for existence of manifest\n and other conditions, and embed the manifest by calling embedManifestDllAction if so."
] |
Please provide a description of the function:def embedManifestExeCheck(target, source, env):
if env.get('WINDOWS_EMBED_MANIFEST', 0):
manifestSrc = target[0].get_abspath() + '.manifest'
if os.path.exists(manifestSrc):
ret = (embedManifestExeAction) ([target[0]],None,env)
if ret:
raise SCons.Errors.UserError("Unable to embed manifest into %s" % (target[0]))
return ret
else:
print('(embed: no %s.manifest found; not embedding.)'%str(target[0]))
return 0 | [
"Function run by embedManifestExeCheckAction to check for existence of manifest\n and other conditions, and embed the manifest by calling embedManifestExeAction if so."
] |
Please provide a description of the function:def generate(env):
SCons.Tool.createSharedLibBuilder(env)
SCons.Tool.createProgBuilder(env)
env['SHLINK'] = '$LINK'
env['SHLINKFLAGS'] = SCons.Util.CLVar('$LINKFLAGS /dll')
env['_SHLINK_TARGETS'] = windowsShlinkTargets
env['_SHLINK_SOURCES'] = windowsShlinkSources
env['SHLINKCOM'] = compositeShLinkAction
env.Append(SHLIBEMITTER = [windowsLibEmitter])
env.Append(LDMODULEEMITTER = [windowsLibEmitter])
env['LINK'] = 'link'
env['LINKFLAGS'] = SCons.Util.CLVar('/nologo')
env['_PDB'] = pdbGenerator
env['LINKCOM'] = compositeLinkAction
env.Append(PROGEMITTER = [prog_emitter])
env['LIBDIRPREFIX']='/LIBPATH:'
env['LIBDIRSUFFIX']=''
env['LIBLINKPREFIX']=''
env['LIBLINKSUFFIX']='$LIBSUFFIX'
env['WIN32DEFPREFIX'] = ''
env['WIN32DEFSUFFIX'] = '.def'
env['WIN32_INSERT_DEF'] = 0
env['WINDOWSDEFPREFIX'] = '${WIN32DEFPREFIX}'
env['WINDOWSDEFSUFFIX'] = '${WIN32DEFSUFFIX}'
env['WINDOWS_INSERT_DEF'] = '${WIN32_INSERT_DEF}'
env['WIN32EXPPREFIX'] = ''
env['WIN32EXPSUFFIX'] = '.exp'
env['WINDOWSEXPPREFIX'] = '${WIN32EXPPREFIX}'
env['WINDOWSEXPSUFFIX'] = '${WIN32EXPSUFFIX}'
env['WINDOWSSHLIBMANIFESTPREFIX'] = ''
env['WINDOWSSHLIBMANIFESTSUFFIX'] = '${SHLIBSUFFIX}.manifest'
env['WINDOWSPROGMANIFESTPREFIX'] = ''
env['WINDOWSPROGMANIFESTSUFFIX'] = '${PROGSUFFIX}.manifest'
env['REGSVRACTION'] = regServerCheck
env['REGSVR'] = os.path.join(SCons.Platform.win32.get_system_root(),'System32','regsvr32')
env['REGSVRFLAGS'] = '/s '
env['REGSVRCOM'] = '$REGSVR $REGSVRFLAGS ${TARGET.windows}'
env['WINDOWS_EMBED_MANIFEST'] = 0
env['MT'] = 'mt'
#env['MTFLAGS'] = ['-hashupdate']
env['MTFLAGS'] = SCons.Util.CLVar('/nologo')
# Note: use - here to prevent build failure if no manifest produced.
# This seems much simpler than a fancy system using a function action to see
# if the manifest actually exists before trying to run mt with it.
env['MTEXECOM'] = '-$MT $MTFLAGS -manifest ${TARGET}.manifest $_MANIFEST_SOURCES -outputresource:$TARGET;1'
env['MTSHLIBCOM'] = '-$MT $MTFLAGS -manifest ${TARGET}.manifest $_MANIFEST_SOURCES -outputresource:$TARGET;2'
# TODO Future work garyo 27-Feb-11
env['_MANIFEST_SOURCES'] = None # _windowsManifestSources
# Set-up ms tools paths
msvc_setup_env_once(env)
# Loadable modules are on Windows the same as shared libraries, but they
# are subject to different build parameters (LDMODULE* variables).
# Therefore LDMODULE* variables correspond as much as possible to
# SHLINK*/SHLIB* ones.
SCons.Tool.createLoadableModuleBuilder(env)
env['LDMODULE'] = '$SHLINK'
env['LDMODULEPREFIX'] = '$SHLIBPREFIX'
env['LDMODULESUFFIX'] = '$SHLIBSUFFIX'
env['LDMODULEFLAGS'] = '$SHLINKFLAGS'
env['_LDMODULE_TARGETS'] = _windowsLdmodTargets
env['_LDMODULE_SOURCES'] = _windowsLdmodSources
env['LDMODULEEMITTER'] = [ldmodEmitter]
env['LDMODULECOM'] = compositeLdmodAction | [
"Add Builders and construction variables for ar to an Environment."
] |
Please provide a description of the function:def DviPsStrFunction(target = None, source= None, env=None):
if env.GetOption("no_exec"):
result = env.subst('$PSCOM',0,target,source)
else:
result = ''
return result | [
"A strfunction for dvipdf that returns the appropriate\n command string for the no_exec options."
] |
Please provide a description of the function:def generate(env):
global PSAction
if PSAction is None:
PSAction = SCons.Action.Action('$PSCOM', '$PSCOMSTR')
global DVIPSAction
if DVIPSAction is None:
DVIPSAction = SCons.Action.Action(DviPsFunction, strfunction = DviPsStrFunction)
global PSBuilder
if PSBuilder is None:
PSBuilder = SCons.Builder.Builder(action = PSAction,
prefix = '$PSPREFIX',
suffix = '$PSSUFFIX',
src_suffix = '.dvi',
src_builder = 'DVI',
single_source=True)
env['BUILDERS']['PostScript'] = PSBuilder
env['DVIPS'] = 'dvips'
env['DVIPSFLAGS'] = SCons.Util.CLVar('')
# I'm not quite sure I got the directories and filenames right for variant_dir
# We need to be in the correct directory for the sake of latex \includegraphics eps included files.
env['PSCOM'] = 'cd ${TARGET.dir} && $DVIPS $DVIPSFLAGS -o ${TARGET.file} ${SOURCE.file}'
env['PSPREFIX'] = ''
env['PSSUFFIX'] = '.ps' | [
"Add Builders and construction variables for dvips to an Environment."
] |
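Once loaded, the tool exposes the PostScript builder registered above; a minimal sketch:

env = Environment(tools=['default', 'dvips'])
env.PostScript(target='paper.ps', source='paper.dvi')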
Please provide a description of the function:def execute(self, sensor_graph, scope_stack):
streamer = DataStreamer(self.selector, self.dest, self.report_format, self.auto, report_type=self.report_type, with_other=self.with_other)
sensor_graph.add_streamer(streamer) | [
"Execute this statement on the sensor_graph given the current scope tree.\n\n This adds a single DataStreamer to the current sensor graph\n\n Args:\n sensor_graph (SensorGraph): The sensor graph that we are building or\n modifying\n scope_stack (list(Scope)): A stack of nested scopes that may influence\n how this statement allocates clocks or other stream resources.\n "
] |
Please provide a description of the function:def build_program(tile, elfname, chip, patch=True):
dirs = chip.build_dirs()
output_name = '%s_%s.elf' % (elfname, chip.arch_name(),)
output_binname = '%s_%s.bin' % (elfname, chip.arch_name(),)
patched_name = '%s_%s_patched.elf' % (elfname, chip.arch_name(),)
patchfile_name = '%s_%s_patchcommand.txt' % (elfname, chip.arch_name(),)
map_name = '%s_%s.map' % (elfname, chip.arch_name(),)
VariantDir(dirs['build'], os.path.join('firmware', 'src'), duplicate=0)
args_file = os.path.join('.', dirs['build'], 'gcc_args.txt')
prog_env = setup_environment(chip, args_file=args_file)
prog_env['OUTPUT'] = output_name
prog_env['BUILD_DIR'] = dirs['build']
prog_env['OUTPUT_PATH'] = os.path.join(dirs['build'], output_name)
prog_env['OUTPUTBIN'] = os.path.join(dirs['build'], output_binname)
prog_env['PATCHED'] = os.path.join(dirs['build'], patched_name)
prog_env['PATCH_FILE'] = os.path.join(dirs['build'], patchfile_name)
prog_env['PATCH_FILENAME'] = patchfile_name
prog_env['MODULE'] = elfname
# Setup all of our dependencies and make sure our output depends on them being built
tilebus_defs = setup_dependencies(tile, prog_env)
# Setup specific linker flags for building a program
# Specify the linker script
# We can find a linker script in one of two places, either in a dependency or in an explicit 'linker' property
# First check for a linker script in our dependencies
ldscripts = list(itertools.chain(*[x.find_products('linker_script') for x in prog_env['DEPENDENCIES']]))
# Make sure we don't have multiple linker scripts coming in from dependencies
if len(ldscripts) > 1:
raise BuildError("Multiple linker scripts included from dependencies, at most one may be included",
linker_scripts=ldscripts)
# Make sure we don't have a linker script from a dependency and explicity specified
if len(ldscripts) == 1 and chip.property('linker', None) is not None:
raise BuildError("Linker script specified in dependency and explicitly in module_settings",
explicit_script=chip.property('linker'), dependency_script=ldscripts[0])
if len(ldscripts) == 1:
ldscript = ldscripts[0]
else:
ldscript = utilities.join_path(chip.property('linker'))
# Find the linker script directory in case it includes other linker scripts in the same directory
lddir = os.path.abspath(os.path.dirname(ldscript))
prog_env['LIBPATH'] += [lddir]
prog_env['LINKFLAGS'].append('-T"%s"' % ldscript)
# Specify the output map file
prog_env['LINKFLAGS'].extend(['-Xlinker', '-Map="%s"' % os.path.join(dirs['build'], map_name)])
Clean(os.path.join(dirs['build'], output_name), [os.path.join(dirs['build'], map_name)])
# Compile the TileBus command and config variable definitions
# Try to use the modern 'tilebus' directory or the old 'cdb' directory
tbname = os.path.join('firmware', 'src', 'tilebus', prog_env["MODULE"] + ".bus")
if not os.path.exists(tbname):
tbname = os.path.join('firmware', 'src', 'cdb', prog_env["MODULE"] + ".cdb")
compile_tilebus(tilebus_defs + [tbname], prog_env)
# Ensure that our argument file to gcc is created
args_node = prog_env.Command([args_file], [],
action=prog_env.Action(create_arg_file, "Creating GCC Arguments"))
prog_env.AlwaysBuild(args_node)
# Compile an elf for the firmware image
objs = SConscript(os.path.join(dirs['build'], 'SConscript'), exports='prog_env')
for obj in objs:
Depends(obj, args_file)
outfile = prog_env.Program(os.path.join(dirs['build'], prog_env['OUTPUT']), objs)
if patch:
# Create a patched ELF including a proper checksum
# First create a binary dump of the program flash
outbin = prog_env.Command(prog_env['OUTPUTBIN'], os.path.join(dirs['build'], prog_env['OUTPUT']),
"arm-none-eabi-objcopy -O binary $SOURCES $TARGET")
# Now create a command file containing the linker command needed to patch the elf
outhex = prog_env.Command(prog_env['PATCH_FILE'], outbin, action=prog_env.Action(checksum_creation_action,
"Generating checksum file"))
# Next relink a new version of the binary using that patch file to define the image checksum
patch_env = prog_env.Clone()
patch_env['LINKFLAGS'].extend(['-Xlinker', '@%s' % patch_env['PATCH_FILE']])
patched_file = patch_env.Program(prog_env['PATCHED'], objs)
patch_env.Depends(patched_file, [os.path.join(dirs['build'], output_name), patch_env['PATCH_FILE']])
prog_env.Depends(os.path.join(dirs['build'], output_name), [ldscript])
prog_env.InstallAs(os.path.join(dirs['output'], output_name), os.path.join(dirs['build'], patched_name))
else:
prog_env.InstallAs(os.path.join(dirs['output'], output_name), outfile)
prog_env.InstallAs(os.path.join(dirs['output'], map_name), os.path.join(dirs['build'], map_name))
return os.path.join(dirs['output'], output_name) | [
"\n Build an ARM cortex executable\n "
] |
Please provide a description of the function:def build_library(tile, libname, chip):
dirs = chip.build_dirs()
output_name = '%s_%s.a' % (libname, chip.arch_name())
# Support both firmware/src and just src locations for source code
if os.path.exists('firmware'):
VariantDir(dirs['build'], os.path.join('firmware', 'src'), duplicate=0)
else:
VariantDir(dirs['build'], 'src', duplicate=0)
library_env = setup_environment(chip)
library_env['OUTPUT'] = output_name
library_env['OUTPUT_PATH'] = os.path.join(dirs['build'], output_name)
library_env['BUILD_DIR'] = dirs['build']
# Check for any dependencies this library has
tilebus_defs = setup_dependencies(tile, library_env)
# Create header files for all tilebus config variables and commands that are defined in ourselves
# or in our dependencies
tilebus_defs += tile.find_products('tilebus_definitions')
compile_tilebus(tilebus_defs, library_env, header_only=True)
SConscript(os.path.join(dirs['build'], 'SConscript'), exports='library_env')
library_env.InstallAs(os.path.join(dirs['output'], output_name), os.path.join(dirs['build'], output_name))
# See if we should copy any files over to the output:
for src, dst in chip.property('copy_files', []):
srcpath = os.path.join(*src)
destpath = os.path.join(dirs['output'], dst)
library_env.InstallAs(destpath, srcpath)
return os.path.join(dirs['output'], output_name) | [
"Build a static ARM cortex library"
] |
Please provide a description of the function:def setup_environment(chip, args_file=None):
config = ConfigManager()
# Make sure we never get MSVC settings for windows since that has the wrong command line flags for gcc
if platform.system() == 'Windows':
env = Environment(tools=['mingw'], ENV=os.environ)
else:
env = Environment(tools=['default'], ENV=os.environ)
env['INCPREFIX'] = '-I"'
env['INCSUFFIX'] = '"'
env['CPPDEFPREFIX'] = ''
env['CPPDEFSUFFIX'] = ''
env['CPPPATH'] = chip.includes()
env['ARCH'] = chip
# Setup Cross Compiler
env['CC'] = 'arm-none-eabi-gcc'
env['AS'] = 'arm-none-eabi-gcc'
env['LINK'] = 'arm-none-eabi-gcc'
env['AR'] = 'arm-none-eabi-ar'
env['RANLIB'] = 'arm-none-eabi-ranlib'
# The AS command line is set up by default to invoke 'as' directly, so we need
# to modify it to call via *-gcc to allow for preprocessing
env['ASCOM'] = "$AS $ASFLAGS -o $TARGET -c $SOURCES"
# Setup nice display strings unless we're asked to show raw commands
if not config.get('build:show-commands'):
env['CCCOMSTR'] = "Compiling $TARGET"
env['ARCOMSTR'] = "Building static library $TARGET"
env['RANLIBCOMSTR'] = "Indexing static library $TARGET"
env['LINKCOMSTR'] = "Linking $TARGET"
# Setup Compiler Flags
env['CCFLAGS'] = chip.combined_properties('cflags')
env['LINKFLAGS'] = chip.combined_properties('ldflags')
env['ARFLAGS'].append(chip.combined_properties('arflags')) # There are default ARFLAGS that are necessary to keep
env['ASFLAGS'].append(chip.combined_properties('asflags'))
# Add in compile tile definitions
defines = utilities.build_defines(chip.property('defines', {}))
env['CPPDEFINES'] = defines
if args_file is not None:
env['CCCOM'] = "$CC $CCFLAGS $CPPFLAGS @{} -c -o $TARGET $SOURCES".format(args_file)
# Setup Target Architecture
env['CCFLAGS'].append('-mcpu=%s' % chip.property('cpu'))
env['ASFLAGS'].append('-mcpu=%s' % chip.property('cpu'))
env['LINKFLAGS'].append('-mcpu=%s' % chip.property('cpu'))
# Initialize library paths (all libraries are added via dependencies)
env['LIBPATH'] = []
env['LIBS'] = []
return env | [
"Setup the SCons environment for compiling arm cortex code.\n\n This will return an env that has all of the correct settings and create a\n command line arguments file for GCC that contains all of the required\n flags. The use of a command line argument file passed with @./file_path is\n important since there can be many flags that exceed the maximum allowed length\n of a command line on Windows.\n "
] |
Please provide a description of the function:def compile_tilebus(files, env, outdir=None, header_only=False):
if outdir is None:
dirs = env["ARCH"].build_dirs()
outdir = dirs['build']
cmdmap_c_path = os.path.join(outdir, 'command_map_c.c')
cmdmap_h_path = os.path.join(outdir, 'command_map_c.h')
config_c_path = os.path.join(outdir, 'config_variables_c.c')
config_h_path = os.path.join(outdir, 'config_variables_c.h')
if header_only:
return env.Command([cmdmap_h_path, config_h_path], files,
action=env.Action(tb_h_file_creation, "Creating header files from TileBus definitions"))
else:
env['MIBFILE'] = '#' + cmdmap_c_path
return env.Command([cmdmap_c_path, cmdmap_h_path, config_c_path, config_h_path], files,
action=env.Action(tb_c_file_creation, "Compiling TileBus commands and config variables")) | [
"Given a path to a *.cdb file, process it and generate c tables and/or headers containing the information."
] |
Please provide a description of the function:def tb_c_file_creation(target, source, env):
files = [str(x) for x in source]
try:
desc = TBDescriptor(files)
except pyparsing.ParseException as e:
raise BuildError("Could not parse tilebus file", parsing_exception=e)
block = desc.get_block()
block.render_template(block.CommandFileTemplate, out_path=str(target[0]))
block.render_template(block.CommandHeaderTemplate, out_path=str(target[1]))
block.render_template(block.ConfigFileTemplate, out_path=str(target[2]))
block.render_template(block.ConfigHeaderTemplate, out_path=str(target[3])) | [
"Compile tilebus file into a .h/.c pair for compilation into an ARM object"
] |
Please provide a description of the function:def tb_h_file_creation(target, source, env):
files = [str(x) for x in source]
try:
desc = TBDescriptor(files)
except pyparsing.ParseException as e:
raise BuildError("Could not parse tilebus file", parsing_exception=e)
block = desc.get_block(config_only=True)
block.render_template(block.CommandHeaderTemplate, out_path=str(target[0]))
block.render_template(block.ConfigHeaderTemplate, out_path=str(target[1])) | [
"Compile tilebus file into only .h files corresponding to config variables for inclusion in a library"
] |
Please provide a description of the function:def checksum_creation_action(target, source, env):
# Important Notes:
# There are apparently many ways to calculate a CRC-32 checksum, we use the following options
# Initial seed value prepended to the input: 0xFFFFFFFF
# Whether the input is fed into the shift register least-significant bit or most-significant bit first: MSB (rev=False)
# Whether each data word is inverted: No
# Whether the final CRC value is inverted: No
# *These settings must agree between the executive and this function*
import crcmod
crc32_func = crcmod.mkCrcFun(0x104C11DB7, initCrc=0xFFFFFFFF, rev=False, xorOut=0)
with open(str(source[0]), 'rb') as f:
data = f.read()
# Ignore the last four bytes of the file since that is where the checksum will go
data = data[:-4]
# Make sure the magic number is correct so that we're dealing with an actual firmware image
magicbin = data[-4:]
magic, = struct.unpack('<L', magicbin)
if magic != 0xBAADDAAD:
raise BuildError("Attempting to patch a file that is not a CDB binary or has the wrong size", reason="invalid magic number found", actual_magic=magic, desired_magic=0xBAADDAAD)
# Calculate CRC32 in the same way as its done in the target microcontroller
checksum = crc32_func(data) & 0xFFFFFFFF
with open(str(target[0]), 'w') as f:
# hex() appends an 'L' suffix to long integers on Python 2, so strip it if present
checkhex = hex(checksum)
if checkhex[-1] == 'L':
checkhex = checkhex[:-1]
f.write("--defsym=__image_checksum=%s\n" % checkhex) | [
"Create a linker command file for patching an application checksum into a firmware image"
] |
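For reference, the same CRC-32 configuration can be reproduced standalone to verify a patched image offline. This is a minimal sketch with made-up image bytes, not a real firmware dump:

import struct
import crcmod

# Same parameters as checksum_creation_action: polynomial 0x104C11DB7,
# seed 0xFFFFFFFF, non-reversed shift register, no final inversion.
crc32_func = crcmod.mkCrcFun(0x104C11DB7, initCrc=0xFFFFFFFF, rev=False, xorOut=0)

# Fake image body ending in the required magic number (checksum slot already stripped).
image = b'\x00' * 12 + struct.pack('<L', 0xBAADDAAD)
checksum = crc32_func(image) & 0xFFFFFFFF
print("--defsym=__image_checksum=%s" % hex(checksum))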
Please provide a description of the function:def create_arg_file(target, source, env):
output_name = str(target[0])
with open(output_name, "w") as outfile:
for define in env.get('CPPDEFINES', []):
outfile.write(define + '\n')
include_folders = target[0].RDirs(tuple(env.get('CPPPATH', [])))
include_folders.append('.')
for include_folder in include_folders:
include_folder = str(include_folder)
if not include_folder.startswith('build'):
include_folder = os.path.join('firmware', 'src', include_folder)
outfile.write('"-I{}"\n'.format(include_folder.replace('\\', '\\\\'))) | [
"Create an argument file containing -I and -D arguments to gcc.\n\n This file will be passed to gcc using @<path>.\n "
] |
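To make the @-file mechanism concrete, here is a hedged sketch of producing and consuming such a file. The flag values are illustrative and assume build_defines emits full '-DNAME=value' strings, which matches the empty CPPDEFPREFIX configured in setup_environment:

import subprocess

# Illustrative argument file in the shape create_arg_file produces:
# one define or quoted -I flag per line.
with open('gcc_args.txt', 'w') as outfile:
    outfile.write('-DBUILD=1\n')
    outfile.write('"-Ifirmware/src"\n')

# GCC expands the file contents in place of the @ argument, sidestepping
# the Windows command-line length limit mentioned above.
subprocess.run(['arm-none-eabi-gcc', '@gcc_args.txt', '-c',
                '-o', 'main.o', 'main.c'], check=True)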
Please provide a description of the function:def merge_hex_executables(target, source, env):
output_name = str(target[0])
hex_final = IntelHex()
for image in source:
file = str(image)
root, ext = os.path.splitext(file)
file_format = ext[1:]
if file_format == 'elf':
file = root + '.hex'
hex_data = IntelHex(file)
# merge will throw errors on mismatched Start Segment Addresses, which we don't need
# See <https://stackoverflow.com/questions/26295776/what-are-the-intel-hex-records-type-03-or-05-doing-in-ihex-program-for-arm>
hex_data.start_addr = None
hex_final.merge(hex_data, overlap='error')
with open(output_name, 'w') as outfile:
hex_final.write_hex_file(outfile) | [
"Combine all hex files into a singular executable file."
] |
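The same merge can be sketched standalone with the intelhex package; the input file names here are hypothetical:

from intelhex import IntelHex

merged = IntelHex()
for path in ['bootloader.hex', 'app.hex']:
    part = IntelHex(path)
    part.start_addr = None  # drop start-address records so they cannot conflict
    merged.merge(part, overlap='error')  # overlapping data bytes raise an error

with open('combined.hex', 'w') as outfile:
    merged.write_hex_file(outfile)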
Please provide a description of the function:def ensure_image_is_hex(input_path):
family = utilities.get_family('module_settings.json')
target = family.platform_independent_target()
build_dir = target.build_dirs()['build']
if platform.system() == 'Windows':
env = Environment(tools=['mingw'], ENV=os.environ)
else:
env = Environment(tools=['default'], ENV=os.environ)
input_path = str(input_path)
image_name = os.path.basename(input_path)
root, ext = os.path.splitext(image_name)
if len(ext) == 0:
raise BuildError("Unknown file format or missing file extension in ensure_image_is_hex", file_name=input_path)
file_format = ext[1:]
if file_format == 'hex':
return input_path
if file_format == 'elf':
new_file = os.path.join(build_dir, root + '.hex')
if new_file not in CONVERTED_HEX_FILES:
env.Command(new_file, input_path, action=Action("arm-none-eabi-objcopy -O ihex $SOURCE $TARGET",
"Creating intel hex file from: $SOURCE"))
CONVERTED_HEX_FILES.add(new_file)
return new_file
raise BuildError("Unknown file format extension in ensure_image_is_hex",
file_name=input_path, extension=file_format) | [
"Return a path to a hex version of a firmware image.\n\n If the input file is already in hex format then input_path\n is returned and nothing is done. If it is not in hex format\n then an SCons action is added to convert it to hex and the\n target output file path is returned.\n\n A cache is kept so that each file is only converted once.\n\n Args:\n input_path (str): A path to a firmware image.\n\n Returns:\n str: The path to a hex version of input_path, this may\n be equal to input_path if it is already in hex format.\n "
] |
Please provide a description of the function:def _dispatch_rpc(self, address, rpc_id, arg_payload):
if self.emulator.is_tile_busy(address):
self._track_change('device.rpc_busy_response', (address, rpc_id, arg_payload, None, None), formatter=format_rpc)
raise BusyRPCResponse()
try:
# Send the RPC immediately and wait for the response
resp = super(EmulatedDevice, self).call_rpc(address, rpc_id, arg_payload)
self._track_change('device.rpc_sent', (address, rpc_id, arg_payload, resp, None), formatter=format_rpc)
return resp
except AsynchronousRPCResponse:
self._track_change('device.rpc_started', (address, rpc_id, arg_payload, None, None), formatter=format_rpc)
raise
except Exception as exc:
self._track_change('device.rpc_exception', (address, rpc_id, arg_payload, None, exc), formatter=format_rpc)
raise | [
"Background work queue handler to dispatch RPCs."
] |
Please provide a description of the function:def finish_async_rpc(self, address, rpc_id, response):
try:
self.emulator.finish_async_rpc(address, rpc_id, response)
self._track_change('device.rpc_finished', (address, rpc_id, None, response, None), formatter=format_rpc)
except Exception as exc:
self._track_change('device.rpc_exception', (address, rpc_id, None, response, exc), formatter=format_rpc)
raise | [
"Finish a previous asynchronous RPC.\n\n This method should be called by a peripheral tile that previously\n had an RPC called on it and chose to response asynchronously by\n raising ``AsynchronousRPCResponse`` in the RPC handler itself.\n\n The response passed to this function will be returned to the caller\n as if the RPC had returned it immediately.\n\n The rpc response will be sent in the RPC thread. By default this\n method will block until the response is finished. If you don't\n want to block, you can pass sync=False\n\n Args:\n address (int): The tile address the RPC was called on.\n rpc_id (int): The ID of the RPC that was called.\n response (bytes): The bytes that should be returned to\n the caller of the RPC.\n "
] |
Please provide a description of the function:def start(self, channel=None):
super(EmulatedDevice, self).start(channel)
self.emulator.start() | [
"Start this emulated device.\n\n This triggers the controller to call start on all peripheral tiles in\n the device to make sure they start after the controller does and then\n it waits on each one to make sure they have finished initializing\n before returning.\n\n Args:\n channel (IOTilePushChannel): the channel with a stream and trace\n routine for streaming and tracing data through a VirtualInterface\n "
] |
Please provide a description of the function:def dump_state(self):
state = {}
state['tile_states'] = {}
for address, tile in self._tiles.items():
state['tile_states'][address] = tile.dump_state()
return state | [
"Dump the current state of this emulated object as a dictionary.\n\n Returns:\n dict: The current state of the object that could be passed to load_state.\n "
] |
Please provide a description of the function:def rpc(self, address, rpc_id, *args, **kwargs):
if isinstance(rpc_id, RPCDeclaration):
arg_format = rpc_id.arg_format
resp_format = rpc_id.resp_format
rpc_id = rpc_id.rpc_id
else:
arg_format = kwargs.get('arg_format', None)
resp_format = kwargs.get('resp_format', None)
arg_payload = b''
if arg_format is not None:
arg_payload = pack_rpc_payload(arg_format, args)
self._logger.debug("Sending rpc to %d:%04X, payload=%s", address, rpc_id, args)
resp_payload = self.call_rpc(address, rpc_id, arg_payload)
if resp_format is None:
return []
resp = unpack_rpc_payload(resp_format, resp_payload)
return resp | [
"Immediately dispatch an RPC inside this EmulatedDevice.\n\n This function is meant to be used for testing purposes as well as by\n tiles inside a complex EmulatedDevice subclass that need to\n communicate with each other. It should only be called from the main\n virtual device thread where start() was called from.\n\n **Background workers may not call this method since it may cause them to deadlock.**\n\n Args:\n address (int): The address of the tile that has the RPC.\n rpc_id (int): The 16-bit id of the rpc we want to call\n *args: Any required arguments for the RPC as python objects.\n **kwargs: Only two keyword arguments are supported:\n - arg_format: A format specifier for the argument list\n - result_format: A format specifier for the result\n\n Returns:\n list: A list of the decoded response members from the RPC.\n "
] |
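As a usage sketch (the tile address, RPC id, argument values, and format strings below are invented for illustration and assume the struct-style format characters used by pack_rpc_payload/unpack_rpc_payload):

# Call RPC 0x8000 on the tile at address 8 with two uint32 arguments,
# decoding a single uint32 out of the response payload.
error, = device.rpc(8, 0x8000, 1, 2, arg_format='LL', resp_format='L')

# With no arguments and resp_format omitted, an empty payload is sent and
# an empty list is returned, so fire-and-forget RPCs can ignore the result.
device.rpc(8, 0x8001)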
Please provide a description of the function:def call_rpc(self, address, rpc_id, payload=b""):
return self.emulator.call_rpc_external(address, rpc_id, payload) | [
"Call an RPC by its address and ID.\n\n This will send the RPC to the background rpc dispatch thread and\n synchronously wait for the response.\n\n Args:\n address (int): The address of the mock tile this RPC is for\n rpc_id (int): The number of the RPC\n payload (bytes): A byte string of payload parameters up to 20 bytes\n\n Returns:\n bytes: The response payload from the RPC\n "
] |
Please provide a description of the function:def trace_sync(self, data, timeout=5.0):
done = AwaitableResponse()
self.trace(data, callback=done.set_result)
return done.wait(timeout) | [
"Send tracing data and wait for it to finish.\n\n This awaitable coroutine wraps VirtualIOTileDevice.trace() and turns\n the callback into an awaitable object. The appropriate usage of this\n method is by calling it inside the event loop as:\n\n await device.trace_sync(data)\n\n Args:\n data (bytes): The raw data that should be traced.\n timeout (float): The maximum number of seconds to wait before\n timing out.\n\n Returns:\n awaitable: An awaitable object with the result.\n\n The result will be True if the data was sent successfully\n or False if the data could not be sent in its entirety.\n\n When False is returned, there is no guarantee about how much of\n the data was sent, if any, just that it was not known to be\n successfully sent.\n "
] |
Please provide a description of the function:def stream_sync(self, report, timeout=120.0):
done = AwaitableResponse()
self.stream(report, callback=done.set_result)
return done.wait(timeout) | [
"Send a report and wait for it to finish.\n\n This awaitable coroutine wraps VirtualIOTileDevice.stream() and turns\n the callback into an awaitable object. The appropriate usage of this\n method is by calling it inside the event loop as:\n\n await device.stream_sync(data)\n\n Args:\n report (IOTileReport): The report that should be streamed.\n timeout (float): The maximum number of seconds to wait before\n timing out.\n\n Returns:\n awaitable: An awaitable object with the result.\n\n The result will be True if the data was sent successfully\n or False if the data could not be sent in its entirety.\n\n When False is returned, there is no guarantee about how much of\n the data was sent, if any, just that it was not known to be\n successfully sent.\n "
] |
Please provide a description of the function:def synchronize_task(self, func, *args, **kwargs):
async def _runner():
return func(*args, **kwargs)
return self.emulator.run_task_external(_runner()) | [
"Run callable in the rpc thread and wait for it to finish.\n\n The callable ``func`` will be passed into the EmulationLoop and run\n there. This method will block until ``func`` is finished and\n return/raise whatever that callable returns/raises.\n\n This method is mainly useful for performing an activity that needs to\n be synchronized with the rpc thread for safety reasons.\n\n If this method is called from the rpc thread itself, it will just\n run the task and return its result.\n\n Args:\n func (callable): A method with signature callable(*args, **kwargs),\n that will be called with the optional *args and **kwargs passed\n to this method.\n *args: Arguments that will be passed to callable.\n **kwargs: Keyword arguments that will be passed to callable.\n\n Returns:\n object: Whatever callable returns after it runs.\n "
] |
Please provide a description of the function:def restore_state(self, state):
tile_states = state.get('tile_states', {})
for address, tile_state in tile_states.items():
address = int(address)
tile = self._tiles.get(address)
if tile is None:
raise DataError("Invalid dumped state, tile does not exist at address %d" % address, address=address)
tile.restore_state(tile_state) | [
"Restore the current state of this emulated device.\n\n Args:\n state (dict): A previously dumped state produced by dump_state.\n "
] |
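Together, dump_state and restore_state support a snapshot/rollback pattern that is handy in tests; a hedged sketch against some EmulatedDevice instance named device:

import json

snapshot = device.dump_state()      # capture every tile's state as a plain dict

# ... exercise the device here, e.g. via device.rpc(...) ...

device.restore_state(snapshot)      # roll all tiles back to the snapshot

# The int(address) coercion in restore_state lets a snapshot survive a JSON
# round trip, where dictionary keys become strings.
with open('device_state.json', 'w') as outfile:
    json.dump(snapshot, outfile)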
Please provide a description of the function:def load_metascenario(self, scenario_list):
for scenario in scenario_list:
name = scenario.get('name')
if name is None:
raise DataError("Scenario in scenario list is missing a name parameter", scenario=scenario)
tile_address = scenario.get('tile')
args = scenario.get('args', {})
dest = self
if tile_address is not None:
dest = self._tiles.get(tile_address)
if dest is None:
raise DataError("Attempted to load a scenario into a tile address that does not exist", address=tile_address, valid_addresses=list(self._tiles))
dest.load_scenario(name, **args) | [
"Load one or more scenarios from a list.\n\n Each entry in scenario_list should be a dict containing at least a\n name key and an optional tile key and args key. If tile is present\n and its value is not None, the scenario specified will be loaded into\n the given tile only. Otherwise it will be loaded into the entire\n device.\n\n If the args key is specified is will be passed as keyword arguments\n to load_scenario.\n\n Args:\n scenario_list (list): A list of dicts for each scenario that should\n be loaded.\n "
] |
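A hedged example of the expected input shape; the scenario names, tile address, and arguments are invented, since real names depend on what scenarios the device and its tiles register:

device.load_metascenario([
    # Loaded only into the tile at address 8, with keyword arguments.
    {'name': 'loaded_counter', 'tile': 8, 'args': {'count': 100}},

    # No 'tile' key (or a None value): loaded into the device as a whole.
    {'name': 'default_settings'},
])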
Please provide a description of the function:def generate(env):
SCons.Tool.createStaticLibBuilder(env)
# Set-up ms tools paths
msvc_setup_env_once(env)
env['AR'] = 'lib'
env['ARFLAGS'] = SCons.Util.CLVar('/nologo')
env['ARCOM'] = "${TEMPFILE('$AR $ARFLAGS /OUT:$TARGET $SOURCES','$ARCOMSTR')}"
env['LIBPREFIX'] = ''
env['LIBSUFFIX'] = '.lib' | [
"Add Builders and construction variables for lib to an Environment."
] |
Please provide a description of the function:def call_rpc(self, rpc_id, payload=bytes()):
# If we define the RPC locally, call that one. We use this for reporting
# our status
if super(ServiceDelegateTile, self).has_rpc(rpc_id):
return super(ServiceDelegateTile, self).call_rpc(rpc_id, payload)
async def _awaitable_wrapper():
# FIXME: We set the timeout here to a very large number since we don't
# know what an appropriate timeout is and don't want to restrict the
# run time of RPCs that could be long running. The caller of the RPC
# through the tile will know what an appropriate timeout is for the
# RPC that they are trying to call.
resp = await self._client.send_rpc(self._service, rpc_id, payload, timeout=120.0)
result = resp['result']
if result == 'success':
return resp['response']
elif result == 'service_not_found':
raise TileNotFoundError("Could not find service by name", name=self._service)
elif result == 'rpc_not_found':
raise RPCNotFoundError("Could not find RPC on service", name=self._service, rpc_id=rpc_id)
elif result == 'invalid_arguments':
raise RPCInvalidArgumentsError("Invalid arguments to RPC", name=self._service, rpc_id=rpc_id)
elif result == 'invalid_response':
raise RPCInvalidReturnValueError("Invalid response from RPC", name=self._service, rpc_id=rpc_id)
elif result == 'execution_exception':
raise InternalError("Exception raised during processing RPC", name=self._service, rpc_id=rpc_id)
else:
raise InternalError("Unknown response received from delegated RPC", name=self._service, rpc_id=rpc_id, result=result)
return _awaitable_wrapper() | [
"Call an RPC by its ID.\n\n Args:\n rpc_id (int): The number of the RPC\n payload (bytes): A byte string of payload parameters up to 20 bytes\n\n Returns:\n str: The response payload from the RPC\n "
] |
Please provide a description of the function:def associated_stream(self):
if not self.important:
raise InternalError("You may only call autocopied_stream on when DataStream.important is True", stream=self)
if self.stream_id >= DataStream.ImportantSystemStorageStart:
stream_type = DataStream.BufferedType
else:
stream_type = DataStream.OutputType
return DataStream(stream_type, self.stream_id, True) | [
"Return the corresponding output or storage stream for an important system input.\n\n Certain system inputs are designed as important and automatically\n copied to output streams without requiring any manual interaction.\n\n This method returns the corresponding stream for an important system\n input. It will raise an InternalError unlesss the self.important\n property is True.\n\n Returns:\n DataStream: The corresponding output or storage stream.\n\n Raises:\n InternalError: If this stream is not marked as an important system input.\n "
] |
Please provide a description of the function:def FromString(cls, string_rep):
rep = str(string_rep)
parts = rep.split()
if len(parts) > 3:
raise ArgumentError("Too many whitespace separated parts of stream designator", input_string=string_rep)
elif len(parts) == 3 and parts[0] != u'system':
raise ArgumentError("Too many whitespace separated parts of stream designator", input_string=string_rep)
elif len(parts) < 2:
raise ArgumentError("Too few components in stream designator", input_string=string_rep)
# Now actually parse the string
if len(parts) == 3:
system = True
stream_type = parts[1]
stream_id = parts[2]
else:
system = False
stream_type = parts[0]
stream_id = parts[1]
try:
stream_id = int(stream_id, 0)
except ValueError as exc:
raise ArgumentError("Could not convert stream id to integer", error_string=str(exc), stream_id=stream_id)
try:
stream_type = cls.StringToType[stream_type]
except KeyError:
raise ArgumentError("Invalid stream type given", stream_type=stream_type, known_types=cls.StringToType.keys())
return DataStream(stream_type, stream_id, system) | [
"Create a DataStream from a string representation.\n\n The format for stream designators when encoded as strings is:\n [system] (buffered|unbuffered|constant|input|count|output) <integer>\n\n Args:\n string_rep (str): The string representation to turn into a\n DataStream\n "
] |
Please provide a description of the function:def FromEncoded(cls, encoded):
stream_type = (encoded >> 12) & 0b1111
stream_system = bool(encoded & (1 << 11))
stream_id = (encoded & ((1 << 11) - 1))
return DataStream(stream_type, stream_id, stream_system) | [
"Create a DataStream from an encoded 16-bit unsigned integer.\n\n Returns:\n DataStream: The decoded DataStream object\n "
] |
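The decode above implies a 16-bit layout of stream type in bits 12-15, the system flag in bit 11, and the stream id in bits 0-10. A small round-trip sketch with arbitrary values:

stream_type, system, stream_id = 5, True, 0x00A   # arbitrary example values

encoded = (stream_type << 12) | (int(system) << 11) | stream_id
assert encoded == 0x580A

decoded = DataStream.FromEncoded(encoded)
assert (decoded.stream_type, decoded.system, decoded.stream_id) == (5, True, 0x00A)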
Please provide a description of the function:def as_stream(self):
if not self.singular:
raise ArgumentError("Attempted to convert a non-singular selector to a data stream, it matches multiple", selector=self)
return DataStream(self.match_type, self.match_id, self.match_spec == DataStreamSelector.MatchSystemOnly) | [
"Convert this selector to a DataStream.\n\n This function will only work if this is a singular selector that\n matches exactly one DataStream.\n "
] |
Please provide a description of the function:def FromStream(cls, stream):
if stream.system:
specifier = DataStreamSelector.MatchSystemOnly
else:
specifier = DataStreamSelector.MatchUserOnly
return DataStreamSelector(stream.stream_type, stream.stream_id, specifier) | [
"Create a DataStreamSelector from a DataStream.\n\n Args:\n stream (DataStream): The data stream that we want to convert.\n "
] |
Please provide a description of the function:def FromEncoded(cls, encoded):
match_spec = encoded & ((1 << 11) | (1 << 15))
match_type = (encoded & (0b111 << 12)) >> 12
match_id = encoded & ((1 << 11) - 1)
if match_spec not in cls.SpecifierEncodingMap:
raise ArgumentError("Unknown encoded match specifier", match_spec=match_spec, known_specifiers=cls.SpecifierEncodingMap.keys())
spec_name = cls.SpecifierEncodingMap[match_spec]
# Handle wildcard matches
if match_id == cls.MatchAllCode:
match_id = None
return DataStreamSelector(match_type, match_id, spec_name) | [
"Create a DataStreamSelector from an encoded 16-bit value.\n\n The binary value must be equivalent to what is produced by\n a call to self.encode() and will turn that value back into\n a a DataStreamSelector.\n\n Note that the following operation is a no-op:\n\n DataStreamSelector.FromEncode(value).encode()\n\n Args:\n encoded (int): The encoded binary representation of a\n DataStreamSelector.\n\n Returns:\n DataStreamSelector: The decoded selector.\n "
] |
Please provide a description of the function:def FromString(cls, string_rep):
rep = str(string_rep)
# Strip an optional 'node(s)' suffix; remove the plural form first so that
# removing 'node' does not leave a dangling 's' behind.
rep = rep.replace(u'nodes', '')
rep = rep.replace(u'node', '')
if rep.startswith(u'all'):
parts = rep.split()
spec_string = u''
if len(parts) == 3:
spec_string = parts[1]
stream_type = parts[2]
elif len(parts) == 2:
stream_type = parts[1]
else:
raise ArgumentError("Invalid wildcard stream selector", string_rep=string_rep)
try:
# Remove pluralization that can come with e.g. 'all system outputs'
if stream_type.endswith(u's'):
stream_type = stream_type[:-1]
stream_type = DataStream.StringToType[stream_type]
except KeyError:
raise ArgumentError("Invalid stream type given", stream_type=stream_type, known_types=DataStream.StringToType.keys())
stream_spec = DataStreamSelector.SpecifierNames.get(spec_string, None)
if stream_spec is None:
raise ArgumentError("Invalid stream specifier given (should be system, user, combined or blank)", string_rep=string_rep, spec_string=spec_string)
return DataStreamSelector(stream_type, None, stream_spec)
# If we're not matching a wildcard stream type, then the match is exactly
# the same as a DataStream identifier, so use that to match it.
stream = DataStream.FromString(rep)
return DataStreamSelector.FromStream(stream) | [
"Create a DataStreamSelector from a string.\n\n The format of the string should either be:\n\n all <type>\n OR\n <type> <id>\n\n Where type is [system] <stream type>, with <stream type>\n defined as in DataStream\n\n Args:\n rep (str): The string representation to convert to a DataStreamSelector\n "
] |
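Some examples of accepted selector strings, exercised against the matches() method defined below; the stream ids are arbitrary and the blank specifier is assumed to default to matching user streams:

# Wildcard selectors: optional specifier, then a stream type (plural allowed).
all_outputs = DataStreamSelector.FromString('all outputs')
sys_outputs = DataStreamSelector.FromString('all system outputs')

# Non-wildcard selectors parse exactly like DataStream designators.
one_output = DataStreamSelector.FromString('output 5')

assert all_outputs.matches(DataStream.FromString('output 5'))
assert not one_output.matches(DataStream.FromString('output 6'))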
Please provide a description of the function:def matches(self, stream):
if self.match_type != stream.stream_type:
return False
if self.match_id is not None:
return self.match_id == stream.stream_id
if self.match_spec == DataStreamSelector.MatchUserOnly:
return not stream.system
elif self.match_spec == DataStreamSelector.MatchSystemOnly:
return stream.system
elif self.match_spec == DataStreamSelector.MatchUserAndBreaks:
return (not stream.system) or (stream.system and (stream.stream_id in DataStream.KnownBreakStreams))
# The other case is that match_spec is MatchCombined, which matches everything
# regardless of system of user flag
return True | [
"Check if this selector matches the given stream\n\n Args:\n stream (DataStream): The stream to check\n\n Returns:\n bool: True if this selector matches the stream\n "
] |
Please provide a description of the function:def encode(self):
match_id = self.match_id
if match_id is None:
match_id = (1 << 11) - 1
return (self.match_type << 12) | DataStreamSelector.SpecifierEncodings[self.match_spec] | match_id | [
"Encode this stream as a packed 16-bit unsigned integer.\n\n Returns:\n int: The packed encoded stream\n "
] |
Please provide a description of the function:def EnumVariable(key, help, default, allowed_values, map={}, ignorecase=0):
help = '%s (%s)' % (help, '|'.join(allowed_values))
# define validator
if ignorecase >= 1:
validator = lambda key, val, env: \
_validator(key, val.lower(), env, allowed_values)
else:
validator = lambda key, val, env: \
_validator(key, val, env, allowed_values)
# define converter
if ignorecase == 2:
converter = lambda val: map.get(val.lower(), val).lower()
elif ignorecase == 1:
converter = lambda val: map.get(val.lower(), val)
else:
converter = lambda val: map.get(val, val)
return (key, help, default, validator, converter) | [
"\n The input parameters describe an option with only certain values\n allowed. They are returned with an appropriate converter and\n validator appended. The result is usable for input to\n Variables.Add().\n\n 'key' and 'default' are the values to be passed on to Variables.Add().\n\n 'help' will be appended by the allowed values automatically\n\n 'allowed_values' is a list of strings, which are allowed as values\n for this option.\n\n The 'map'-dictionary may be used for converting the input value\n into canonical values (e.g. for aliases).\n\n 'ignorecase' defines the behaviour of the validator:\n\n If ignorecase == 0, the validator/converter are case-sensitive.\n If ignorecase == 1, the validator/converter are case-insensitive.\n If ignorecase == 2, the validator/converter is case-insensitive and the converted value will always be lower-case.\n\n The 'validator' tests whether the value is in the list of allowed values. The 'converter' converts input values\n according to the given 'map'-dictionary (unmapped input values are returned unchanged).\n "
] |
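A typical usage sketch inside an SConstruct; the variable name, values, and alias are illustrative (imports shown for clarity, since SCons injects these names globally):

from SCons.Script import Environment, Variables

vars = Variables()
vars.Add(EnumVariable(
    'debug', 'debug output and symbols', 'no',
    allowed_values=('yes', 'no', 'full'),
    map={'all': 'full'},   # alias: debug=all is canonicalized to 'full'
    ignorecase=2))         # case-insensitive, value forced to lower-case

env = Environment(variables=vars)
print(env['debug'])        # e.g. 'full' after running: scons debug=ALL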
Please provide a description of the function:def generate(env):
M4Action = SCons.Action.Action('$M4COM', '$M4COMSTR')
bld = SCons.Builder.Builder(action = M4Action, src_suffix = '.m4')
env['BUILDERS']['M4'] = bld
# .m4 files might include other files, and it would be pretty hard
# to write a scanner for it, so let's just cd to the dir of the m4
# file and run from there.
# The src_suffix setup is like so: file.c.m4 -> file.c,
# file.cpp.m4 -> file.cpp etc.
env['M4'] = 'm4'
env['M4FLAGS'] = SCons.Util.CLVar('-E')
env['M4COM'] = 'cd ${SOURCE.rsrcdir} && $M4 $M4FLAGS < ${SOURCE.file} > ${TARGET.abspath}' | [
"Add Builders and construction variables for m4 to an Environment."
] |
Please provide a description of the function:async def future_command(self, cmd):
if self._asyncio_cmd_lock is None:
raise HardwareError("Cannot use future_command because no event loop attached")
async with self._asyncio_cmd_lock:
return await self._future_command_unlocked(cmd) | [
"Run command as a coroutine and return a future.\n\n Args:\n loop (BackgroundEventLoop): The loop that we should attach\n the future too.\n cmd (list): The command and arguments that we wish to call.\n\n Returns:\n asyncio.Future: An awaitable future with the result of the operation.\n "
] |
Please provide a description of the function:def _future_command_unlocked(self, cmd):
future = self._loop.create_future()
asyncio_loop = self._loop.get_loop()
def _done_callback(result):
retval = result['return_value']
if not result['result']:
future.set_exception(HardwareError("Error executing synchronous command",
command=cmd, return_value=retval))
else:
future.set_result(retval)
callback = functools.partial(asyncio_loop.call_soon_threadsafe, _done_callback)
self._commands.put((cmd, callback, True, None))
return future | [
"Run command as a coroutine and return a future.\n\n Args:\n loop (BackgroundEventLoop): The loop that we should attach\n the future too.\n cmd (list): The command and arguments that we wish to call.\n\n Returns:\n asyncio.Future: An awaitable future with the result of the operation.\n "
] |
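The core pattern here, handing a result from a worker thread back to an asyncio future via call_soon_threadsafe, can be reproduced standalone; the worker thread below stands in for this class's command thread:

import asyncio
import functools
import threading

async def main():
    loop = asyncio.get_running_loop()
    future = loop.create_future()

    def _done_callback(result):
        # Runs on the event-loop thread, so touching the future is safe.
        future.set_result(result)

    # The worker never touches the future directly; it schedules the
    # callback onto the loop, exactly as _future_command_unlocked does.
    callback = functools.partial(loop.call_soon_threadsafe, _done_callback)
    threading.Thread(target=lambda: callback('return value')).start()

    print(await future)

asyncio.run(main())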