Code | Summary
---|---
Please provide a description of the function:def clear(self, timestamp):
    self.storage.clear()
    self.push(streams.DATA_CLEARED, timestamp, 1) | [
"Clear all data from the RSL.\n\n This pushes a single reading once we clear everything so that\n we keep track of the highest ID that we have allocated to date.\n\n This needs the current timestamp to be able to properly timestamp\n the cleared storage reading that it pushes.\n\n Args:\n timestamp (int): The current timestamp to store with the\n reading.\n "
] |
Please provide a description of the function:def clear_to_reset(self, config_vars):
    self._logger.info("Config vars in sensor log reset: %s", config_vars)
    super(SensorLogSubsystem, self).clear_to_reset(config_vars)
    self.storage.destroy_all_walkers()
    self.dump_walker = None
    if config_vars.get('storage_fillstop', False):
        self._logger.debug("Marking storage log fill/stop")
        self.storage.set_rollover('storage', False)
    if config_vars.get('streaming_fillstop', False):
        self._logger.debug("Marking streaming log fill/stop")
        self.storage.set_rollover('streaming', False) | [
"Clear all volatile information across a reset."
] |
Please provide a description of the function:def push(self, stream_id, timestamp, value):
    stream = DataStream.FromEncoded(stream_id)
    reading = IOTileReading(timestamp, stream_id, value)
    try:
        self.storage.push(stream, reading)
        return Error.NO_ERROR
    except StorageFullError:
        return pack_error(ControllerSubsystem.SENSOR_LOG, SensorLogError.RING_BUFFER_FULL) | [
"Push a value to a stream.\n\n Args:\n stream_id (int): The stream we want to push to.\n timestamp (int): The raw timestamp of the value we want to\n store.\n value (int): The 32-bit integer value we want to push.\n Returns:\n int: Packed 32-bit error code.\n "
] |
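The error values returned above are 32-bit codes that combine a subsystem ID with a subsystem-specific error. The exact bit layout used by `pack_error` is not shown in this file; the sketch below assumes the common convention of subsystem in the high 16 bits and error code in the low 16 bits, purely for illustration:

```python
def pack_error(subsystem, code):
    """Pack a subsystem ID and an error code into one 32-bit value.

    Assumed layout (hypothetical): high 16 bits = subsystem, low 16 = code.
    """
    return ((subsystem & 0xFFFF) << 16) | (code & 0xFFFF)


def unpack_error(packed):
    """Split a packed 32-bit error back into its (subsystem, code) pair."""
    return (packed >> 16) & 0xFFFF, packed & 0xFFFF
```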
Please provide a description of the function:def inspect_virtual(self, stream_id):
    stream = DataStream.FromEncoded(stream_id)
    if stream.buffered:
        return [pack_error(ControllerSubsystem.SENSOR_LOG, SensorLogError.VIRTUAL_STREAM_NOT_FOUND), 0]
    try:
        reading = self.storage.inspect_last(stream, only_allocated=True)
        return [Error.NO_ERROR, reading.value]
    except StreamEmptyError:
        return [Error.NO_ERROR, 0]
    except UnresolvedIdentifierError:
        return [pack_error(ControllerSubsystem.SENSOR_LOG, SensorLogError.VIRTUAL_STREAM_NOT_FOUND), 0] | [
"Inspect the last value written into a virtual stream.\n\n Args:\n stream_id (int): The virtual stream was want to inspect.\n\n Returns:\n (int, int): An error code and the stream value.\n "
] |
Please provide a description of the function:def dump_begin(self, selector_id):
    if self.dump_walker is not None:
        self.storage.destroy_walker(self.dump_walker)
    selector = DataStreamSelector.FromEncoded(selector_id)
    self.dump_walker = self.storage.create_walker(selector, skip_all=False)
    return Error.NO_ERROR, Error.NO_ERROR, self.dump_walker.count() | [
"Start dumping a stream.\n\n Args:\n selector_id (int): The buffered stream we want to dump.\n\n Returns:\n (int, int, int): Error code, second error code, number of available readings\n "
] |
Please provide a description of the function:def dump_seek(self, reading_id):
    if self.dump_walker is None:
        return (pack_error(ControllerSubsystem.SENSOR_LOG, SensorLogError.STREAM_WALKER_NOT_INITIALIZED),
                Error.NO_ERROR, 0)
    try:
        exact = self.dump_walker.seek(reading_id, target='id')
    except UnresolvedIdentifierError:
        return (pack_error(ControllerSubsystem.SENSOR_LOG, SensorLogError.NO_MORE_READINGS),
                Error.NO_ERROR, 0)
    error = Error.NO_ERROR
    if not exact:
        error = pack_error(ControllerSubsystem.SENSOR_LOG, SensorLogError.ID_FOUND_FOR_ANOTHER_STREAM)
    return (error, Error.NO_ERROR, self.dump_walker.count()) | [
"Seek the dump streamer to a given ID.\n\n Returns:\n (int, int, int): Two error codes and the count of remaining readings.\n\n The first error code covers the seeking process.\n The second error code covers the stream counting process (cannot fail)\n The third item in the tuple is the number of readings left in the stream.\n "
] |
Please provide a description of the function:def dump_next(self):
    if self.dump_walker is None:
        return pack_error(ControllerSubsystem.SENSOR_LOG, SensorLogError.STREAM_WALKER_NOT_INITIALIZED)
    try:
        return self.dump_walker.pop()
    except StreamEmptyError:
        return None | [
"Dump the next reading from the stream.\n\n Returns:\n IOTileReading: The next reading or None if there isn't one\n "
] |
Please provide a description of the function:def highest_stored_id(self):
    shared = [0]
    def _keep_max(_i, reading):
        if reading.reading_id > shared[0]:
            shared[0] = reading.reading_id
    self.engine.scan_storage('storage', _keep_max)
    self.engine.scan_storage('streaming', _keep_max)
    return shared[0] | [
"Scan through the stored readings and report the highest stored id.\n\n Returns:\n int: The highest stored id.\n "
] |
Please provide a description of the function:def rsl_push_reading(self, value, stream_id):
    #FIXME: Fix this with timestamp from clock manager task
    err = self.sensor_log.push(stream_id, 0, value)
    return [err] | [
"Push a reading to the RSL directly."
] |
Please provide a description of the function:def rsl_push_many_readings(self, value, count, stream_id):
    #FIXME: Fix this with timestamp from clock manager task
    for i in range(1, count+1):
        err = self.sensor_log.push(stream_id, 0, value)
        if err != Error.NO_ERROR:
            return [err, i]
    return [Error.NO_ERROR, count] | [
"Push many copies of a reading to the RSL."
] |
Please provide a description of the function:def rsl_count_readings(self):
    storage, output = self.sensor_log.count()
    return [Error.NO_ERROR, storage, output] | [
"Count how many readings are stored in the RSL."
] |
Please provide a description of the function:def rsl_dump_stream_begin(self, stream_id):
    err, err2, count = self.sensor_log.dump_begin(stream_id)
    #FIXME: Fix this with the uptime of the clock manager task
    return [err, err2, count, 0] | [
"Begin dumping the contents of a stream."
] |
Please provide a description of the function:def rsl_dump_stream_next(self, output_format):
    timestamp = 0
    stream_id = 0
    value = 0
    reading_id = 0
    error = Error.NO_ERROR
    reading = self.sensor_log.dump_next()
    if reading is not None:
        timestamp = reading.raw_time
        stream_id = reading.stream
        value = reading.value
        reading_id = reading.reading_id
    else:
        error = pack_error(ControllerSubsystem.SENSOR_LOG, SensorLogError.NO_MORE_READINGS)
    if output_format == 0:
        return [struct.pack("<LLL", error, timestamp, value)]
    elif output_format != 1:
        raise ValueError("Output formats other than 0 and 1 are not yet supported")
    return [struct.pack("<LLLLH2x", error, timestamp, value, reading_id, stream_id)] | [
"Dump the next reading from the output stream."
] |
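Both return paths above are fixed-size little-endian records built with `struct.pack`. A round-trip with made-up values shows how a client could decode each format (format 0 is 12 bytes; format 1 is 20 bytes including 2 bytes of trailing padding):

```python
import struct

# Format 0: error, timestamp, value as three uint32 values.
rec0 = struct.pack("<LLL", 0, 1000, 42)
error, timestamp, value = struct.unpack("<LLL", rec0)

# Format 1 adds a uint32 reading_id and a uint16 stream_id plus 2 pad bytes.
rec1 = struct.pack("<LLLLH2x", 0, 1000, 42, 7, 0x5001)
error, timestamp, value, reading_id, stream_id = struct.unpack("<LLLLH2x", rec1)
assert struct.calcsize("<LLLLH2x") == 20
```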
Please provide a description of the function:def parse_size_name(type_name):
    if ' ' in type_name:
        raise ArgumentError("There should not be a space in config variable type specifier", specifier=type_name)
    variable = False
    count = 1
    base_type = type_name
    if type_name[-1] == ']':
        variable = True
        start_index = type_name.find('[')
        if start_index == -1:
            raise ArgumentError("Could not find matching [ for ] character", specifier=type_name)
        count = int(type_name[start_index+1:-1], 0)
        base_type = type_name[:start_index]
    matched_type = TYPE_CODES.get(base_type)
    if matched_type is None:
        raise ArgumentError("Could not find base type name", base_type=base_type, type_string=type_name)
    base_size = struct.calcsize("<%s" % matched_type)
    total_size = base_size*count
    return total_size, base_size, matched_type, variable | [
"Calculate size and encoding from a type name.\n\n This method takes a C-style type string like uint8_t[10] and returns\n - the total size in bytes\n - the unit size of each member (if it's an array)\n - the scruct.{pack,unpack} format code for decoding the base type\n - whether it is an array.\n "
] |
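Given the docstring, a type string such as `uint16_t[4]` should parse to a total size, a unit size, a struct code, and an array flag. The `TYPE_CODES` table is not shown in this file, so the expected results below assume the usual C-type-to-struct mapping (e.g. `uint16_t` -> `'H'`, `uint8_t` -> `'B'`):

```python
# Expected behavior, assuming TYPE_CODES maps uint16_t -> 'H' and uint8_t -> 'B':
total, unit, code, is_array = parse_size_name("uint16_t[4]")
assert (total, unit, code, is_array) == (8, 2, 'H', True)

total, unit, code, is_array = parse_size_name("uint8_t")
assert (total, unit, code, is_array) == (1, 1, 'B', False)
```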
Please provide a description of the function:def _validate_python_type(self, python_type):
    if python_type == 'bool':
        if self.variable:
            raise ArgumentError("You can only specify a bool python type on a scalar (non-array) type_name", type_name=self.type_name)
        return
    if python_type == 'string':
        if not (self.variable and self.unit_size == 1):
            raise ArgumentError("You can only pass a string python type on an array of 1-byte objects", type_name=self.type_name)
        return
    if python_type is not None:
        raise ArgumentError("You can only declare a bool or string python type. Otherwise it must be passed as None", python_type=python_type) | [
"Validate the possible combinations of python_type and type_name."
] |
Please provide a description of the function:def _convert_default_value(self, default):
    if default is None:
        return None
    if isinstance(default, str):
        if self.special_type == 'string':
            return default.encode('utf-8') + b'\0'
        raise DataError("You can only pass a unicode string if you are declaring a string type config variable", default=default)
    if isinstance(default, (bytes, bytearray)):
        if self.special_type == 'string' and isinstance(default, bytes):
            default += b'\0'
        return default
    if isinstance(default, int):
        default = [default]
    format_string = "<" + (self.base_type*len(default))
    return struct.pack(format_string, *default) | [
"Convert the passed default value to binary.\n\n The default value (if passed) may be specified as either a `bytes`\n object or a python int or list of ints. If an int or list of ints is\n passed, it is converted to binary. Otherwise, the raw binary data is\n used.\n\n If you pass a bytes object with python_type as True, do not null terminate\n it, an additional null termination will be added.\n\n Passing a unicode string is only allowed if as_string is True and it\n will be encoded as utf-8 and null terminated for use as a default value.\n "
] |
Please provide a description of the function:def clear(self):
    if self.default_value is None:
        self.current_value = bytearray()
    else:
        self.current_value = bytearray(self.default_value) | [
"Clear this config variable to its reset value."
] |
Please provide a description of the function:def update_value(self, offset, value):
    if offset + len(value) > self.total_size:
        return Error.INPUT_BUFFER_TOO_LONG
    if len(self.current_value) < offset:
        self.current_value += bytearray(offset - len(self.current_value))
    if len(self.current_value) > offset:
        self.current_value = self.current_value[:offset]
    self.current_value += bytearray(value)
    return 0 | [
"Update the binary value currently stored for this config value.\n\n Returns:\n int: An opaque error code that can be returned from a set_config rpc\n "
] |
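The splice semantics above are subtle: a write at `offset` zero-pads the buffer if it is too short, but also discards anything previously stored past `offset`. A standalone model of that behavior on plain bytearrays:

```python
# Writing b"\xAA\xBB" at offset 6 into a 4-byte buffer zero-pads bytes 4-5,
# while writing at offset 1 truncates everything that used to follow.
def splice(current, offset, value):
    if len(current) < offset:
        current += bytearray(offset - len(current))   # zero-pad up to offset
    current = current[:offset]                        # drop stale tail bytes
    return current + bytearray(value)

buf = bytearray(b"\x01\x02\x03\x04")
assert splice(bytearray(buf), 6, b"\xAA\xBB") == bytearray(b"\x01\x02\x03\x04\x00\x00\xaa\xbb")
assert splice(bytearray(buf), 1, b"\xFF") == bytearray(b"\x01\xff")
```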
Please provide a description of the function:def latch(self):
    if len(self.current_value) == 0:
        raise DataError("There was no data in a config variable during latching", name=self.name)
    # Make sure the data ends on a unit boundary. This would have happened automatically
    # in an actual device by the C runtime 0 padding out the storage area.
    remaining = len(self.current_value) % self.unit_size
    if remaining > 0:
        self.current_value += bytearray(self.unit_size - remaining)
    if self.special_type == 'string':
        if self.current_value[-1] != 0:
            raise DataError("String type was specified but data did not end with a null byte", data=self.current_value, name=self.name)
        return bytes(self.current_value[:-1]).decode('utf-8')
    fmt_code = "<" + (self.base_type * (len(self.current_value) // self.unit_size))
    data = struct.unpack(fmt_code, self.current_value)
    if self.variable:
        data = list(data)
    else:
        data = data[0]
    if self.special_type == 'bool':
        data = bool(data)
    return data | [
"Convert the current value inside this config descriptor to a python object.\n\n The conversion proceeds by mapping the given type name to a native\n python class and performing the conversion. You can override what\n python object is used as the destination class by passing a\n python_type parameter to __init__.\n\n The default mapping is:\n - char (u)int8_t, (u)int16_t, (u)int32_t: int\n - char[] (u)int8_t[], (u)int16_t[]0, u(int32_t): list of int\n\n If you want to parse a char[] or uint8_t[] as a python string, it\n needs to be null terminated and you should pass python_type='string'.\n\n If you are declaring a scalar integer type and wish it to be decoded\n as a bool, you can pass python_type='bool' to the constructor.\n\n All integers are decoded as little-endian.\n\n Returns:\n object: The corresponding python object.\n\n This will either be an int, list of int or string based on the\n type_name specified and the optional python_type keyword argument\n to the constructor.\n\n Raises:\n DataError: if the object cannot be converted to the desired type.\n ArgumentError: if an invalid python_type was specified during construction.\n "
] |
Please provide a description of the function:def declare_config_variable(self, name, config_id, type_name, default=None, convert=None): #pylint:disable=too-many-arguments;These are all necessary with sane defaults.
    config = ConfigDescriptor(config_id, type_name, default, name=name, python_type=convert)
    self._config_variables[config_id] = config | [
"Declare a config variable that this emulated tile accepts.\n\n The default value (if passed) may be specified as either a `bytes`\n object or a python int or list of ints. If an int or list of ints is\n passed, it is converted to binary. Otherwise, the raw binary data is\n used.\n\n Passing a unicode string is only allowed if as_string is True and it\n will be encoded as utf-8 and null terminated for use as a default value.\n\n Args:\n name (str): A user friendly name for this config variable so that it can\n be printed nicely.\n config_id (int): A 16-bit integer id number to identify the config variable.\n type_name (str): An encoded type name that will be parsed by parse_size_name()\n default (object): The default value if there is one. This should be a\n python object that will be converted to binary according to the rules for\n the config variable type specified in type_name.\n convert (str): whether this variable should be converted to a\n python string or bool rather than an int or a list of ints. You can\n pass either 'bool', 'string' or None\n "
] |
Please provide a description of the function:def latch_config_variables(self):
    return {desc.name: desc.latch() for desc in self._config_variables.values()} | [
"Latch the current value of all config variables as python objects.\n\n This function will capture the current value of all config variables\n at the time that this method is called. It must be called after\n start() has been called so that any default values in the config\n variables have been properly set otherwise DataError will be thrown.\n\n Conceptually this method performs the operation that happens just\n before a tile executive hands control to the tile application\n firmware. It latches in the value of all config variables at that\n point in time.\n\n For convenience, this method does all necessary binary -> python\n native object conversion so that you just get python objects back.\n\n Returns:\n dict: A dict of str -> object with the config variable values.\n\n The keys in the dict will be the name passed to\n `declare_config_variable`.\n\n The values will be the python objects that result from calling\n latch() on each config variable. Consult ConfigDescriptor.latch()\n for documentation on how that method works.\n "
] |
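Taken together, declare_config_variable, set_config_variable and latch_config_variables form a declare -> write -> latch lifecycle. A hedged sketch of that flow (`tile` stands in for any tile instance exposing these methods; the variable name and ID are made up):

```python
# Declare a 16-byte char[] variable decoded as a python string.
tile.declare_config_variable('user_name', 0x8000, 'char[16]', convert='string')

# Before the tile is initialized, write raw null-terminated bytes into it.
tile.set_config_variable(0x8000, 0, b'hello\x00')

# At application-handoff time, latch all variables as python objects.
values = tile.latch_config_variables()
assert values['user_name'] == 'hello'   # terminator stripped, utf-8 decoded
```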
Please provide a description of the function:def dump_state(self):
    return {
        "config_variables": {x: base64.b64encode(y.current_value).decode('utf-8') for x, y in self._config_variables.items()},
    } | [
"Dump the current state of this emulated tile as a dictionary.\n\n This function just dumps the status of the config variables. It is designed to\n be called in a chained fashion to serialize the complete state of a tile subclass.\n\n Returns:\n dict: The current state of the object that could be passed to load_state.\n "
] |
Please provide a description of the function:def restore_state(self, state):
    config_vars = state.get('config_variables', {})
    for str_name, str_value in config_vars.items():
        name = int(str_name)
        value = base64.b64decode(str_value)
        if name in self._config_variables:
            self._config_variables[name].current_value = value | [
"Restore the current state of this emulated object.\n\n Args:\n state (dict): A previously dumped state produced by dump_state.\n "
] |
Please provide a description of the function:async def reset(self):
    await self._device.emulator.stop_tasks(self.address)
    self._handle_reset()
    self._logger.info("Tile at address %d has reset itself.", self.address)
    self._logger.info("Starting main task for tile at address %d", self.address)
    self._device.emulator.add_task(self.address, self._reset_vector()) | [
"Synchronously reset a tile.\n\n This method must be called from the emulation loop and will\n synchronously shut down all background tasks running this tile, clear\n it to reset state and then restart the initialization background task.\n "
] |
Please provide a description of the function:def list_config_variables(self, offset):
    names = sorted(self._config_variables)
    names = names[offset:offset + 9]
    count = len(names)
    if len(names) < 9:
        names += [0]*(9 - count)
    return [count] + names | [
"List defined config variables up to 9 at a time."
] |
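Since the RPC above returns at most 9 IDs per call (zero-padded), a caller pages through the full set by advancing the offset until a short batch comes back. A hedged client-side sketch:

```python
offset = 0
config_ids = []
while True:
    reply = tile.list_config_variables(offset)   # [count, id0, ..., id8]
    count = reply[0]
    config_ids.extend(reply[1:1 + count])
    if count < 9:        # a short batch means we reached the end
        break
    offset += 9
```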
Please provide a description of the function:def describe_config_variable(self, config_id):
    config = self._config_variables.get(config_id)
    if config is None:
        return [Error.INVALID_ARRAY_KEY, 0, 0, 0, 0]
    packed_size = config.total_size
    packed_size |= int(config.variable) << 15
    return [0, 0, 0, config_id, packed_size] | [
"Describe the config variable by its id."
] |
Please provide a description of the function:def set_config_variable(self, config_id, offset, value):
    if self.initialized.is_set():
        return [Error.STATE_CHANGE_AT_INVALID_TIME]
    config = self._config_variables.get(config_id)
    if config is None:
        return [Error.INVALID_ARRAY_KEY]
    error = config.update_value(offset, value)
    return [error] | [
"Set a chunk of the current config value's value."
] |
Please provide a description of the function:def get_config_variable(self, config_id, offset):
    config = self._config_variables.get(config_id)
    if config is None:
        return [b""]
    return [bytes(config.current_value[offset:offset + 20])] | [
"Get a chunk of a config variable's value."
] |
Please provide a description of the function:def get_config(self, name, default=MISSING):
    res = self.config.get(name, default)
    if res is MISSING:
        raise ArgumentError("Could not find config value by name and no default supplied", name=name)
    return res | [
"Get a config value from this adapter by name\n\n Args:\n name (string): The name of the config variable\n default (object): The default value to return if config is not found\n\n Returns:\n object: the value associated with the name\n\n Raises:\n ArgumentError: if the name is not found and no default is supplied\n "
] |
Please provide a description of the function:def add_callback(self, name, func):
    if name not in self.callbacks:
        raise ValueError("Unknown callback name: %s" % name)
    self.callbacks[name].add(func) | [
"Add a callback when Device events happen\n\n Args:\n name (str): currently support 'on_scan' and 'on_disconnect'\n func (callable): the function that should be called\n "
] |
Please provide a description of the function:def connect_async(self, connection_id, connection_string, callback):
    if callback is not None:
        callback(connection_id, self.id, False, "connect command is not supported in device adapter") | [
"Asynchronously connect to a device\n\n Args:\n connection_id (int): A unique identifier that will refer to this connection\n connection_string (string): A DeviceAdapter specific string that can be used to connect to\n a device using this DeviceAdapter.\n callback (callable): A function that will be called when the connection attempt finishes as\n callback(connection_id, adapter_id, success: bool, failure_reason: string or None)\n "
] |
Please provide a description of the function:def connect_sync(self, connection_id, connection_string):
    calldone = threading.Event()
    results = {}
    def connect_done(callback_connid, callback_adapterid, callback_success, failure_reason):
        results['success'] = callback_success
        results['failure_reason'] = failure_reason
        calldone.set()  # Be sure to set after all operations are done to prevent race condition
    self.connect_async(connection_id, connection_string, connect_done)
    calldone.wait()
    return results | [
"Synchronously connect to a device\n\n Args:\n connection_id (int): A unique identifier that will refer to this connection\n connection_string (string): A DeviceAdapter specific string that can be used to connect to\n a device using this DeviceAdapter.\n\n Returns:\n dict: A dictionary with two elements\n 'success': a bool with the result of the connection attempt\n 'failure_reason': a string with the reason for the failure if we failed\n "
] |
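connect_sync is the first of several `*_sync` methods below (disconnect_sync, open_interface_sync, probe_sync, send_rpc_sync, debug_sync, send_script_sync) that all use the same Event-based bridge to block on a callback-style async API. The distilled pattern, with a hypothetical helper name `run_sync`:

```python
import threading

def run_sync(async_call, *args):
    """Invoke callback-style API async_call(*args, callback) and block on it."""
    done = threading.Event()
    results = {}

    def _on_done(_conn_id, _adapter_id, success, failure_reason):
        results['success'] = success
        results['failure_reason'] = failure_reason
        done.set()   # set only after results are populated to avoid a race

    async_call(*args, _on_done)
    done.wait()
    return results
```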
Please provide a description of the function:def disconnect_sync(self, conn_id):
    done = threading.Event()
    result = {}
    def disconnect_done(conn_id, adapter_id, status, reason):
        result['success'] = status
        result['failure_reason'] = reason
        done.set()
    self.disconnect_async(conn_id, disconnect_done)
    done.wait()
    return result | [
"Synchronously disconnect from a connected device\n\n Args:\n conn_id (int): A unique identifier that will refer to this connection\n\n Returns:\n dict: A dictionary with two elements\n 'success': a bool with the result of the connection attempt\n 'failure_reason': a string with the reason for the failure if we failed\n "
] |
Please provide a description of the function:def open_interface_async(self, conn_id, interface, callback, connection_string=None):
    if interface not in {"rpc", "script", "streaming", "tracing", "debug"}:
        callback(conn_id, self.id, False, "invalid interface name in call to open_interface_async")
        return
    if interface == "rpc":
        self._open_rpc_interface(conn_id, callback)
    elif interface == 'script':
        self._open_script_interface(conn_id, callback)
    elif interface == 'streaming':
        self._open_streaming_interface(conn_id, callback)
    elif interface == 'tracing':
        self._open_tracing_interface(conn_id, callback)
    elif interface == 'debug':
        self._open_debug_interface(conn_id, callback, connection_string)
    else:
        callback(conn_id, self.id, False, "interface not supported yet") | [
"Asynchronously open an interface to this IOTile device\n\n The interface parameter must be one of (rpc, script, streaming,\n tracing, debug). Not all interfaces will be supported by all\n DeviceAdapters.\n\n Args:\n interface (string): The interface to open\n conn_id (int): A unique identifier that will refer to this connection\n callback (callable): A callback that will be called as\n callback(conn_id, adapter_id, success, failure_reason)\n connection_string (string): An optional DeviceAdapter specific string that can be used to connect to\n a device using this DeviceAdapter.\n "
] |
Please provide a description of the function:def open_interface_sync(self, conn_id, interface, connection_string=None):
    done = threading.Event()
    result = {}
    def open_interface_done(conn_id, adapter_id, status, reason):
        result['success'] = status
        result['failure_reason'] = reason
        done.set()
    self.open_interface_async(conn_id, interface, open_interface_done, connection_string)
    done.wait()
    return result | [
"Asynchronously open an interface to this IOTile device\n\n interface must be one of (rpc, script, streaming, tracing, debug)\n\n Args:\n interface (string): The interface to open\n conn_id (int): A unique identifier that will refer to this connection\n connection_string (string): An optional DeviceAdapter specific string that can\n be used to connect to a device using this DeviceAdapter.\n Returns:\n dict: A dictionary with four elements\n 'success': a bool with the result of the connection attempt\n 'failure_reason': a string with the reason for the failure if we failed\n "
] |
Please provide a description of the function:def probe_sync(self):
    done = threading.Event()
    result = {}
    def probe_done(adapter_id, status, reason):
        result['success'] = status
        result['failure_reason'] = reason
        done.set()
    self.probe_async(probe_done)
    done.wait()
    return result | [
"Synchronously probe for devices on this adapter."
] |
Please provide a description of the function:def send_rpc_async(self, conn_id, address, rpc_id, payload, timeout, callback):
    callback(conn_id, self.id, False, 'RPCs are not supported on this adapter', None, None) | [
"Asynchronously send an RPC to this IOTile device\n\n Args:\n conn_id (int): A unique identifier that will refer to this connection\n address (int): the addres of the tile that we wish to send the RPC to\n rpc_id (int): the 16-bit id of the RPC we want to call\n payload (bytearray): the payload of the command\n timeout (float): the number of seconds to wait for the RPC to execute\n callback (callable): A callback for when we have finished the RPC. The callback will be called as\"\n callback(connection_id, adapter_id, success, failure_reason, status, payload)\n 'connection_id': the connection id\n 'adapter_id': this adapter's id\n 'success': a bool indicating whether we received a response to our attempted RPC\n 'failure_reason': a string with the reason for the failure if success == False\n 'status': the one byte status code returned for the RPC if success == True else None\n 'payload': a bytearray with the payload returned by RPC if success == True else None\n "
] |
Please provide a description of the function:def send_rpc_sync(self, conn_id, address, rpc_id, payload, timeout):
    done = threading.Event()
    result = {}
    def send_rpc_done(conn_id, adapter_id, status, reason, rpc_status, resp_payload):
        result['success'] = status
        result['failure_reason'] = reason
        result['status'] = rpc_status
        result['payload'] = resp_payload
        done.set()
    self.send_rpc_async(conn_id, address, rpc_id, payload, timeout, send_rpc_done)
    done.wait()
    return result | [
"Synchronously send an RPC to this IOTile device\n\n Args:\n conn_id (int): A unique identifier that will refer to this connection\n address (int): the address of the tile that we wish to send the RPC to\n rpc_id (int): the 16-bit id of the RPC we want to call\n payload (bytearray): the payload of the command\n timeout (float): the number of seconds to wait for the RPC to execute\n\n Returns:\n dict: A dictionary with four elements\n 'success': a bool indicating whether we received a response to our attempted RPC\n 'failure_reason': a string with the reason for the failure if success == False\n 'status': the one byte status code returned for the RPC if success == True else None\n 'payload': a bytearray with the payload returned by RPC if success == True else None\n "
] |
Please provide a description of the function:def debug_async(self, conn_id, cmd_name, cmd_args, progress_callback, callback):
    callback(conn_id, self.id, False, None, "Debug commands are not supported by this DeviceAdapter") | [
"Asynchronously complete a named debug command.\n\n The command name and arguments are passed to the underlying device adapter\n and interpreted there. If the command is long running, progress_callback\n may be used to provide status updates. Callback is called when the command\n has finished.\n\n Args:\n conn_id (int): A unique identifier that will refer to this connection\n cmd_name (string): the name of the debug command we want to invoke\n cmd_args (dict): any arguments that we want to send with this command.\n progress_callback (callable): A function to be called with status on our progress, called as:\n progress_callback(done_count, total_count)\n callback (callable): A callback for when we have finished the debug command, called as:\n callback(connection_id, adapter_id, success, retval, failure_reason)\n 'connection_id': the connection id\n 'adapter_id': this adapter's id\n 'success': a bool indicating whether we received a response to our attempted RPC\n 'retval': A command specific dictionary of return value information\n 'failure_reason': a string with the reason for the failure if success == False\n "
] |
Please provide a description of the function:def debug_sync(self, conn_id, cmd_name, cmd_args, progress_callback):
    done = threading.Event()
    result = {}
    def _debug_done(conn_id, adapter_id, success, retval, reason):
        result['success'] = success
        result['failure_reason'] = reason
        result['return_value'] = retval
        done.set()
    self.debug_async(conn_id, cmd_name, cmd_args, progress_callback, _debug_done)
    done.wait()
    return result | [
"Asynchronously complete a named debug command.\n\n The command name and arguments are passed to the underlying device adapter\n and interpreted there. If the command is long running, progress_callback\n may be used to provide status updates. Callback is called when the command\n has finished.\n\n Args:\n conn_id (int): A unique identifier that will refer to this connection\n cmd_name (string): the name of the debug command we want to invoke\n cmd_args (dict): any arguments that we want to send with this command.\n progress_callback (callable): A function to be called with status on our progress, called as:\n progress_callback(done_count, total_count)\n "
] |
Please provide a description of the function:def send_script_async(self, conn_id, data, progress_callback, callback):
    callback(conn_id, self.id, False, 'Sending scripts is not supported by this device adapter') | [
"Asynchronously send a a script to this IOTile device\n\n Args:\n conn_id (int): A unique identifier that will refer to this connection\n data (string): the script to send to the device\n progress_callback (callable): A function to be called with status on our progress, called as:\n progress_callback(done_count, total_count)\n callback (callable): A callback for when we have finished sending the script. The callback will be called as\n callback(connection_id, adapter_id, success, failure_reason)\n 'connection_id': the connection id\n 'adapter_id': this adapter's id\n 'success': a bool indicating whether we received a response to our attempted RPC\n 'failure_reason': a string with the reason for the failure if success == False\n "
] |
Please provide a description of the function:def send_script_sync(self, conn_id, data, progress_callback):
    done = threading.Event()
    result = {}
    def send_script_done(conn_id, adapter_id, status, reason):
        result['success'] = status
        result['failure_reason'] = reason
        done.set()
    self.send_script_async(conn_id, data, progress_callback, send_script_done)
    done.wait()
    return result | [
"Asynchronously send a a script to this IOTile device\n\n Args:\n conn_id (int): A unique identifier that will refer to this connection\n data (string): the script to send to the device\n progress_callback (callable): A function to be called with status on our progress, called as:\n progress_callback(done_count, total_count)\n\n Returns:\n dict: a dict with the following two entries set\n 'success': a bool indicating whether we received a response to our attempted RPC\n 'failure_reason': a string with the reason for the failure if success == False\n "
] |
Please provide a description of the function:def FindByName(cls, name):
    reg = ComponentRegistry()
    for _, entry in reg.load_extensions('iotile.auth_provider', name_filter=name):
        return entry | [
"Find a specific installed auth provider by name."
] |
Please provide a description of the function:def DeriveReportKey(cls, root_key, report_id, sent_timestamp):
    signed_data = struct.pack("<LLL", AuthProvider.ReportKeyMagic, report_id, sent_timestamp)
    hmac_calc = hmac.new(root_key, signed_data, hashlib.sha256)
    return bytearray(hmac_calc.digest()) | [
"Derive a standard one time use report signing key.\n\n The standard method is HMAC-SHA256(root_key, MAGIC_NUMBER || report_id || sent_timestamp)\n where MAGIC_NUMBER is 0x00000002 and all integers are in little endian.\n "
] |
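The docstring pins down the algorithm exactly: HMAC-SHA256 over `<LLL`-packed (magic, report_id, sent_timestamp) with magic 0x00000002. A self-contained version for reference:

```python
import hashlib
import hmac
import struct

REPORT_KEY_MAGIC = 0x00000002   # AuthProvider.ReportKeyMagic per the docstring

def derive_report_key(root_key, report_id, sent_timestamp):
    signed_data = struct.pack("<LLL", REPORT_KEY_MAGIC, report_id, sent_timestamp)
    return bytearray(hmac.new(root_key, signed_data, hashlib.sha256).digest())

key = derive_report_key(b'\x00' * 32, report_id=1, sent_timestamp=0)
assert len(key) == 32   # SHA-256 digest length
```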
Please provide a description of the function:def declare(self, name):
    if name in self._data:
        raise KeyError("Declared name {} that already existed".format(name))
    self._data[name] = self._loop.create_future() | [
"Declare that a key will be set in the future.\n\n This will create a future for the key that is used to\n hold its result and allow awaiting it.\n\n Args:\n name (str): The unique key that will be used.\n "
] |
Please provide a description of the function:async def get(self, name, timeout=None, autoremove=True):
    self._ensure_declared(name)
    try:
        await asyncio.wait_for(self._data[name], timeout, loop=self._loop.get_loop())
        return self._data[name].result()
    finally:
        if autoremove:
            self._data[name].cancel()
            del self._data[name] | [
"Wait for a value to be set for a key.\n\n This is the primary way to receive values from AwaitableDict.\n You pass in the name of the key you want to wait for, the maximum\n amount of time you want to wait and then you can await the result\n and it will resolve to value from the call to set or an\n asyncio.TimeoutError.\n\n You should generally leave autoremove as the default True value. This\n causes the key to be removed from the dictionary after get returns.\n Normally you have a single user calling ``get`` and another calling\n ``set`` so you want to automatically clean up after the getter\n returns, no matter what.\n\n If the key has not already been declared, it will be declared\n automatically inside this function so it is not necessary to call\n :meth:`declare` manually in most use cases.\n\n Args:\n name (str): The name of the key to wait on.\n timeout (float): The maximum timeout to wait.\n autoremove (bool): Whether to automatically remove the\n key when get() returns.\n\n Returns:\n object: Whatever was set in the key by :meth:`set`.\n\n Raises:\n asyncio.TimeoutError: The key was not set within the timeout.\n "
] |
Please provide a description of the function:def get_nowait(self, name, default=_MISSING, autoremove=False):
    self._ensure_declared(name)
    try:
        future = self._data[name]
        if future.done():
            return future.result()
        if default is _MISSING:
            raise KeyError("Key {} has not been assigned a value and no default given".format(name))
        return default
    finally:
        if autoremove:
            self._data[name].cancel()
            del self._data[name] | [
"Get the value of a key if it is already set.\n\n This method allows you to check if a key has already been set\n without blocking. If the key has not been set you will get the\n default value you pass in or KeyError() if no default is passed.\n\n When this method returns the key is automatically removed unless\n you pass ``autoremove=False``.\n\n This method is not a coroutine and does not block.\n\n Args:\n name (str): The name of the key to wait on.\n default (object): The default value to return if the key\n has not yet been set. Defaults to raising KeyError().\n autoremove (bool): Whether to automatically remove the\n key when get() returns.\n\n Returns:\n object: Whatever was set in the key by :meth:`set`.\n "
] |
Please provide a description of the function:def set(self, name, value, autodeclare=False):
    if not autodeclare and name not in self._data:
        raise KeyError("Key {} has not been declared and autodeclare=False".format(name))
    self._ensure_declared(name)
    self._data[name].set_result(value) | [
"Set the value of a key.\n\n This method will cause anyone waiting on a key (and any future\n waiters) to unblock and be returned the value you pass here.\n\n If the key has not been declared previously, a KeyError() is\n raised unless you pass ``autodeclare=True`` which will cause\n the key to be declared. Normally you don't want to autodeclare.\n\n This method is not a coroutine and does not block.\n\n Args:\n name (str): The key to set\n value (object): The value to set\n autodeclare (bool): Whether to automatically declare the\n key if is has not already been declared. Defaults to\n False.\n "
] |
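A short usage sketch tying declare/set/get together (assuming `shared` is an instance of this AwaitableDict class running on the active event loop):

```python
import asyncio

async def demo(shared):
    shared.declare('result')    # create the future before anyone sets it
    # Simulate a producer setting the key a little later.
    asyncio.get_event_loop().call_later(0.1, shared.set, 'result', 42)
    value = await shared.get('result', timeout=1.0)   # key auto-removed after
    assert value == 42
```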
Please provide a description of the function:def verify(self, obj):
    if obj != self._literal:
        raise ValidationError("Object is not equal to literal",
                              reason='%s is not equal to %s' % (str(obj), str(self._literal)), object=obj)
    return obj | [
"Verify that the object conforms to this verifier's schema\n\n Args:\n obj (object): A python object to verify\n\n Raises:\n ValidationError: If there is a problem verifying the dictionary, a\n ValidationError is thrown with at least the reason key set indicating\n the reason for the lack of validation.\n "
] |
Please provide a description of the function:def format(self, indent_level, indent_size=4):
    name = self.format_name('Literal', indent_size)
    if self.long_desc is not None:
        name += '\n'
    name += self.wrap_lines('value: %s\n' % str(self._literal), 1, indent_size)
    return self.wrap_lines(name, indent_level, indent_size) | [
"Format this verifier\n\n Returns:\n string: A formatted string\n "
] |
Please provide a description of the function:def dump_tree(self, statement=None, indent_level=0):
out = u""
indent = u" "*indent_level
if statement is None:
for root_statement in self.statements:
out += self.dump_tree(root_statement, indent_level)
else:
out += indent + str(statement) + u'\n'
if len(statement.children) > 0:
for child in statement.children:
out += self.dump_tree(child, indent_level=indent_level+4)
return out | [
"Dump the AST for this parsed file.\n\n Args:\n statement (SensorGraphStatement): the statement to print\n if this function is called recursively.\n indent_level (int): The number of spaces to indent this\n statement. Used for recursively printing blocks of\n statements.\n Returns:\n str: The AST for this parsed sg file as a nested\n tree with one node per line and blocks indented.\n "
] |
Please provide a description of the function:def parse_file(self, sg_file=None, data=None):
    if sg_file is not None and data is not None:
        raise ArgumentError("You must pass either a path to an sgf file or the sgf contents but not both")
    if sg_file is None and data is None:
        raise ArgumentError("You must pass either a path to an sgf file or the sgf contents, neither passed")
    if sg_file is not None:
        try:
            with open(sg_file, "r") as inf:
                data = inf.read()
        except IOError:
            raise ArgumentError("Could not read sensor graph file", path=sg_file)
    # convert tabs to spaces so our line numbers match correctly
    data = data.replace(u'\t', u' ')
    lang = get_language()
    result = lang.parseString(data)
    for statement in result:
        parsed = self.parse_statement(statement, orig_contents=data)
        self.statements.append(parsed) | [
"Parse a sensor graph file into an AST describing the file.\n\n This function builds the statements list for this parser.\n If you pass ``sg_file``, it will be interpreted as the path to a file\n to parse. If you pass ``data`` it will be directly interpreted as the\n string to parse.\n "
] |
Please provide a description of the function:def compile(self, model):
    log = SensorLog(InMemoryStorageEngine(model), model)
    self.sensor_graph = SensorGraph(log, model)
    allocator = StreamAllocator(self.sensor_graph, model)
    self._scope_stack = []
    # Create a root scope
    root = RootScope(self.sensor_graph, allocator)
    self._scope_stack.append(root)
    for statement in self.statements:
        statement.execute(self.sensor_graph, self._scope_stack)
    self.sensor_graph.initialize_remaining_constants()
    self.sensor_graph.sort_nodes() | [
"Compile this file into a SensorGraph.\n\n You must have preivously called parse_file to parse a\n sensor graph file into statements that are then executed\n by this command to build a sensor graph.\n\n The results are stored in self.sensor_graph and can be\n inspected before running optimization passes.\n\n Args:\n model (DeviceModel): The device model that we should compile\n this sensor graph for.\n "
] |
Please provide a description of the function:def parse_statement(self, statement, orig_contents):
    children = []
    is_block = False
    name = statement.getName()
    # Recursively parse all children statements in a block
    # before parsing the block itself.
    # If this is a non-block statement, parse it using the statement
    # parser to figure out what specific statement it is before
    # processing it further.
    # This two step process produces better syntax error messages
    if name == 'block':
        children_statements = statement[1]
        for child in children_statements:
            parsed = self.parse_statement(child, orig_contents=orig_contents)
            children.append(parsed)
        locn = statement[0]['location']
        statement = statement[0][1]
        name = statement.getName()
        is_block = True
    else:
        stmt_language = get_statement()
        locn = statement['location']
        statement = statement['match']
        statement_string = str(u"".join(statement.asList()))
        # Try to parse this generic statement into an actual statement.
        # Do this here in a separate step so we have good error messages when there
        # is a problem parsing a step.
        try:
            statement = stmt_language.parseString(statement_string)[0]
        except (pyparsing.ParseException, pyparsing.ParseSyntaxException) as exc:
            raise SensorGraphSyntaxError("Error parsing statement in sensor graph file", message=exc.msg, line=pyparsing.line(locn, orig_contents).strip(), line_number=pyparsing.lineno(locn, orig_contents), column=pyparsing.col(locn, orig_contents))
        except SensorGraphSemanticError as exc:
            # Reraise semantic errors with line information
            raise SensorGraphSemanticError(exc.msg, line=pyparsing.line(locn, orig_contents).strip(), line_number=pyparsing.lineno(locn, orig_contents), **exc.params)
    name = statement.getName()
    if name not in statement_map:
        raise ArgumentError("Unknown statement in sensor graph file", parsed_statement=statement, name=name)
    # Save off our location information so we can give good error and warning information
    line = pyparsing.line(locn, orig_contents).strip()
    line_number = pyparsing.lineno(locn, orig_contents)
    column = pyparsing.col(locn, orig_contents)
    location_info = LocationInfo(line, line_number, column)
    if is_block:
        return statement_map[name](statement, children=children, location=location_info)
    return statement_map[name](statement, location_info) | [
"Parse a statement, possibly called recursively.\n\n Args:\n statement (int, ParseResult): The pyparsing parse result that\n contains one statement prepended with the match location\n orig_contents (str): The original contents of the file that we're\n parsing in case we need to convert an index into a line, column\n pair.\n\n Returns:\n SensorGraphStatement: The parsed statement.\n "
] |
Please provide a description of the function:def start(self, channel):
    if self._started:
        raise InternalError("The method start() was called twice on VirtualIOTileDevice.")
    self._push_channel = channel
    self.start_workers() | [
"Start running this virtual device including any necessary worker threads.\n\n Args:\n channel (IOTilePushChannel): the channel with a stream and trace\n routine for streaming and tracing data through a VirtualInterface\n "
] |
Please provide a description of the function:def stream(self, report, callback=None):
    if self._push_channel is None:
        return
    self._push_channel.stream(report, callback=callback) | [
"Stream a report asynchronously.\n\n If no one is listening for the report, the report may be dropped,\n otherwise it will be queued for sending\n\n Args:\n report (IOTileReport): The report that should be streamed\n callback (callable): Optional callback to get notified when\n this report is actually sent.\n "
] |
Please provide a description of the function:def stream_realtime(self, stream, value):
    if not self.stream_iface_open:
        return
    reading = IOTileReading(0, stream, value)
    report = IndividualReadingReport.FromReadings(self.iotile_id, [reading])
    self.stream(report) | [
"Stream a realtime value as an IndividualReadingReport.\n\n If the streaming interface of the VirtualInterface this\n VirtualDevice is attached to is not opened, the realtime\n reading may be dropped.\n\n Args:\n stream (int): The stream id to send\n value (int): The stream value to send\n "
] |
Please provide a description of the function:def trace(self, data, callback=None):
    if self._push_channel is None:
        return
    self._push_channel.trace(data, callback=callback) | [
"Trace data asynchronously.\n\n If no one is listening for traced data, it will be dropped\n otherwise it will be queued for sending.\n\n Args:\n data (bytearray, string): Unstructured data to trace to any\n connected client.\n callback (callable): Optional callback to get notified when\n this data is actually sent.\n "
] |
Please provide a description of the function:def register_rpc(self, address, rpc_id, func):
    if rpc_id < 0 or rpc_id > 0xFFFF:
        raise RPCInvalidIDError("Invalid RPC ID: {}".format(rpc_id))
    if address not in self._rpc_overlays:
        self._rpc_overlays[address] = RPCDispatcher()
    self._rpc_overlays[address].add_rpc(rpc_id, func) | [
"Register a single RPC handler with the given info.\n\n This function can be used to directly register individual RPCs,\n rather than delegating all RPCs at a given address to a virtual\n Tile.\n\n If calls to this function are mixed with calls to add_tile for\n the same address, these RPCs will take precedence over what is\n defined in the tiles.\n\n Args:\n address (int): The address of the mock tile this RPC is for\n rpc_id (int): The number of the RPC\n func (callable): The function that should be called to handle the\n RPC. func is called as func(payload) and must return a single\n string object of up to 20 bytes with its response\n "
] |
Please provide a description of the function:def call_rpc(self, address, rpc_id, payload=b""):
    if rpc_id < 0 or rpc_id > 0xFFFF:
        raise RPCInvalidIDError("Invalid RPC ID: {}".format(rpc_id))
    if address not in self._rpc_overlays and address not in self._tiles:
        raise TileNotFoundError("Unknown tile address, no registered handler", address=address)
    overlay = self._rpc_overlays.get(address, None)
    tile = self._tiles.get(address, None)
    if overlay is not None and overlay.has_rpc(rpc_id):
        return overlay.call_rpc(rpc_id, payload)
    elif tile is not None and tile.has_rpc(rpc_id):
        return tile.call_rpc(rpc_id, payload)
    raise RPCNotFoundError("Could not find RPC 0x%X at address %d" % (rpc_id, address)) | [
"Call an RPC by its address and ID.\n\n Args:\n address (int): The address of the mock tile this RPC is for\n rpc_id (int): The number of the RPC\n payload (bytes): A byte string of payload parameters up to 20 bytes\n\n Returns:\n bytes: The response payload from the RPC\n "
] |
Please provide a description of the function:def add_tile(self, address, tile):
    if address in self._tiles:
        raise ArgumentError("Tried to add two tiles at the same address", address=address)
    self._tiles[address] = tile | [
"Add a tile to handle all RPCs at a given address.\n\n Args:\n address (int): The address of the tile\n tile (RPCDispatcher): A tile object that inherits from RPCDispatcher\n "
] |
Please provide a description of the function:def encode_contents(self):
    header = struct.pack("<LL", self.offset, len(self.raw_data))
    return bytearray(header) + self.raw_data | [
"Encode the contents of this update record without including a record header.\n\n Returns:\n bytearary: The encoded contents.\n "
] |
Please provide a description of the function:def FromBinary(cls, record_data, record_count=1):
    if len(record_data) < ReflashControllerRecord.RecordHeaderLength:
        raise ArgumentError("Record was too short to contain a full reflash record header",
                            length=len(record_data), header_length=ReflashControllerRecord.RecordHeaderLength)
    offset, data_length = struct.unpack_from("<LL", record_data)
    bindata = record_data[ReflashControllerRecord.RecordHeaderLength:]
    if len(bindata) != data_length:
        raise ArgumentError("Embedded firmware length did not agree with actual length of embedded data",
                            length=len(bindata), embedded_length=data_length)
    return ReflashControllerRecord(bindata, offset) | [
"Create an UpdateRecord subclass from binary record data.\n\n This should be called with a binary record blob (NOT including the\n record type header) and it will decode it into a ReflashControllerRecord.\n\n Args:\n record_data (bytearray): The raw record data that we wish to parse\n into an UpdateRecord subclass NOT including its 8 byte record header.\n record_count (int): The number of records included in record_data.\n\n Raises:\n ArgumentError: If the record_data is malformed and cannot be parsed.\n\n Returns:\n ReflashControllerRecord: The decoded reflash tile record.\n "
] |
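encode_contents and FromBinary are mirror images of one another around an 8-byte `<LL` header (offset, data length). A round-trip with dummy firmware bytes:

```python
import struct

firmware = b'\xde\xad\xbe\xef'
encoded = struct.pack("<LL", 0x1000, len(firmware)) + firmware   # what encode_contents builds

offset, data_length = struct.unpack_from("<LL", encoded)         # what FromBinary reads
assert (offset, data_length) == (0x1000, len(firmware))
assert encoded[8:] == firmware
```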
Please provide a description of the function:def iter_tiles(self, include_controller=True):
    for address, tile in sorted(self._tiles.items()):
        if address == 8 and not include_controller:
            continue
        yield address, tile | [
"Iterate over all tiles in this device in order.\n\n The ordering is by tile address which places the controller tile\n first in the list.\n\n Args:\n include_controller (bool): Include the controller tile in the\n results.\n\n Yields:\n int, EmulatedTile: A tuple with the tile address and tile object.\n "
] |
Please provide a description of the function:def start(self, channel=None):
    super(ReferenceDevice, self).start(channel)
    try:
        self.controller.start(channel)
        # Guarantee an initialization order so that our trace files are deterministic
        for address, tile in sorted(self._tiles.items()):
            if address == 8:
                continue
            if not isinstance(tile, EmulatedPeripheralTile):
                raise DataError("An emulated ReferenceDevice can only have a single controller and all other tiles must inherit from EmulatedPeripheralTile",
                                address=address)
            tile.start(channel)
        async def _launch_tiles():
            await self.controller.reset()
            await asyncio.wait_for(self.controller.initialized.wait(), 2.0)
            # Note that we do not explicitly reset the tiles.
            # The controller resets all tiles in its reset method.
            for address, tile in sorted(self._tiles.items()):
                if address == 8:
                    continue
                await asyncio.wait_for(tile.initialized.wait(), 2.0)
        self.emulator.run_task_external(_launch_tiles())
        if self._simulating_time:
            self.emulator.add_task(None, self._time_ticker())
    except:
        self.stop()
        raise | [
"Start this emulated device.\n\n This triggers the controller to call start on all peripheral tiles in\n the device to make sure they start after the controller does and then\n it waits on each one to make sure they have finished initializing\n before returning.\n\n Args:\n channel (IOTilePushChannel): the channel with a stream and trace\n routine for streaming and tracing data through a VirtualInterface\n "
] |
Please provide a description of the function:def open_streaming_interface(self):
    super(ReferenceDevice, self).open_streaming_interface()
    self.rpc(8, rpcs.SG_GRAPH_INPUT, 8, streams.COMM_TILE_OPEN)
    return [] | [
"Called when someone opens a streaming interface to the device.\n\n This method will automatically notify sensor_graph that there is a\n streaming interface opened.\n\n Returns:\n list: A list of IOTileReport objects that should be sent out\n the streaming interface.\n "
] |
Please provide a description of the function:def close_streaming_interface(self):
    super(ReferenceDevice, self).close_streaming_interface()
    self.rpc(8, rpcs.SG_GRAPH_INPUT, 8, streams.COMM_TILE_CLOSED) | [
"Called when someone closes the streaming interface to the device.\n\n This method will automatically notify sensor_graph that there is a no\n longer a streaming interface opened.\n "
] |
Please provide a description of the function:def dump_state(self):
    # Dump the state of all of the tiles
    def _background_dump():
        state = super(ReferenceDevice, self).dump_state()
        state['state_name'] = self.STATE_NAME
        state['state_version'] = self.STATE_VERSION
        state['reset_count'] = self.reset_count
        state['received_script'] = base64.b64encode(self.script).decode('utf-8')
        return state
    return self.synchronize_task(_background_dump) | [
"Dump the current state of this emulated object as a dictionary.\n\n Note that dump_state happens synchronously in the emulation thread to\n avoid any race conditions with accessing data members and ensure a\n consistent view of all state data.\n\n Returns:\n dict: The current state of the object that could be passed to load_state.\n "
] |
Please provide a description of the function:def restore_state(self, state):
    state_name = state.get('state_name')
    state_version = state.get('state_version')
    if state_name != self.STATE_NAME or state_version != self.STATE_VERSION:
        raise ArgumentError("Invalid emulated device state name or version", found=(state_name, state_version),
                            expected=(self.STATE_NAME, self.STATE_VERSION))
    def _background_restore():
        # Restore the state of all of the tiles
        super(ReferenceDevice, self).restore_state(state)
        self.reset_count = state.get('reset_count', 0)
        self.script = base64.b64decode(state.get('received_script'))
    self.synchronize_task(_background_restore) | [
"Restore the current state of this emulated device.\n\n Note that restore_state happens synchronously in the emulation thread\n to avoid any race conditions with accessing data members and ensure a\n consistent atomic restoration process.\n\n This method will block while the background restore happens.\n\n Args:\n state (dict): A previously dumped state produced by dump_state.\n "
] |
Please provide a description of the function:def build_parser():
    parser = argparse.ArgumentParser(description="The IOTile task supervisor")
    parser.add_argument('-c', '--config', help="config json with options")
    parser.add_argument('-v', '--verbose', action="count", default=0, help="Increase logging verbosity")
    return parser | [
"Build the script's argument parser."
] |
Please provide a description of the function:def configure_logging(verbosity):
    root = logging.getLogger()
    formatter = logging.Formatter('%(asctime)s.%(msecs)03d %(levelname).3s %(name)s %(message)s',
                                  '%y-%m-%d %H:%M:%S')
    handler = logging.StreamHandler()
    handler.setFormatter(formatter)
    loglevels = [logging.CRITICAL, logging.ERROR, logging.WARNING, logging.INFO, logging.DEBUG]
    if verbosity >= len(loglevels):
        verbosity = len(loglevels) - 1
    level = loglevels[verbosity]
    root.setLevel(level)
    root.addHandler(handler) | [
"Set up the global logging level.\n\n Args:\n verbosity (int): The logging verbosity\n "
] |
Please provide a description of the function:def createProgBuilder(env):
    try:
        program = env['BUILDERS']['Program']
    except KeyError:
        import SCons.Defaults
        program = SCons.Builder.Builder(action = SCons.Defaults.LinkAction,
                                        emitter = '$PROGEMITTER',
                                        prefix = '$PROGPREFIX',
                                        suffix = '$PROGSUFFIX',
                                        src_suffix = '$OBJSUFFIX',
                                        src_builder = 'Object',
                                        target_scanner = ProgramScanner)
        env['BUILDERS']['Program'] = program
    return program | [
"This is a utility function that creates the Program\n Builder in an Environment if it is not there already.\n\n If it is already there, we return the existing one.\n "
] |
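These create*Builder helpers are get-or-create: they return the builder already registered in `env['BUILDERS']` if present and only construct a new one otherwise, which makes them safe to call repeatedly. A hedged usage sketch of how a tool module might rely on that (the tool body here is hypothetical):

```python
# Inside a hypothetical SCons tool module:
def generate(env):
    program = createProgBuilder(env)   # returns the existing Program builder if one is registered
    env['PROGSUFFIX'] = '.elf'         # example: customize once the builder is guaranteed to exist
```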
Please provide a description of the function:def createStaticLibBuilder(env):
    try:
        static_lib = env['BUILDERS']['StaticLibrary']
    except KeyError:
        action_list = [ SCons.Action.Action("$ARCOM", "$ARCOMSTR") ]
        if env.get('RANLIB',False) or env.Detect('ranlib'):
            ranlib_action = SCons.Action.Action("$RANLIBCOM", "$RANLIBCOMSTR")
            action_list.append(ranlib_action)
        static_lib = SCons.Builder.Builder(action = action_list,
                                           emitter = '$LIBEMITTER',
                                           prefix = '$LIBPREFIX',
                                           suffix = '$LIBSUFFIX',
                                           src_suffix = '$OBJSUFFIX',
                                           src_builder = 'StaticObject')
        env['BUILDERS']['StaticLibrary'] = static_lib
        env['BUILDERS']['Library'] = static_lib
    return static_lib | [
"This is a utility function that creates the StaticLibrary\n Builder in an Environment if it is not there already.\n\n If it is already there, we return the existing one.\n "
] |
Please provide a description of the function:def _call_linker_cb(env, callback, args, result = None):
    Verbose = False
    if Verbose:
        print('_call_linker_cb: args=%r' % args)
        print('_call_linker_cb: callback=%r' % callback)
    try:
        cbfun = env['LINKCALLBACKS'][callback]
    except (KeyError, TypeError):
        if Verbose:
            print('_call_linker_cb: env["LINKCALLBACKS"][%r] not found or can not be used' % callback)
        pass
    else:
        if Verbose:
            print('_call_linker_cb: env["LINKCALLBACKS"][%r] found' % callback)
            print('_call_linker_cb: env["LINKCALLBACKS"][%r]=%r' % (callback, cbfun))
        if isinstance(cbfun, collections.Callable):
            if Verbose:
                print('_call_linker_cb: env["LINKCALLBACKS"][%r] is callable' % callback)
            result = cbfun(env, *args)
    return result | [
"Returns the result of env['LINKCALLBACKS'][callback](*args)\n if env['LINKCALLBACKS'] is a dictionary and env['LINKCALLBACKS'][callback]\n is callable. If these conditions are not met, return the value provided as\n the *result* argument. This function is mainly used for generating library\n info such as versioned suffixes, symlink maps, sonames etc. by delegating\n the core job to callbacks configured by current linker tool"
] |
Please provide a description of the function:def StringizeLibSymlinks(symlinks):
    if SCons.Util.is_List(symlinks):
        try:
            return [ (k.get_path(), v.get_path()) for k,v in symlinks ]
        except (TypeError, ValueError):
            return symlinks
    else:
        return symlinks | [
"Converts list with pairs of nodes to list with pairs of node paths\n (strings). Used mainly for debugging."
] |
Please provide a description of the function:def EmitLibSymlinks(env, symlinks, libnode, **kw):
    Verbose = False
    # nodes involved in process... all symlinks + library
    nodes = list(set([ x for x,y in symlinks ] + [libnode]))
    clean_targets = kw.get('clean_targets', [])
    if not SCons.Util.is_List(clean_targets):
        clean_targets = [ clean_targets ]
    for link, linktgt in symlinks:
        env.SideEffect(link, linktgt)
        if Verbose:
            print("EmitLibSymlinks: SideEffect(%r,%r)" % (link.get_path(), linktgt.get_path()))
        clean_list = [x for x in nodes if x != linktgt]
        env.Clean(list(set([linktgt] + clean_targets)), clean_list)
        if Verbose:
            print("EmitLibSymlinks: Clean(%r,%r)" % (linktgt.get_path(), [x.get_path() for x in clean_list])) | [
"Used by emitters to handle (shared/versioned) library symlinks"
] |
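A hedged sketch of an emitter calling this helper; the library name and symlink chain are invented for illustration.

def versioned_shlib_emitter(target, source, env):
    libnode = target[0]  # e.g. the node for libfoo.so.1.2.3
    symlinks = [(env.File('libfoo.so'), libnode),
                (env.File('libfoo.so.1'), libnode)]
    EmitLibSymlinks(env, symlinks, libnode)
    return (target, source)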
Please provide a description of the function:def CreateLibSymlinks(env, symlinks):
Verbose = False
for link, linktgt in symlinks:
linktgt = link.get_dir().rel_path(linktgt)
link = link.get_path()
if(Verbose):
print("CreateLibSymlinks: preparing to add symlink %r -> %r" % (link, linktgt))
# Delete the (previously created) symlink if exists. Let only symlinks
# to be deleted to prevent accidental deletion of source files...
if env.fs.islink(link):
env.fs.unlink(link)
if(Verbose):
print("CreateLibSymlinks: removed old symlink %r" % link)
# If a file or directory exists with the same name as link, an OSError
# will be thrown, which should be enough, I think.
env.fs.symlink(linktgt, link)
if(Verbose):
print("CreateLibSymlinks: add symlink %r -> %r" % (link, linktgt))
return 0 | [
"Physically creates symlinks. The symlinks argument must be a list in\n form [ (link, linktarget), ... ], where link and linktarget are SCons\n nodes.\n "
] |
Please provide a description of the function:def createSharedLibBuilder(env):
try:
shared_lib = env['BUILDERS']['SharedLibrary']
except KeyError:
import SCons.Defaults
action_list = [ SCons.Defaults.SharedCheck,
SCons.Defaults.ShLinkAction,
LibSymlinksAction ]
shared_lib = SCons.Builder.Builder(action = action_list,
emitter = "$SHLIBEMITTER",
prefix = ShLibPrefixGenerator,
suffix = ShLibSuffixGenerator,
target_scanner = ProgramScanner,
src_suffix = '$SHOBJSUFFIX',
src_builder = 'SharedObject')
env['BUILDERS']['SharedLibrary'] = shared_lib
return shared_lib | [
"This is a utility function that creates the SharedLibrary\n Builder in an Environment if it is not there already.\n\n If it is already there, we return the existing one.\n "
] |
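Once a linker tool has installed this builder, an SConstruct can request a shared library directly; Environment is provided by SCons in that context.

env = Environment()
env.SharedLibrary(target='foo', source=['foo.c', 'bar.c'])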
Please provide a description of the function:def createLoadableModuleBuilder(env):
try:
ld_module = env['BUILDERS']['LoadableModule']
except KeyError:
import SCons.Defaults
action_list = [ SCons.Defaults.SharedCheck,
SCons.Defaults.LdModuleLinkAction,
LibSymlinksAction ]
ld_module = SCons.Builder.Builder(action = action_list,
emitter = "$LDMODULEEMITTER",
prefix = LdModPrefixGenerator,
suffix = LdModSuffixGenerator,
target_scanner = ProgramScanner,
src_suffix = '$SHOBJSUFFIX',
src_builder = 'SharedObject')
env['BUILDERS']['LoadableModule'] = ld_module
return ld_module | [
"This is a utility function that creates the LoadableModule\n Builder in an Environment if it is not there already.\n\n If it is already there, we return the existing one.\n "
] |
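Usage mirrors SharedLibrary; the distinction matters most on platforms such as macOS, where a loadable bundle is linked differently from a shared library. An illustrative SConstruct line:

env.LoadableModule(target='myplugin', source=['plugin.c'])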
Please provide a description of the function:def createObjBuilders(env):
try:
static_obj = env['BUILDERS']['StaticObject']
except KeyError:
static_obj = SCons.Builder.Builder(action = {},
emitter = {},
prefix = '$OBJPREFIX',
suffix = '$OBJSUFFIX',
src_builder = ['CFile', 'CXXFile'],
source_scanner = SourceFileScanner,
single_source = 1)
env['BUILDERS']['StaticObject'] = static_obj
env['BUILDERS']['Object'] = static_obj
try:
shared_obj = env['BUILDERS']['SharedObject']
except KeyError:
shared_obj = SCons.Builder.Builder(action = {},
emitter = {},
prefix = '$SHOBJPREFIX',
suffix = '$SHOBJSUFFIX',
src_builder = ['CFile', 'CXXFile'],
source_scanner = SourceFileScanner,
single_source = 1)
env['BUILDERS']['SharedObject'] = shared_obj
return (static_obj, shared_obj) | [
"This is a utility function that creates the StaticObject\n and SharedObject Builders in an Environment if they\n are not there already.\n\n If they are there already, we return the existing ones.\n\n This is a separate function because soooo many Tools\n use this functionality.\n\n The return is a 2-tuple of (StaticObject, SharedObject)\n "
] |
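A sketch of the registration pattern C-family compiler tools follow with the returned pair; it assumes SCons.Defaults is imported, and the action/emitter names match SCons conventions but are shown here as an illustration, not as this function's own behavior.

def generate(env):
    static_obj, shared_obj = createObjBuilders(env)
    static_obj.add_action('.c', SCons.Defaults.CAction)
    shared_obj.add_action('.c', SCons.Defaults.ShCAction)
    static_obj.add_emitter('.c', SCons.Defaults.StaticObjectEmitter)
    shared_obj.add_emitter('.c', SCons.Defaults.SharedObjectEmitter)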
Please provide a description of the function:def createCFileBuilders(env):
try:
c_file = env['BUILDERS']['CFile']
except KeyError:
c_file = SCons.Builder.Builder(action = {},
emitter = {},
suffix = {None:'$CFILESUFFIX'})
env['BUILDERS']['CFile'] = c_file
env.SetDefault(CFILESUFFIX = '.c')
try:
cxx_file = env['BUILDERS']['CXXFile']
except KeyError:
cxx_file = SCons.Builder.Builder(action = {},
emitter = {},
suffix = {None:'$CXXFILESUFFIX'})
env['BUILDERS']['CXXFile'] = cxx_file
env.SetDefault(CXXFILESUFFIX = '.cc')
return (c_file, cxx_file) | [
"This is a utility function that creates the CFile/CXXFile\n Builders in an Environment if they\n are not there already.\n\n If they are there already, we return the existing ones.\n\n This is a separate function because soooo many Tools\n use this functionality.\n\n The return is a 2-tuple of (CFile, CXXFile)\n "
] |
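A sketch of how a source-generator tool such as lex wires into the returned builders; SCons.Action is assumed to be imported, and the command strings are typical rather than authoritative.

def generate(env):
    c_file, cxx_file = createCFileBuilders(env)
    c_file.add_action('.l', SCons.Action.Action('$LEXCOM', '$LEXCOMSTR'))
    env['LEX'] = env.Detect('flex') or 'lex'
    env['LEXCOM'] = '$LEX $LEXFLAGS -t $SOURCES > $TARGET'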
Please provide a description of the function:def CreateJarBuilder(env):
try:
java_jar = env['BUILDERS']['JarFile']
except KeyError:
fs = SCons.Node.FS.get_default_fs()
jar_com = SCons.Action.Action('$JARCOM', '$JARCOMSTR')
java_jar = SCons.Builder.Builder(action = jar_com,
suffix = '$JARSUFFIX',
src_suffix = '$JAVACLASSSUFFIX',
src_builder = 'JavaClassFile',
source_factory = fs.Entry)
env['BUILDERS']['JarFile'] = java_jar
return java_jar | [
"The Jar builder expects a list of class files\n which it can package into a jar file.\n\n The jar tool provides an interface for passing other types\n of java files such as .java, directories or swig interfaces\n and will build them to class files in which it can package\n into the jar.\n "
] |
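A hypothetical usage sketch; note the builder is registered under the name JarFile (distinct from the higher-level Jar builder), and the class-file names are invented for the example.

env = Environment(tools=['javac', 'jar'])
CreateJarBuilder(env)
env.JarFile(target='app.jar', source=['Foo.class', 'Bar.class'])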
Please provide a description of the function:def get_builder(self, env):
        self.initializer.apply_tools(env)
        # Re-fetch: apply_tools() may have replaced this wrapper method
        # with a real Builder of the same name.
        builder = getattr(env, self.__name__)
if builder is self:
# There was no Builder added, which means no valid Tool
# for this name was found (or possibly there's a mismatch
# between the name we were called by and the Builder name
# added by the Tool module).
return None
self.initializer.remove_methods(env)
return builder | [
"\n Returns the appropriate real Builder for this method name\n after having the associated ToolInitializer object apply\n the appropriate Tool module.\n "
] |
Please provide a description of the function:def remove_methods(self, env):
for method in list(self.methods.values()):
env.RemoveMethod(method) | [
"\n Removes the methods that were added by the tool initialization\n so we no longer copy and re-bind them when the construction\n environment gets cloned.\n "
] |
Please provide a description of the function:def apply_tools(self, env):
for t in self.tools:
tool = SCons.Tool.Tool(t)
if tool.exists(env):
env.Tool(tool)
return | [
"\n Searches the list of associated Tool modules for one that\n exists, and applies that to the construction environment.\n "
] |
Please provide a description of the function:def get_contents_entry(node):
try:
node = node.disambiguate(must_exist=1)
except SCons.Errors.UserError:
# There was nothing on disk with which to disambiguate
# this entry. Leave it as an Entry, but return a null
# string so calls to get_contents() in emitters and the
# like (e.g. in qt.py) don't have to disambiguate by hand
# or catch the exception.
return ''
else:
return _get_contents_map[node._func_get_contents](node) | [
"Fetch the contents of the entry. Returns the exact binary\n contents of the file."
] |
Please provide a description of the function:def get_contents_dir(node):
contents = []
for n in sorted(node.children(), key=lambda t: t.name):
contents.append('%s %s\n' % (n.get_csig(), n.name))
return ''.join(contents) | [
"Return content signatures and names of all our children\n separated by new-lines. Ensure that the nodes are sorted."
] |
Please provide a description of the function:def get_build_env(self):
try:
return self._memo['get_build_env']
except KeyError:
pass
result = self.get_executor().get_build_env()
self._memo['get_build_env'] = result
return result | [
"Fetch the appropriate Environment to build this node.\n "
] |
Please provide a description of the function:def get_executor(self, create=1):
try:
executor = self.executor
except AttributeError:
if not create:
raise
try:
act = self.builder.action
except AttributeError:
executor = SCons.Executor.Null(targets=[self])
else:
executor = SCons.Executor.Executor(act,
self.env or self.builder.env,
[self.builder.overrides],
[self],
self.sources)
self.executor = executor
return executor | [
"Fetch the action executor for this node. Create one if\n there isn't already one, and requested to do so."
] |
Please provide a description of the function:def executor_cleanup(self):
try:
executor = self.get_executor(create=None)
except AttributeError:
pass
else:
if executor is not None:
executor.cleanup() | [
"Let the executor clean up any cached information."
] |
Please provide a description of the function:def prepare(self):
if self.depends is not None:
for d in self.depends:
if d.missing():
msg = "Explicit dependency `%s' not found, needed by target `%s'."
raise SCons.Errors.StopError(msg % (d, self))
if self.implicit is not None:
for i in self.implicit:
if i.missing():
msg = "Implicit dependency `%s' not found, needed by target `%s'."
raise SCons.Errors.StopError(msg % (i, self))
self.binfo = self.get_binfo() | [
"Prepare for this Node to be built.\n\n This is called after the Taskmaster has decided that the Node\n is out-of-date and must be rebuilt, but before actually calling\n the method to build the Node.\n\n This default implementation checks that explicit or implicit\n dependencies either exist or are derived, and initializes the\n BuildInfo structure that will hold the information about how\n this node is, uh, built.\n\n (The existence of source files is checked separately by the\n Executor, which aggregates checks for all of the targets built\n by a specific action.)\n\n Overriding this method allows for for a Node subclass to remove\n the underlying file from the file system. Note that subclass\n methods should call this base class method to get the child\n check and the BuildInfo structure.\n "
] |
Please provide a description of the function:def build(self, **kw):
try:
self.get_executor()(self, **kw)
except SCons.Errors.BuildError as e:
e.node = self
raise | [
"Actually build the node.\n\n This is called by the Taskmaster after it's decided that the\n Node is out-of-date and must be rebuilt, and after the prepare()\n method has gotten everything, uh, prepared.\n\n This method is called from multiple threads in a parallel build,\n so only do thread safe stuff here. Do thread unsafe stuff\n in built().\n\n "
] |
Please provide a description of the function:def built(self):
# Clear the implicit dependency caches of any Nodes
# waiting for this Node to be built.
for parent in self.waiting_parents:
parent.implicit = None
self.clear()
if self.pseudo:
if self.exists():
raise SCons.Errors.UserError("Pseudo target " + str(self) + " must not exist")
else:
if not self.exists() and do_store_info:
SCons.Warnings.warn(SCons.Warnings.TargetNotBuiltWarning,
"Cannot find target " + str(self) + " after building")
self.ninfo.update(self) | [
"Called just after this node is successfully built."
] |
Please provide a description of the function:def visited(self):
try:
binfo = self.binfo
except AttributeError:
# Apparently this node doesn't need build info, so
# don't bother calculating or storing it.
pass
else:
self.ninfo.update(self)
SCons.Node.store_info_map[self.store_info](self) | [
"Called just after this node has been visited (with or\n without a build)."
] |
Please provide a description of the function:def add_to_waiting_parents(self, node):
wp = self.waiting_parents
if node in wp:
return 0
wp.add(node)
return 1 | [
"\n Returns the number of nodes added to our waiting parents list:\n 1 if we add a unique waiting parent, 0 if not. (Note that the\n returned values are intended to be used to increment a reference\n count, so don't think you can \"clean up\" this function by using\n True and False instead...)\n "
] |
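The reference-count idiom the docstring hints at, in sketch form; node stands in for any SCons Node whose children are still pending.

ref_count = 0
for child in node.children():
    ref_count += child.add_to_waiting_parents(node)  # 1 only when newly registered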
Please provide a description of the function:def clear(self):
# The del_binfo() call here isn't necessary for normal execution,
# but is for interactive mode, where we might rebuild the same
# target and need to start from scratch.
self.del_binfo()
self.clear_memoized_values()
self.ninfo = self.new_ninfo()
self.executor_cleanup()
try:
delattr(self, '_calculated_sig')
except AttributeError:
pass
self.includes = None | [
"Completely clear a Node of all its cached state (so that it\n can be re-evaluated by interfaces that do continuous integration\n builds).\n "
] |
Please provide a description of the function:def has_builder(self):
try:
b = self.builder
except AttributeError:
# There was no explicit builder for this Node, so initialize
# the self.builder attribute to None now.
b = self.builder = None
return b is not None | [
"Return whether this Node has a builder or not.\n\n In Boolean tests, this turns out to be a *lot* more efficient\n than simply examining the builder attribute directly (\"if\n node.builder: ...\"). When the builder attribute is examined\n directly, it ends up calling __getattr__ for both the __len__\n and __nonzero__ attributes on instances of our Builder Proxy\n class(es), generating a bazillion extra calls and slowing\n things down immensely.\n "
] |
Please provide a description of the function:def get_implicit_deps(self, env, initial_scanner, path_func, kw = {}):
nodes = [self]
seen = set(nodes)
dependencies = []
path_memo = {}
root_node_scanner = self._get_scanner(env, initial_scanner, None, kw)
while nodes:
node = nodes.pop(0)
scanner = node._get_scanner(env, initial_scanner, root_node_scanner, kw)
if not scanner:
continue
try:
path = path_memo[scanner]
except KeyError:
path = path_func(scanner)
path_memo[scanner] = path
included_deps = [x for x in node.get_found_includes(env, scanner, path) if x not in seen]
if included_deps:
dependencies.extend(included_deps)
seen.update(included_deps)
nodes.extend(scanner.recurse_nodes(included_deps))
return dependencies | [
"Return a list of implicit dependencies for this node.\n\n This method exists to handle recursive invocation of the scanner\n on the implicit dependencies returned by the scanner, if the\n scanner's recursive flag says that we should.\n "
] |
Please provide a description of the function:def get_source_scanner(self, node):
scanner = None
try:
scanner = self.builder.source_scanner
except AttributeError:
pass
if not scanner:
# The builder didn't have an explicit scanner, so go look up
# a scanner from env['SCANNERS'] based on the node's scanner
# key (usually the file extension).
scanner = self.get_env_scanner(self.get_build_env())
if scanner:
scanner = scanner.select(node)
return scanner | [
"Fetch the source scanner for the specified node\n\n NOTE: \"self\" is the target being built, \"node\" is\n the source file for which we want to fetch the scanner.\n\n Implies self.has_builder() is true; again, expect to only be\n called from locations where this is already verified.\n\n This function may be called very often; it attempts to cache\n the scanner found to improve performance.\n "
] |
Please provide a description of the function:def scan(self):
# Don't bother scanning non-derived files, because we don't
# care what their dependencies are.
# Don't scan again, if we already have scanned.
if self.implicit is not None:
return
self.implicit = []
self.implicit_set = set()
self._children_reset()
if not self.has_builder():
return
build_env = self.get_build_env()
executor = self.get_executor()
# Here's where we implement --implicit-cache.
if implicit_cache and not implicit_deps_changed:
implicit = self.get_stored_implicit()
if implicit is not None:
# We now add the implicit dependencies returned from the
# stored .sconsign entry to have already been converted
# to Nodes for us. (We used to run them through a
# source_factory function here.)
# Update all of the targets with them. This
# essentially short-circuits an N*M scan of the
# sources for each individual target, which is a hell
# of a lot more efficient.
for tgt in executor.get_all_targets():
tgt.add_to_implicit(implicit)
if implicit_deps_unchanged or self.is_up_to_date():
return
# one of this node's sources has changed,
# so we must recalculate the implicit deps for all targets
for tgt in executor.get_all_targets():
tgt.implicit = []
tgt.implicit_set = set()
# Have the executor scan the sources.
executor.scan_sources(self.builder.source_scanner)
# If there's a target scanner, have the executor scan the target
# node itself and associated targets that might be built.
scanner = self.get_target_scanner()
if scanner:
executor.scan_targets(scanner) | [
"Scan this node's dependents for implicit dependencies."
] |