Please provide a description of the function:def _open_interface(self, client, uuid, iface, key): conn_id = self._validate_connection('open_interface', uuid, key) if conn_id is None: return conn_data = self._connections[uuid] conn_data['last_touch'] = monotonic() slug = self._build_device_slug(uuid) try: resp = yield self._manager.open_interface(conn_id, iface) except Exception as exc: self._logger.exception("Error in manager open interface") resp = {'success': False, 'reason': "Internal error: %s" % str(exc)} message = {'type': 'response', 'operation': 'open_interface', 'client': client} message['success'] = resp['success'] if not message['success']: message['failure_reason'] = resp['reason'] self._publish_response(slug, message)
[ "Open an interface on a connected device.\n\n Args:\n client (string): The client id who is requesting this operation\n uuid (int): The id of the device we're opening the interface on\n iface (string): The name of the interface that we're opening\n key (string): The key to authenticate the caller\n " ]
Please provide a description of the function:def _disconnect_hanging_devices(self): now = monotonic() for uuid, data in self._connections.items(): if (now - data['last_touch']) > self.client_timeout: self._logger.info("Disconnect inactive client %s from device 0x%X", data['client'], uuid) self._loop.add_callback(self._disconnect_from_device, uuid, data['key'], data['client'], unsolicited=True)
[ "Periodic callback that checks for devices that haven't been used and disconnects them." ]
Please provide a description of the function:def _disconnect_from_device(self, uuid, key, client, unsolicited=False): conn_id = self._validate_connection('disconnect', uuid, key) if conn_id is None: return conn_data = self._connections[uuid] slug = self._build_device_slug(uuid) message = {'client': client, 'type': 'response', 'operation': 'disconnect'} self.client.reset_sequence(self.topics.gateway_topic(slug, 'control/connect')) self.client.reset_sequence(self.topics.gateway_topic(slug, 'control/action')) try: resp = yield self._manager.disconnect(conn_id) except Exception as exc: self._logger.exception("Error in manager disconnect") resp = {'success': False, 'reason': "Internal error: %s" % str(exc)} # Remove any monitors that we registered for this device self._manager.remove_monitor(conn_data['report_monitor']) self._manager.remove_monitor(conn_data['trace_monitor']) if resp['success']: del self._connections[uuid] message['success'] = True else: message['success'] = False message['failure_reason'] = resp['reason'] self._logger.info("Client %s disconnected from device 0x%X", client, uuid) # Send a response for all requested disconnects and if we tried to disconnect the client # on our own and succeeded, send an unsolicited notification to that effect if unsolicited and resp['success']: self._publish_response(slug, {'client': client, 'type': 'notification', 'operation': 'disconnect'}) elif not unsolicited: self._publish_response(slug, message)
[ "Disconnect from a device that we have previously connected to.\n\n Args:\n uuid (int): The unique id of the device\n key (string): A 64 byte string used to secure this connection\n client (string): The client id for who is trying to connect\n to the device.\n unsolicited (bool): Whether the client asked us to disconnect or we\n are forcibly doing it. Forcible disconnections are sent as notifications\n instead of responses.\n " ]
Please provide a description of the function:def _connect_to_device(self, uuid, key, client): slug = self._build_device_slug(uuid) message = {'client': client, 'type': 'response', 'operation': 'connect'} self._logger.info("Connection attempt for device %d", uuid) # If someone is already connected, fail the request if uuid in self._connections: message['success'] = False message['failure_reason'] = 'Someone else is connected to the device' self._publish_status(slug, message) return # Otherwise try to connect resp = yield self._manager.connect(uuid) message['success'] = resp['success'] if resp['success']: conn_id = resp['connection_id'] self._connections[uuid] = {'key': key, 'client': client, 'connection_id': conn_id, 'last_touch': monotonic(), 'script': [], 'trace_accum': bytes(), 'last_trace': None, 'trace_scheduled': False, 'last_progress': None} else: message['failure_reason'] = resp['reason'] self._connections[uuid] = {} connection = self._connections[uuid] connection['report_monitor'] = self._manager.register_monitor(uuid, ['report'], self._notify_report) connection['trace_monitor'] = self._manager.register_monitor(uuid, ['trace'], self._notify_trace) self._publish_status(slug, message)
[ "Connect to a device given its uuid\n\n Args:\n uuid (int): The unique id of the device\n key (string): A 64 byte string used to secure this connection\n client (string): The client id for who is trying to connect\n to the device.\n " ]
Please provide a description of the function:def _notify_report(self, device_uuid, event_name, report): if device_uuid not in self._connections: self._logger.debug("Dropping report for device without an active connection, uuid=0x%X", device_uuid) return slug = self._build_device_slug(device_uuid) streaming_topic = self.topics.prefix + 'devices/{}/data/streaming'.format(slug) data = {'type': 'notification', 'operation': 'report'} ser = report.serialize() data['received_time'] = ser['received_time'].strftime("%Y%m%dT%H:%M:%S.%fZ").encode() data['report_origin'] = ser['origin'] data['report_format'] = ser['report_format'] data['report'] = binascii.hexlify(ser['encoded_report']) data['fragment_count'] = 1 data['fragment_index'] = 0 self._logger.debug("Publishing report: (topic=%s)", streaming_topic) self.client.publish(streaming_topic, data)
[ "Notify that a report has been received from a device.\n\n This routine is called synchronously in the event loop by the DeviceManager\n " ]
Please provide a description of the function:def _notify_trace(self, device_uuid, event_name, trace): if device_uuid not in self._connections: self._logger.debug("Dropping trace data for device without an active connection, uuid=0x%X", device_uuid) return conn_data = self._connections[device_uuid] last_trace = conn_data['last_trace'] now = monotonic() conn_data['trace_accum'] += bytes(trace) # If we're throttling tracing data, we need to see if we should accumulate this trace or # send it now. We accumulate if we've last sent tracing data less than self.throttle_trace seconds ago if last_trace is not None and (now - last_trace) < self.throttle_trace: if not conn_data['trace_scheduled']: self._loop.call_later(self.throttle_trace - (now - last_trace), self._send_accum_trace, device_uuid) conn_data['trace_scheduled'] = True self._logger.debug("Deferring trace data due to throttling uuid=0x%X", device_uuid) else: self._send_accum_trace(device_uuid)
[ "Notify that we have received tracing data from a device.\n\n This routine is called synchronously in the event loop by the DeviceManager\n " ]
Please provide a description of the function:def _send_accum_trace(self, device_uuid): if device_uuid not in self._connections: self._logger.debug("Dropping trace data for device without an active connection, uuid=0x%X", device_uuid) return conn_data = self._connections[device_uuid] trace = conn_data['trace_accum'] if len(trace) > 0: slug = self._build_device_slug(device_uuid) tracing_topic = self.topics.prefix + 'devices/{}/data/tracing'.format(slug) data = {'type': 'notification', 'operation': 'trace'} data['trace'] = binascii.hexlify(trace) data['trace_origin'] = device_uuid self._logger.debug('Publishing trace: (topic=%s)', tracing_topic) self.client.publish(tracing_topic, data) conn_data['trace_scheduled'] = False conn_data['last_trace'] = monotonic() conn_data['trace_accum'] = bytes()
[ "Send whatever accumulated tracing data we have for the device." ]
Please provide a description of the function:def _on_scan_request(self, sequence, topic, message): if messages.ProbeCommand.matches(message): self._logger.debug("Received probe message on topic %s, message=%s", topic, message) self._loop.add_callback(self._publish_scan_response, message['client']) else: self._logger.warning("Invalid message received on topic %s, message=%s", topic, message)
[ "Process a request for scanning information\n\n Args:\n sequence (int:) The sequence number of the packet received\n topic (string): The topic this message was received on\n message_type (string): The type of the packet received\n message (dict): The message itself\n " ]
Please provide a description of the function:def _publish_scan_response(self, client): devices = self._manager.scanned_devices converted_devs = [] for uuid, info in devices.items(): slug = self._build_device_slug(uuid) message = {} message['uuid'] = uuid if uuid in self._connections: message['user_connected'] = True elif 'user_connected' in info: message['user_connected'] = info['user_connected'] else: message['user_connected'] = False message['connection_string'] = slug message['signal_strength'] = info['signal_strength'] converted_devs.append({x: y for x, y in message.items()}) message['type'] = 'notification' message['operation'] = 'advertisement' self.client.publish(self.topics.gateway_topic(slug, 'data/advertisement'), message) probe_message = {} probe_message['type'] = 'response' probe_message['client'] = client probe_message['success'] = True probe_message['devices'] = converted_devs self.client.publish(self.topics.status, probe_message)
[ "Publish a scan response message\n\n The message contains all of the devices that are currently known\n to this agent. Connection strings for direct connections are\n translated to what is appropriate for this agent.\n\n Args:\n client (string): A unique id for the client that made this request\n " ]
Please provide a description of the function:def _versioned_lib_name(env, libnode, version, prefix, suffix, prefix_generator, suffix_generator, **kw): Verbose = False if Verbose: print("_versioned_lib_name: libnode={!r}".format(libnode.get_path())) print("_versioned_lib_name: version={!r}".format(version)) print("_versioned_lib_name: prefix={!r}".format(prefix)) print("_versioned_lib_name: suffix={!r}".format(suffix)) print("_versioned_lib_name: suffix_generator={!r}".format(suffix_generator)) versioned_name = os.path.basename(libnode.get_path()) if Verbose: print("_versioned_lib_name: versioned_name={!r}".format(versioned_name)) versioned_prefix = prefix_generator(env, **kw) versioned_suffix = suffix_generator(env, **kw) if Verbose: print("_versioned_lib_name: versioned_prefix={!r}".format(versioned_prefix)) print("_versioned_lib_name: versioned_suffix={!r}".format(versioned_suffix)) versioned_prefix_re = '^' + re.escape(versioned_prefix) versioned_suffix_re = re.escape(versioned_suffix) + '$' name = re.sub(versioned_prefix_re, prefix, versioned_name) name = re.sub(versioned_suffix_re, suffix, name) if Verbose: print("_versioned_lib_name: name={!r}".format(name)) return name
[ "For libnode='/optional/dir/libfoo.so.X.Y.Z' it returns 'libfoo.so'" ]
Please provide a description of the function:def _versioned_lib_suffix(env, suffix, version): Verbose = False if Verbose: print("_versioned_lib_suffix: suffix={!r}".format(suffix)) print("_versioned_lib_suffix: version={!r}".format(version)) if not suffix.endswith(version): suffix = suffix + '.' + version if Verbose: print("_versioned_lib_suffix: return suffix={!r}".format(suffix)) return suffix
[ "For suffix='.so' and version='0.1.2' it returns '.so.0.1.2'" ]
Please provide a description of the function:def _versioned_lib_soname(env, libnode, version, prefix, suffix, name_func): Verbose = False if Verbose: print("_versioned_lib_soname: version={!r}".format(version)) name = name_func(env, libnode, version, prefix, suffix) if Verbose: print("_versioned_lib_soname: name={!r}".format(name)) major = version.split('.')[0] soname = name + '.' + major if Verbose: print("_versioned_lib_soname: soname={!r}".format(soname)) return soname
[ "For libnode='/optional/dir/libfoo.so.X.Y.Z' it returns 'libfoo.so.X'" ]
Please provide a description of the function:def _versioned_lib_symlinks(env, libnode, version, prefix, suffix, name_func, soname_func): Verbose = False if Verbose: print("_versioned_lib_symlinks: libnode={!r}".format(libnode.get_path())) print("_versioned_lib_symlinks: version={!r}".format(version)) if sys.platform.startswith('openbsd'): # OpenBSD uses x.y shared library versioning numbering convention # and doesn't use symlinks to backwards-compatible libraries if Verbose: print("_versioned_lib_symlinks: return symlinks={!r}".format(None)) return None linkdir = libnode.get_dir() if Verbose: print("_versioned_lib_symlinks: linkdir={!r}".format(linkdir.get_path())) name = name_func(env, libnode, version, prefix, suffix) if Verbose: print("_versioned_lib_symlinks: name={!r}".format(name)) soname = soname_func(env, libnode, version, prefix, suffix) link0 = env.fs.File(soname, linkdir) link1 = env.fs.File(name, linkdir) # We create direct symlinks, not daisy-chained. if link0 == libnode: # This enables SHLIBVERSION without periods (e.g. SHLIBVERSION=1) symlinks = [ (link1, libnode) ] else: # This handles usual SHLIBVERSION, i.e. '1.2', '1.2.3', etc. symlinks = [ (link0, libnode), (link1, libnode) ] if Verbose: print("_versioned_lib_symlinks: return symlinks={!r}".format(SCons.Tool.StringizeLibSymlinks(symlinks))) return symlinks
[ "Generate link names that should be created for a versioned shared lirbrary.\n Returns a dictionary in the form { linkname : linktarget }\n " ]
Please provide a description of the function:def _setup_versioned_lib_variables(env, **kw): tool = None try: tool = kw['tool'] except KeyError: pass use_soname = False try: use_soname = kw['use_soname'] except KeyError: pass # The $_SHLIBVERSIONFLAGS define extra commandline flags used when # building VERSIONED shared libraries. It's always set, but used only # when VERSIONED library is built (see __SHLIBVERSIONFLAGS in SCons/Defaults.py). if use_soname: # If the linker uses SONAME, then we need this little automaton if tool == 'sunlink': env['_SHLIBVERSIONFLAGS'] = '$SHLIBVERSIONFLAGS -h $_SHLIBSONAME' env['_LDMODULEVERSIONFLAGS'] = '$LDMODULEVERSIONFLAGS -h $_LDMODULESONAME' else: env['_SHLIBVERSIONFLAGS'] = '$SHLIBVERSIONFLAGS -Wl,-soname=$_SHLIBSONAME' env['_LDMODULEVERSIONFLAGS'] = '$LDMODULEVERSIONFLAGS -Wl,-soname=$_LDMODULESONAME' env['_SHLIBSONAME'] = '${ShLibSonameGenerator(__env__,TARGET)}' env['_LDMODULESONAME'] = '${LdModSonameGenerator(__env__,TARGET)}' env['ShLibSonameGenerator'] = SCons.Tool.ShLibSonameGenerator env['LdModSonameGenerator'] = SCons.Tool.LdModSonameGenerator else: env['_SHLIBVERSIONFLAGS'] = '$SHLIBVERSIONFLAGS' env['_LDMODULEVERSIONFLAGS'] = '$LDMODULEVERSIONFLAGS' # LDMODULEVERSIONFLAGS should always default to $SHLIBVERSIONFLAGS env['LDMODULEVERSIONFLAGS'] = '$SHLIBVERSIONFLAGS'
[ "\n Setup all variables required by the versioning machinery\n " ]
Please provide a description of the function:def generate(env): SCons.Tool.createSharedLibBuilder(env) SCons.Tool.createProgBuilder(env) env['SHLINK'] = '$LINK' env['SHLINKFLAGS'] = SCons.Util.CLVar('$LINKFLAGS -shared') env['SHLINKCOM'] = '$SHLINK -o $TARGET $SHLINKFLAGS $__SHLIBVERSIONFLAGS $__RPATH $SOURCES $_LIBDIRFLAGS $_LIBFLAGS' # don't set up the emitter, cause AppendUnique will generate a list # starting with None :-( env.Append(SHLIBEMITTER = [shlib_emitter]) env['SMARTLINK'] = smart_link env['LINK'] = "$SMARTLINK" env['LINKFLAGS'] = SCons.Util.CLVar('') # __RPATH is only set to something ($_RPATH typically) on platforms that support it. env['LINKCOM'] = '$LINK -o $TARGET $LINKFLAGS $__RPATH $SOURCES $_LIBDIRFLAGS $_LIBFLAGS' env['LIBDIRPREFIX']='-L' env['LIBDIRSUFFIX']='' env['_LIBFLAGS']='${_stripixes(LIBLINKPREFIX, LIBS, LIBLINKSUFFIX, LIBPREFIXES, LIBSUFFIXES, __env__)}' env['LIBLINKPREFIX']='-l' env['LIBLINKSUFFIX']='' if env['PLATFORM'] == 'hpux': env['SHLIBSUFFIX'] = '.sl' elif env['PLATFORM'] == 'aix': env['SHLIBSUFFIX'] = '.a' # For most platforms, a loadable module is the same as a shared # library. Platforms which are different can override these, but # setting them the same means that LoadableModule works everywhere. SCons.Tool.createLoadableModuleBuilder(env) env['LDMODULE'] = '$SHLINK' env.Append(LDMODULEEMITTER = [ldmod_emitter]) env['LDMODULEPREFIX'] = '$SHLIBPREFIX' env['LDMODULESUFFIX'] = '$SHLIBSUFFIX' env['LDMODULEFLAGS'] = '$SHLINKFLAGS' env['LDMODULECOM'] = '$LDMODULE -o $TARGET $LDMODULEFLAGS $__LDMODULEVERSIONFLAGS $__RPATH $SOURCES $_LIBDIRFLAGS $_LIBFLAGS' env['LDMODULEVERSION'] = '$SHLIBVERSION' env['LDMODULENOVERSIONSYMLINKS'] = '$SHLIBNOVERSIONSYMLINKS'
[ "Add Builders and construction variables for gnulink to an Environment." ]
Please provide a description of the function:def main(argv=None, loop=SharedLoop, max_time=None): should_raise = argv is not None if argv is None: argv = sys.argv[1:] parser = build_parser() cmd_args = parser.parse_args(argv) configure_logging(cmd_args.verbose) logger = logging.getLogger(__name__) try: args = {} if cmd_args.config is not None: try: with open(cmd_args.config, "r") as conf: args = json.load(conf) except IOError as exc: raise ScriptError("Could not open config file %s due to %s" % (cmd_args.config, str(exc)), 2) except ValueError as exc: raise ScriptError("Could not parse JSON from config file %s due to %s" % (cmd_args.config, str(exc)), 3) except TypeError as exc: raise ScriptError("You must pass the path to a json config file", 4) logger.critical("Starting gateway") gateway = IOTileGateway(args, loop=loop) loop.run_coroutine(gateway.start()) logger.critical("Gateway running") # Run forever until we receive a ctrl-c # (allow quitting early after max_time seconds for testing) loop.wait_for_interrupt(max_time=max_time) loop.run_coroutine(gateway.stop()) except ScriptError as exc: if should_raise: raise exc logger.fatal("Quitting due to error: %s", exc.msg) return exc.code except Exception as exc: # pylint: disable=W0703 if should_raise: raise exc logger.exception("Fatal error running gateway") return 1 return 0
[ "Main entry point for iotile-gateway." ]
Please provide a description of the function:def verify(self, obj): if obj is not None: raise ValidationError("Object is not None", reason='%s is not None' % str(obj), object=obj) return obj
[ "Verify that the object conforms to this verifier's schema\n\n Args:\n obj (object): A python object to verify\n\n Raises:\n ValidationError: If there is a problem verifying the dictionary, a\n ValidationError is thrown with at least the reason key set indicating\n the reason for the lack of validation.\n " ]
Please provide a description of the function:def copy(self): return _TimeAnchor(self.reading_id, self.uptime, self.utc, self.is_break, self.exact)
[ "Return a copy of this _TimeAnchor." ]
Please provide a description of the function:def anchor_stream(self, stream_id, converter="rtc"): if isinstance(converter, str): converter = self._known_converters.get(converter) if converter is None: raise ArgumentError("Unknown anchor converter string: %s" % converter, known_converters=list(self._known_converters)) self._anchor_streams[stream_id] = converter
[ "Mark a stream as containing anchor points." ]
Please provide a description of the function:def id_range(self): if len(self._anchor_points) == 0: return (0, 0) return (self._anchor_points[0].reading_id, self._anchor_points[-1].reading_id)
[ "Get the range of archor reading_ids.\n\n Returns:\n (int, int): The lowest and highest reading ids.\n\n If no reading ids have been loaded, (0, 0) is returned.\n " ]
Please provide a description of the function:def convert_rtc(cls, timestamp): if timestamp & (1 << 31): timestamp &= ~(1 << 31) delta = datetime.timedelta(seconds=timestamp) return cls._Y2KReference + delta
[ "Convert a number of seconds since 1/1/2000 to UTC time." ]
Please provide a description of the function:def _convert_epoch_anchor(cls, reading): delta = datetime.timedelta(seconds=reading.value) return cls._EpochReference + delta
[ "Convert a reading containing an epoch timestamp to datetime." ]
Please provide a description of the function:def add_point(self, reading_id, uptime=None, utc=None, is_break=False): if reading_id == 0: return if uptime is None and utc is None: return if uptime is not None and uptime & (1 << 31): if utc is not None: return uptime &= ~(1 << 31) utc = self.convert_rtc(uptime) uptime = None anchor = _TimeAnchor(reading_id, uptime, utc, is_break, exact=utc is not None) if anchor in self._anchor_points: return self._anchor_points.add(anchor) self._prepared = False
[ "Add a time point that could be used as a UTC reference." ]
Please provide a description of the function:def add_reading(self, reading): is_break = False utc = None if reading.stream in self._break_streams: is_break = True if reading.stream in self._anchor_streams: utc = self._anchor_streams[reading.stream](reading) self.add_point(reading.reading_id, reading.raw_time, utc, is_break=is_break)
[ "Add an IOTileReading." ]
Please provide a description of the function:def add_report(self, report, ignore_errors=False): if not isinstance(report, SignedListReport): if ignore_errors: return raise ArgumentError("You can only add SignedListReports to a UTCAssigner", report=report) for reading in report.visible_readings: self.add_reading(reading) self.add_point(report.report_id, report.sent_timestamp, report.received_time)
[ "Add all anchors from a report." ]
Please provide a description of the function:def assign_utc(self, reading_id, uptime=None, prefer="before"): if prefer not in ("before", "after"): raise ArgumentError("Invalid prefer parameter: {}, must be 'before' or 'after'".format(prefer)) if len(self._anchor_points) == 0: return None if reading_id > self._anchor_points[-1].reading_id: return None i = self._anchor_points.bisect_key_left(reading_id) found_id = False crossed_break = False exact = True last = self._anchor_points[i].copy() if uptime is not None: last.uptime = uptime if last.reading_id == reading_id: found_id = True if last.utc is not None: return UTCAssignment(reading_id, last.utc, found_id, exact, crossed_break) left_assign = self._fix_left(reading_id, last, i, found_id) if left_assign is not None and left_assign.exact: return left_assign right_assign = self._fix_right(reading_id, last, i, found_id) if right_assign is not None and right_assign.exact: return right_assign return self._pick_best_fix(left_assign, right_assign, prefer)
[ "Assign a utc datetime to a reading id.\n\n This method will return an object with assignment information or None\n if a utc value cannot be assigned. The assignment object returned\n contains a utc property that has the asssigned UTC as well as other\n properties describing how reliable the assignment is.\n\n Args:\n reading_id (int): The monotonic reading id that we wish to assign\n a utc timestamp to.\n uptime (int): Optional uptime that should be associated with the\n reading id. If this is not specified and the reading_id is\n found in the anchor points passed to this class then the\n uptime from the corresponding anchor point will be used.\n prefer (str): There are two possible directions that can be used\n to assign a UTC timestamp (the nearest anchor before or after the\n reading). If both directions are of similar quality, the choice\n is arbitrary. Passing prefer=\"before\" will use the anchor point\n before the reading. Passing prefer=\"after\" will use the anchor\n point after the reading. Default: before.\n\n Returns:\n UTCAssignment: The assigned UTC time or None if assignment is impossible.\n " ]
Please provide a description of the function:def ensure_prepared(self): if self._prepared: return exact_count = 0 fixed_count = 0 inexact_count = 0 self._logger.debug("Preparing UTCAssigner (%d total anchors)", len(self._anchor_points)) for curr in self._anchor_points: if not curr.exact: assignment = self.assign_utc(curr.reading_id, curr.uptime) if assignment is not None and assignment.exact: curr.utc = assignment.utc curr.exact = True fixed_count += 1 else: inexact_count += 1 else: exact_count += 1 self._logger.debug("Prepared UTCAssigner with %d reference points, " "%d exact anchors and %d inexact anchors", exact_count, fixed_count, inexact_count) self._prepared = True
[ "Calculate and cache UTC values for all exactly known anchor points." ]
Please provide a description of the function:def fix_report(self, report, errors="drop", prefer="before"): if not isinstance(report, SignedListReport): raise ArgumentError("Report must be a SignedListReport", report=report) if errors not in ('drop',): raise ArgumentError("Unknown errors handler: {}, supported=['drop']".format(errors)) self.ensure_prepared() fixed_readings = [] dropped_readings = 0 for reading in report.visible_readings: assignment = self.assign_utc(reading.reading_id, reading.raw_time, prefer=prefer) if assignment is None: dropped_readings += 1 continue fixed_reading = IOTileReading(assignment.rtc_value, reading.stream, reading.value, reading_time=assignment.utc, reading_id=reading.reading_id) fixed_readings.append(fixed_reading) fixed_report = SignedListReport.FromReadings(report.origin, fixed_readings, report_id=report.report_id, selector=report.streamer_selector, streamer=report.origin_streamer, sent_timestamp=report.sent_timestamp) fixed_report.received_time = report.received_time if dropped_readings > 0: self._logger.warning("Dropped %d readings of %d when fixing UTC timestamps in report 0x%08X for device 0x%08X", dropped_readings, len(report.visible_readings), report.report_id, report.origin) return fixed_report
[ "Perform utc assignment on all readings in a report.\n\n The returned report will have all reading timestamps in UTC. This only\n works on SignedListReport objects. Note that the report should\n typically have previously been added to the UTC assigner using\n add_report or no reference points from the report will be used.\n\n Args:\n report (SignedListReport): The report that we should fix.\n errors (str): The behavior that we should have when we can't\n fix a given reading. The only currently support behavior is\n drop, which means that the reading will be dropped and not\n included in the new report.\n prefer (str): Whether to prefer fixing readings by looking for\n reference points after the reading or before, all other things\n being equal. See the description of ``assign_utc``.\n\n Returns:\n SignedListReport: The report with UTC timestamps.\n " ]
Please provide a description of the function:def _fix_left(self, reading_id, last, start, found_id): accum_delta = 0 exact = True crossed_break = False if start == 0: return None for curr in self._anchor_points.islice(None, start - 1, reverse=True): if curr.uptime is None or last.uptime is None: exact = False elif curr.is_break or last.uptime < curr.uptime: exact = False crossed_break = True else: accum_delta += last.uptime - curr.uptime if curr.utc is not None: time_delta = datetime.timedelta(seconds=accum_delta) return UTCAssignment(reading_id, curr.utc + time_delta, found_id, exact, crossed_break) last = curr return None
[ "Fix a reading by looking for the nearest anchor point before it." ]
Please provide a description of the function:def sconsign_dir(node): if not node._sconsign: import SCons.SConsign node._sconsign = SCons.SConsign.ForDirectory(node) return node._sconsign
[ "Return the .sconsign file info for this directory,\n creating it first if necessary." ]
Please provide a description of the function:def invalidate_node_memos(targets): from traceback import extract_stack # First check if the cache really needs to be flushed. Only # actions run in the SConscript with Execute() seem to be # affected. XXX The way to check if Execute() is in the stacktrace # is a very dirty hack and should be replaced by a more sensible # solution. for f in extract_stack(): if f[2] == 'Execute' and f[0][-14:] == 'Environment.py': break else: # Don't have to invalidate, so return return if not SCons.Util.is_List(targets): targets = [targets] for entry in targets: # If the target is a Node object, clear the cache. If it is a # filename, look up potentially existing Node object first. try: entry.clear_memoized_values() except AttributeError: # Not a Node object, try to look up Node by filename. XXX # This creates Node objects even for those filenames which # do not correspond to an existing Node object. node = get_default_fs().Entry(entry) if node: node.clear_memoized_values()
[ "\n Invalidate the memoized values of all Nodes (files or directories)\n that are associated with the given entries. Has been added to\n clear the cache of nodes affected by a direct execution of an\n action (e.g. Delete/Copy/Chmod). Existing Node caches become\n inconsistent if the action is run through Execute(). The argument\n `targets` can be a single Node object or filename, or a sequence\n of Nodes/filenames.\n " ]
Please provide a description of the function:def __get_base_path(self): entry = self.get() return SCons.Subst.SpecialAttrWrapper(SCons.Util.splitext(entry.get_path())[0], entry.name + "_base")
[ "Return the file's directory and file name, with the\n suffix stripped." ]
Please provide a description of the function:def __get_posix_path(self): if os_sep_is_slash: return self else: entry = self.get() r = entry.get_path().replace(OS_SEP, '/') return SCons.Subst.SpecialAttrWrapper(r, entry.name + "_posix")
[ "Return the path with / as the path separator,\n regardless of platform." ]
Please provide a description of the function:def __get_windows_path(self): if OS_SEP == '\\': return self else: entry = self.get() r = entry.get_path().replace(OS_SEP, '\\') return SCons.Subst.SpecialAttrWrapper(r, entry.name + "_windows")
[ "Return the path with \\ as the path separator,\n regardless of platform." ]
Please provide a description of the function:def must_be_same(self, klass): if isinstance(self, klass) or klass is Entry: return raise TypeError("Tried to lookup %s '%s' as a %s." %\ (self.__class__.__name__, self.get_internal_path(), klass.__name__))
[ "\n This node, which already existed, is being looked up as the\n specified klass. Raise an exception if it isn't.\n " ]
Please provide a description of the function:def srcnode(self): srcdir_list = self.dir.srcdir_list() if srcdir_list: srcnode = srcdir_list[0].Entry(self.name) srcnode.must_be_same(self.__class__) return srcnode return self
[ "If this node is in a build path, return the node\n corresponding to its source file. Otherwise, return\n ourself.\n " ]
Please provide a description of the function:def get_path(self, dir=None): if not dir: dir = self.fs.getcwd() if self == dir: return '.' path_elems = self.get_path_elements() pathname = '' try: i = path_elems.index(dir) except ValueError: for p in path_elems[:-1]: pathname += p.dirname else: for p in path_elems[i+1:-1]: pathname += p.dirname return pathname + path_elems[-1].name
[ "Return path relative to the current working directory of the\n Node.FS.Base object that owns us." ]
Please provide a description of the function:def set_src_builder(self, builder): self.sbuilder = builder if not self.has_builder(): self.builder_set(builder)
[ "Set the source code builder for this node." ]
Please provide a description of the function:def src_builder(self): try: scb = self.sbuilder except AttributeError: scb = self.dir.src_builder() self.sbuilder = scb return scb
[ "Fetch the source code builder for this node.\n\n If there isn't one, we cache the source code builder specified\n for the directory (which in turn will cache the value from its\n parent directory, and so on up to the file system root).\n " ]
Please provide a description of the function:def Rfindalldirs(self, pathlist): try: memo_dict = self._memo['Rfindalldirs'] except KeyError: memo_dict = {} self._memo['Rfindalldirs'] = memo_dict else: try: return memo_dict[pathlist] except KeyError: pass create_dir_relative_to_self = self.Dir result = [] for path in pathlist: if isinstance(path, SCons.Node.Node): result.append(path) else: dir = create_dir_relative_to_self(path) result.extend(dir.get_all_rdirs()) memo_dict[pathlist] = result return result
[ "\n Return all of the directories for a given path list, including\n corresponding \"backing\" directories in any repositories.\n\n The Node lookups are relative to this Node (typically a\n directory), so memoizing result saves cycles from looking\n up the same path for each target in a given directory.\n " ]
Please provide a description of the function:def RDirs(self, pathlist): cwd = self.cwd or self.fs._cwd return cwd.Rfindalldirs(pathlist)
[ "Search for a list of directories in the Repository list." ]
Please provide a description of the function:def rfile(self): self.__class__ = File self._morph() self.clear() return File.rfile(self)
[ "We're a generic Entry, but the caller is actually looking for\n a File at this point, so morph into one." ]
Please provide a description of the function:def get_text_contents(self): try: self = self.disambiguate(must_exist=1) except SCons.Errors.UserError: # There was nothing on disk with which to disambiguate # this entry. Leave it as an Entry, but return a null # string so calls to get_text_contents() in emitters and # the like (e.g. in qt.py) don't have to disambiguate by # hand or catch the exception. return '' else: return self.get_text_contents()
[ "Fetch the decoded text contents of a Unicode encoded Entry.\n\n Since this should return the text contents from the file\n system, we check to see into what sort of subclass we should\n morph this Entry." ]
Please provide a description of the function:def must_be_same(self, klass): if self.__class__ is not klass: self.__class__ = klass self._morph() self.clear()
[ "Called to make sure a Node is a Dir. Since we're an\n Entry, we can morph into one." ]
Please provide a description of the function:def chdir(self, dir, change_os_dir=0): curr=self._cwd try: if dir is not None: self._cwd = dir if change_os_dir: os.chdir(dir.get_abspath()) except OSError: self._cwd = curr raise
[ "Change the current working directory for lookups.\n If change_os_dir is true, we will also change the \"real\" cwd\n to match.\n " ]
Please provide a description of the function:def get_root(self, drive): drive = _my_normcase(drive) try: return self.Root[drive] except KeyError: root = RootDir(drive, self) self.Root[drive] = root if not drive: self.Root[self.defaultDrive] = root elif drive == self.defaultDrive: self.Root[''] = root return root
[ "\n Returns the root directory for the specified drive, creating\n it if necessary.\n " ]
Please provide a description of the function:def _lookup(self, p, directory, fsclass, create=1): if isinstance(p, Base): # It's already a Node.FS object. Make sure it's the right # class and return. p.must_be_same(fsclass) return p # str(p) in case it's something like a proxy object p = str(p) if not os_sep_is_slash: p = p.replace(OS_SEP, '/') if p[0:1] == '#': # There was an initial '#', so we strip it and override # whatever directory they may have specified with the # top-level SConstruct directory. p = p[1:] directory = self.Top # There might be a drive letter following the # '#'. Although it is not described in the SCons man page, # the regression test suite explicitly tests for that # syntax. It seems to mean the following thing: # # Assuming the SCons top dir is in C:/xxx/yyy, # '#X:/toto' means X:/xxx/yyy/toto. # # i.e. it assumes that the X: drive has a directory # structure similar to the one found on drive C:. if do_splitdrive: drive, p = _my_splitdrive(p) if drive: root = self.get_root(drive) else: root = directory.root else: root = directory.root # We can only strip trailing '/' after splitting the drive # since the drive might be the UNC '//' prefix. p = p.strip('/') needs_normpath = needs_normpath_match(p) # The path is relative to the top-level SCons directory. if p in ('', '.'): p = directory.get_labspath() else: p = directory.get_labspath() + '/' + p else: if do_splitdrive: drive, p = _my_splitdrive(p) if drive and not p: # This causes a naked drive letter to be treated # as a synonym for the root directory on that # drive. p = '/' else: drive = '' # We can only strip trailing '/' since the drive might be the # UNC '//' prefix. if p != '/': p = p.rstrip('/') needs_normpath = needs_normpath_match(p) if p[0:1] == '/': # Absolute path root = self.get_root(drive) else: # This is a relative lookup or to the current directory # (the path name is not absolute). Add the string to the # appropriate directory lookup path, after which the whole # thing gets normalized. if directory: if not isinstance(directory, Dir): directory = self.Dir(directory) else: directory = self._cwd if p in ('', '.'): p = directory.get_labspath() else: p = directory.get_labspath() + '/' + p if drive: root = self.get_root(drive) else: root = directory.root if needs_normpath is not None: # Normalize a pathname. Will return the same result for # equivalent paths. # # We take advantage of the fact that we have an absolute # path here for sure. In addition, we know that the # components of lookup path are separated by slashes at # this point. Because of this, this code is about 2X # faster than calling os.path.normpath() followed by # replacing os.sep with '/' again. ins = p.split('/')[1:] outs = [] for d in ins: if d == '..': try: outs.pop() except IndexError: pass elif d not in ('', '.'): outs.append(d) p = '/' + '/'.join(outs) return root._lookup_abs(p, fsclass, create)
[ "\n The generic entry point for Node lookup with user-supplied data.\n\n This translates arbitrary input into a canonical Node.FS object\n of the specified fsclass. The general approach for strings is\n to turn it into a fully normalized absolute path and then call\n the root directory's lookup_abs() method for the heavy lifting.\n\n If the path name begins with '#', it is unconditionally\n interpreted relative to the top-level directory of this FS. '#'\n is treated as a synonym for the top-level SConstruct directory,\n much like '~' is treated as a synonym for the user's home\n directory in a UNIX shell. So both '#foo' and '#/foo' refer\n to the 'foo' subdirectory underneath the top-level SConstruct\n directory.\n\n If the path name is relative, then the path is looked up relative\n to the specified directory, or the current directory (self._cwd,\n typically the SConscript directory) if the specified directory\n is None.\n " ]
Please provide a description of the function:def Entry(self, name, directory = None, create = 1): return self._lookup(name, directory, Entry, create)
[ "Look up or create a generic Entry node with the specified name.\n If the name is a relative path (begins with ./, ../, or a file\n name), then it is looked up relative to the supplied directory\n node, or to the top level directory of the FS (supplied at\n construction time) if no directory is supplied.\n " ]
Please provide a description of the function:def File(self, name, directory = None, create = 1): return self._lookup(name, directory, File, create)
[ "Look up or create a File node with the specified name. If\n the name is a relative path (begins with ./, ../, or a file name),\n then it is looked up relative to the supplied directory node,\n or to the top level directory of the FS (supplied at construction\n time) if no directory is supplied.\n\n This method will raise TypeError if a directory is found at the\n specified path.\n " ]
Please provide a description of the function:def Dir(self, name, directory = None, create = True): return self._lookup(name, directory, Dir, create)
[ "Look up or create a Dir node with the specified name. If\n the name is a relative path (begins with ./, ../, or a file name),\n then it is looked up relative to the supplied directory node,\n or to the top level directory of the FS (supplied at construction\n time) if no directory is supplied.\n\n This method will raise TypeError if a normal file is found at the\n specified path.\n " ]
Please provide a description of the function:def VariantDir(self, variant_dir, src_dir, duplicate=1): if not isinstance(src_dir, SCons.Node.Node): src_dir = self.Dir(src_dir) if not isinstance(variant_dir, SCons.Node.Node): variant_dir = self.Dir(variant_dir) if src_dir.is_under(variant_dir): raise SCons.Errors.UserError("Source directory cannot be under variant directory.") if variant_dir.srcdir: if variant_dir.srcdir == src_dir: return # We already did this. raise SCons.Errors.UserError("'%s' already has a source directory: '%s'."%(variant_dir, variant_dir.srcdir)) variant_dir.link(src_dir, duplicate)
[ "Link the supplied variant directory to the source directory\n for purposes of building files." ]
Please provide a description of the function:def Repository(self, *dirs): for d in dirs: if not isinstance(d, SCons.Node.Node): d = self.Dir(d) self.Top.addRepository(d)
[ "Specify Repository directories to search." ]
Please provide a description of the function:def PyPackageDir(self, modulename): dirpath = '' if sys.version_info[0] < 3 or (sys.version_info[0] == 3 and sys.version_info[1] in (0,1,2,3,4)): # Python2 Code import imp splitname = modulename.split('.') srchpths = sys.path for item in splitname: file, path, desc = imp.find_module(item, srchpths) if file is not None: path = os.path.dirname(path) srchpths = [path] dirpath = path else: # Python3 Code import importlib.util modspec = importlib.util.find_spec(modulename) dirpath = os.path.dirname(modspec.origin) return self._lookup(dirpath, None, Dir, True)
[ "Locate the directory of a given python module name\n\t\t\n For example scons might resolve to\n Windows: C:\\Python27\\Lib\\site-packages\\scons-2.5.1\n Linux: /usr/lib/scons\n\n This can be useful when we want to determine a toolpath based on a python module name" ]
Please provide a description of the function:def variant_dir_target_climb(self, orig, dir, tail): targets = [] message = None fmt = "building associated VariantDir targets: %s" start_dir = dir while dir: for bd in dir.variant_dirs: if start_dir.is_under(bd): # If already in the build-dir location, don't reflect return [orig], fmt % str(orig) p = os.path.join(bd._path, *tail) targets.append(self.Entry(p)) tail = [dir.name] + tail dir = dir.up() if targets: message = fmt % ' '.join(map(str, targets)) return targets, message
[ "Create targets in corresponding variant directories\n\n Climb the directory tree, and look up path names\n relative to any linked variant directories we find.\n\n Even though this loops and walks up the tree, we don't memoize\n the return value because this is really only used to process\n the command-line targets.\n " ]
Please provide a description of the function:def Glob(self, pathname, ondisk=True, source=True, strings=False, exclude=None, cwd=None): if cwd is None: cwd = self.getcwd() return cwd.glob(pathname, ondisk, source, strings, exclude)
[ "\n Globs\n\n This is mainly a shim layer\n " ]
Please provide a description of the function:def _morph(self): self.repositories = [] self.srcdir = None self.entries = {} self.entries['.'] = self self.entries['..'] = self.dir self.cwd = self self.searched = 0 self._sconsign = None self.variant_dirs = [] self.root = self.dir.root self.changed_since_last_build = 3 self._func_sconsign = 1 self._func_exists = 2 self._func_get_contents = 2 self._abspath = SCons.Util.silent_intern(self.dir.entry_abspath(self.name)) self._labspath = SCons.Util.silent_intern(self.dir.entry_labspath(self.name)) if self.dir._path == '.': self._path = SCons.Util.silent_intern(self.name) else: self._path = SCons.Util.silent_intern(self.dir.entry_path(self.name)) if self.dir._tpath == '.': self._tpath = SCons.Util.silent_intern(self.name) else: self._tpath = SCons.Util.silent_intern(self.dir.entry_tpath(self.name)) self._path_elements = self.dir._path_elements + [self] # For directories, we make a difference between the directory # 'name' and the directory 'dirname'. The 'name' attribute is # used when we need to print the 'name' of the directory or # when we it is used as the last part of a path. The 'dirname' # is used when the directory is not the last element of the # path. The main reason for making that distinction is that # for RoorDir's the dirname can not be easily inferred from # the name. For example, we have to add a '/' after a drive # letter but not after a UNC path prefix ('//'). self.dirname = self.name + OS_SEP # Don't just reset the executor, replace its action list, # because it might have some pre-or post-actions that need to # be preserved. # # But don't reset the executor if there is a non-null executor # attached already. The existing executor might have other # targets, in which case replacing the action list with a # Mkdir action is a big mistake. if not hasattr(self, 'executor'): self.builder = get_MkdirBuilder() self.get_executor().set_action_list(self.builder.action) else: # Prepend MkdirBuilder action to existing action list l = self.get_executor().action_list a = get_MkdirBuilder().action l.insert(0, a) self.get_executor().set_action_list(l)
[ "Turn a file system Node (either a freshly initialized directory\n object or a separate Entry object) into a proper directory object.\n\n Set up this directory's entries and hook it into the file\n system tree. Specify that directories (this Node) don't use\n signatures for calculating whether they're current.\n " ]
Please provide a description of the function:def __clearRepositoryCache(self, duplicate=None): for node in list(self.entries.values()): if node != self.dir: if node != self and isinstance(node, Dir): node.__clearRepositoryCache(duplicate) else: node.clear() try: del node._srcreps except AttributeError: pass if duplicate is not None: node.duplicate=duplicate
[ "Called when we change the repository(ies) for a directory.\n This clears any cached information that is invalidated by changing\n the repository." ]
Please provide a description of the function:def Dir(self, name, create=True): return self.fs.Dir(name, self, create)
[ "\n Looks up or creates a directory node named 'name' relative to\n this directory.\n " ]
Please provide a description of the function:def link(self, srcdir, duplicate): self.srcdir = srcdir self.duplicate = duplicate self.__clearRepositoryCache(duplicate) srcdir.variant_dirs.append(self)
[ "Set this directory as the variant directory for the\n supplied source directory." ]
Please provide a description of the function:def getRepositories(self): if self.srcdir and not self.duplicate: return self.srcdir.get_all_rdirs() + self.repositories return self.repositories
[ "Returns a list of repositories for this directory.\n " ]
Please provide a description of the function:def rel_path(self, other): # This complicated and expensive method, which constructs relative # paths between arbitrary Node.FS objects, is no longer used # by SCons itself. It was introduced to store dependency paths # in .sconsign files relative to the target, but that ended up # being significantly inefficient. # # We're continuing to support the method because some SConstruct # files out there started using it when it was available, and # we're all about backwards compatibility.. try: memo_dict = self._memo['rel_path'] except KeyError: memo_dict = {} self._memo['rel_path'] = memo_dict else: try: return memo_dict[other] except KeyError: pass if self is other: result = '.' elif not other in self._path_elements: try: other_dir = other.get_dir() except AttributeError: result = str(other) else: if other_dir is None: result = other.name else: dir_rel_path = self.rel_path(other_dir) if dir_rel_path == '.': result = other.name else: result = dir_rel_path + OS_SEP + other.name else: i = self._path_elements.index(other) + 1 path_elems = ['..'] * (len(self._path_elements) - i) \ + [n.name for n in other._path_elements[i:]] result = OS_SEP.join(path_elems) memo_dict[other] = result return result
[ "Return a path to \"other\" relative to this directory.\n " ]
Please provide a description of the function:def get_found_includes(self, env, scanner, path): if not scanner: return [] # Clear cached info for this Dir. If we already visited this # directory on our walk down the tree (because we didn't know at # that point it was being used as the source for another Node) # then we may have calculated build signature before realizing # we had to scan the disk. Now that we have to, though, we need # to invalidate the old calculated signature so that any node # dependent on our directory structure gets one that includes # info about everything on disk. self.clear() return scanner(self, env, path)
[ "Return this directory's implicit dependencies.\n\n We don't bother caching the results because the scan typically\n shouldn't be requested more than once (as opposed to scanning\n .h file contents, which can be requested as many times as the\n files is #included by other files).\n " ]
Please provide a description of the function:def build(self, **kw): global MkdirBuilder if self.builder is not MkdirBuilder: SCons.Node.Node.build(self, **kw)
[ "A null \"builder\" for directories." ]
Please provide a description of the function:def _create(self): listDirs = [] parent = self while parent: if parent.exists(): break listDirs.append(parent) p = parent.up() if p is None: # Don't use while: - else: for this condition because # if so, then parent is None and has no .path attribute. raise SCons.Errors.StopError(parent._path) parent = p listDirs.reverse() for dirnode in listDirs: try: # Don't call dirnode.build(), call the base Node method # directly because we definitely *must* create this # directory. The dirnode.build() method will suppress # the build if it's the default builder. SCons.Node.Node.build(dirnode) dirnode.get_executor().nullify() # The build() action may or may not have actually # created the directory, depending on whether the -n # option was used or not. Delete the _exists and # _rexists attributes so they can be reevaluated. dirnode.clear() except OSError: pass
[ "Create this directory, silently and without worrying about\n whether the builder is the default or not." ]
Please provide a description of the function:def is_up_to_date(self): if self.builder is not MkdirBuilder and not self.exists(): return 0 up_to_date = SCons.Node.up_to_date for kid in self.children(): if kid.get_state() > up_to_date: return 0 return 1
[ "If any child is not up-to-date, then this directory isn't,\n either." ]
Please provide a description of the function:def get_timestamp(self): stamp = 0 for kid in self.children(): if kid.get_timestamp() > stamp: stamp = kid.get_timestamp() return stamp
[ "Return the latest timestamp from among our children" ]
Please provide a description of the function:def entry_exists_on_disk(self, name): try: d = self.on_disk_entries except AttributeError: d = {} try: entries = os.listdir(self._abspath) except OSError: pass else: for entry in map(_my_normcase, entries): d[entry] = True self.on_disk_entries = d if sys.platform == 'win32' or sys.platform == 'cygwin': name = _my_normcase(name) result = d.get(name) if result is None: # Belt-and-suspenders for Windows: check directly for # 8.3 file names that don't show up in os.listdir(). result = os.path.exists(self._abspath + OS_SEP + name) d[name] = result return result else: return name in d
[ " Searches through the file/dir entries of the current\n directory, and returns True if a physical entry with the given\n name could be found.\n\n @see rentry_exists_on_disk\n " ]
Please provide a description of the function:def rentry_exists_on_disk(self, name): rentry_exists = self.entry_exists_on_disk(name) if not rentry_exists: # Search through the repository folders norm_name = _my_normcase(name) for rdir in self.get_all_rdirs(): try: node = rdir.entries[norm_name] if node: rentry_exists = True break except KeyError: if rdir.entry_exists_on_disk(name): rentry_exists = True break return rentry_exists
[ " Searches through the file/dir entries of the current\n *and* all its remote directories (repos), and returns\n True if a physical entry with the given name could be found.\n The local directory (self) gets searched first, so\n repositories take a lower precedence regarding the\n searching order.\n\n @see entry_exists_on_disk\n " ]
Please provide a description of the function:def walk(self, func, arg): entries = self.entries names = list(entries.keys()) names.remove('.') names.remove('..') func(arg, self, names) for dirname in [n for n in names if isinstance(entries[n], Dir)]: entries[dirname].walk(func, arg)
[ "\n Walk this directory tree by calling the specified function\n for each directory in the tree.\n\n This behaves like the os.path.walk() function, but for in-memory\n Node.FS.Dir objects. The function takes the same arguments as\n the functions passed to os.path.walk():\n\n func(arg, dirname, fnames)\n\n Except that \"dirname\" will actually be the directory *Node*,\n not the string. The '.' and '..' entries are excluded from\n fnames. The fnames list may be modified in-place to filter the\n subdirectories visited or otherwise impose a specific order.\n The \"arg\" argument is always passed to func() and may be used\n in any way (or ignored, passing None is common).\n " ]
Please provide a description of the function:def glob(self, pathname, ondisk=True, source=False, strings=False, exclude=None): dirname, basename = os.path.split(pathname) if not dirname: result = self._glob1(basename, ondisk, source, strings) else: if has_glob_magic(dirname): list = self.glob(dirname, ondisk, source, False, exclude) else: list = [self.Dir(dirname, create=True)] result = [] for dir in list: r = dir._glob1(basename, ondisk, source, strings) if strings: r = [os.path.join(str(dir), x) for x in r] result.extend(r) if exclude: excludes = [] excludeList = SCons.Util.flatten(exclude) for x in excludeList: r = self.glob(x, ondisk, source, strings) excludes.extend(r) result = [x for x in result if not any(fnmatch.fnmatch(str(x), str(e)) for e in SCons.Util.flatten(excludes))] return sorted(result, key=lambda a: str(a))
[ "\n Returns a list of Nodes (or strings) matching a specified\n pathname pattern.\n\n Pathname patterns follow UNIX shell semantics: * matches\n any-length strings of any characters, ? matches any character,\n and [] can enclose lists or ranges of characters. Matches do\n not span directory separators.\n\n The matches take into account Repositories, returning local\n Nodes if a corresponding entry exists in a Repository (either\n an in-memory Node or something on disk).\n\n By defafult, the glob() function matches entries that exist\n on-disk, in addition to in-memory Nodes. Setting the \"ondisk\"\n argument to False (or some other non-true value) causes the glob()\n function to only match in-memory Nodes. The default behavior is\n to return both the on-disk and in-memory Nodes.\n\n The \"source\" argument, when true, specifies that corresponding\n source Nodes must be returned if you're globbing in a build\n directory (initialized with VariantDir()). The default behavior\n is to return Nodes local to the VariantDir().\n\n The \"strings\" argument, when true, returns the matches as strings,\n not Nodes. The strings are path names relative to this directory.\n\n The \"exclude\" argument, if not None, must be a pattern or a list\n of patterns following the same UNIX shell semantics.\n Elements matching a least one pattern of this list will be excluded\n from the result.\n\n The underlying algorithm is adapted from the glob.glob() function\n in the Python library (but heavily modified), and uses fnmatch()\n under the covers.\n " ]
Please provide a description of the function:def _glob1(self, pattern, ondisk=True, source=False, strings=False): search_dir_list = self.get_all_rdirs() for srcdir in self.srcdir_list(): search_dir_list.extend(srcdir.get_all_rdirs()) selfEntry = self.Entry names = [] for dir in search_dir_list: # We use the .name attribute from the Node because the keys of # the dir.entries dictionary are normalized (that is, all upper # case) on case-insensitive systems like Windows. node_names = [ v.name for k, v in dir.entries.items() if k not in ('.', '..') ] names.extend(node_names) if not strings: # Make sure the working directory (self) actually has # entries for all Nodes in repositories or variant dirs. for name in node_names: selfEntry(name) if ondisk: try: disk_names = os.listdir(dir._abspath) except os.error: continue names.extend(disk_names) if not strings: # We're going to return corresponding Nodes in # the local directory, so we need to make sure # those Nodes exist. We only want to create # Nodes for the entries that will match the # specified pattern, though, which means we # need to filter the list here, even though # the overall list will also be filtered later, # after we exit this loop. if pattern[0] != '.': disk_names = [x for x in disk_names if x[0] != '.'] disk_names = fnmatch.filter(disk_names, pattern) dirEntry = dir.Entry for name in disk_names: # Add './' before disk filename so that '#' at # beginning of filename isn't interpreted. name = './' + name node = dirEntry(name).disambiguate() n = selfEntry(name) if n.__class__ != node.__class__: n.__class__ = node.__class__ n._morph() names = set(names) if pattern[0] != '.': names = [x for x in names if x[0] != '.'] names = fnmatch.filter(names, pattern) if strings: return names return [self.entries[_my_normcase(n)] for n in names]
[ "\n Globs for and returns a list of entry names matching a single\n pattern in this directory.\n\n This searches any repositories and source directories for\n corresponding entries and returns a Node (or string) relative\n to the current directory if an entry is found anywhere.\n\n TODO: handle pattern with no wildcard\n " ]
Please provide a description of the function:def _morph(self):
    self.repositories = []
    self.srcdir = None
    self.entries = {}
    self.entries['.'] = self
    self.entries['..'] = self.dir
    self.cwd = self
    self.searched = 0
    self._sconsign = None
    self.variant_dirs = []
    self.changed_since_last_build = 3
    self._func_sconsign = 1
    self._func_exists = 2
    self._func_get_contents = 2

    # Don't just reset the executor, replace its action list,
    # because it might have some pre-or post-actions that need to
    # be preserved.
    #
    # But don't reset the executor if there is a non-null executor
    # attached already. The existing executor might have other
    # targets, in which case replacing the action list with a
    # Mkdir action is a big mistake.
    if not hasattr(self, 'executor'):
        self.builder = get_MkdirBuilder()
        self.get_executor().set_action_list(self.builder.action)
    else:
        # Prepend MkdirBuilder action to existing action list
        l = self.get_executor().action_list
        a = get_MkdirBuilder().action
        l.insert(0, a)
        self.get_executor().set_action_list(l)
[ "Turn a file system Node (either a freshly initialized directory\n object or a separate Entry object) into a proper directory object.\n\n Set up this directory's entries and hook it into the file\n system tree. Specify that directories (this Node) don't use\n signatures for calculating whether they're current.\n " ]
Please provide a description of the function:def _lookup_abs(self, p, klass, create=1):
    k = _my_normcase(p)
    try:
        result = self._lookupDict[k]
    except KeyError:
        if not create:
            msg = "No such file or directory: '%s' in '%s' (and create is False)" % (p, str(self))
            raise SCons.Errors.UserError(msg)
        # There is no Node for this path name, and we're allowed
        # to create it.
        dir_name, file_name = p.rsplit('/', 1)
        dir_node = self._lookup_abs(dir_name, Dir)
        result = klass(file_name, dir_node, self.fs)

        # Double-check on disk (as configured) that the Node we
        # created matches whatever is out there in the real world.
        result.diskcheck_match()

        self._lookupDict[k] = result
        dir_node.entries[_my_normcase(file_name)] = result
        dir_node.implicit = None
    else:
        # There is already a Node for this path name.  Allow it to
        # complain if we were looking for an inappropriate type.
        result.must_be_same(klass)
    return result
[ "\n Fast (?) lookup of a *normalized* absolute path.\n\n This method is intended for use by internal lookups with\n already-normalized path data. For general-purpose lookups,\n use the FS.Entry(), FS.Dir() or FS.File() methods.\n\n The caller is responsible for making sure we're passed a\n normalized absolute path; we merely let Python's dictionary look\n up and return the One True Node.FS object for the path.\n\n If a Node for the specified \"p\" doesn't already exist, and\n \"create\" is specified, the Node may be created after recursive\n invocation to find or create the parent directory or directories.\n " ]
Please provide a description of the function:def convert_to_sconsign(self):
    if os_sep_is_slash:
        node_to_str = str
    else:
        def node_to_str(n):
            try:
                s = n.get_internal_path()
            except AttributeError:
                s = str(n)
            else:
                s = s.replace(OS_SEP, '/')
            return s
    for attr in ['bsources', 'bdepends', 'bimplicit']:
        try:
            val = getattr(self, attr)
        except AttributeError:
            pass
        else:
            setattr(self, attr, list(map(node_to_str, val)))
[ "\n Converts this FileBuildInfo object for writing to a .sconsign file\n\n This replaces each Node in our various dependency lists with its\n usual string representation: relative to the top-level SConstruct\n directory, or an absolute path if it's outside.\n " ]
Please provide a description of the function:def prepare_dependencies(self):
    attrs = [
        ('bsources', 'bsourcesigs'),
        ('bdepends', 'bdependsigs'),
        ('bimplicit', 'bimplicitsigs'),
    ]
    for (nattr, sattr) in attrs:
        try:
            strings = getattr(self, nattr)
            nodeinfos = getattr(self, sattr)
        except AttributeError:
            continue
        if strings is None or nodeinfos is None:
            continue
        nodes = []
        for s, ni in zip(strings, nodeinfos):
            if not isinstance(s, SCons.Node.Node):
                s = ni.str_to_node(s)
            nodes.append(s)
        setattr(self, nattr, nodes)
[ "\n Prepares a FileBuildInfo object for explaining what changed\n\n The bsources, bdepends and bimplicit lists have all been\n stored on disk as paths relative to the top-level SConstruct\n directory. Convert the strings to actual Nodes (for use by the\n --debug=explain code and --implicit-cache).\n " ]
Please provide a description of the function:def Dir(self, name, create=True):
    return self.dir.Dir(name, create=create)
[ "Create a directory node named 'name' relative to\n the directory of this file." ]
Please provide a description of the function:def _morph(self):
    self.scanner_paths = {}
    if not hasattr(self, '_local'):
        self._local = 0
    if not hasattr(self, 'released_target_info'):
        self.released_target_info = False

    self.store_info = 1
    self._func_exists = 4
    self._func_get_contents = 3

    # Initialize this Node's decider function to decide_source() because
    # every file is a source file until it has a Builder attached...
    self.changed_since_last_build = 4

    # If there was already a Builder set on this entry, then
    # we need to make sure we call the target-decider function,
    # not the source-decider.  Reaching in and doing this by hand
    # is a little bogus.  We'd prefer to handle this by adding
    # an Entry.builder_set() method that disambiguates like the
    # other methods, but that starts running into problems with the
    # fragile way we initialize Dir Nodes with their Mkdir builders,
    # yet still allow them to be overridden by the user.  Since it's
    # not clear right now how to fix that, stick with what works
    # until it becomes clear...
    if self.has_builder():
        self.changed_since_last_build = 5
[ "Turn a file system node into a File object." ]
Please provide a description of the function:def get_content_hash(self):
    if not self.rexists():
        return SCons.Util.MD5signature('')
    fname = self.rfile().get_abspath()
    try:
        cs = SCons.Util.MD5filesignature(fname,
            chunksize=SCons.Node.FS.File.md5_chunksize*1024)
    except EnvironmentError as e:
        if not e.filename:
            e.filename = fname
        raise
    return cs
[ "\n Compute and return the MD5 hash for this file.\n " ]
Please provide a description of the function:def get_found_includes(self, env, scanner, path):
    memo_key = (id(env), id(scanner), path)
    try:
        memo_dict = self._memo['get_found_includes']
    except KeyError:
        memo_dict = {}
        self._memo['get_found_includes'] = memo_dict
    else:
        try:
            return memo_dict[memo_key]
        except KeyError:
            pass

    if scanner:
        result = [n.disambiguate() for n in scanner(self, env, path)]
    else:
        result = []

    memo_dict[memo_key] = result
    return result
[ "Return the included implicit dependencies in this file.\n Cache results so we only scan the file once per path\n regardless of how many times this information is requested.\n " ]
Please provide a description of the function:def push_to_cache(self):
    # This should get called before the Nodes' .built() method is
    # called, which would clear the build signature if the file has
    # a source scanner.
    #
    # We have to clear the local memoized values *before* we push
    # the node to cache so that the memoization of the self.exists()
    # return value doesn't interfere.
    if self.nocache:
        return
    self.clear_memoized_values()
    if self.exists():
        self.get_build_env().get_CacheDir().push(self)
[ "Try to push the node into a cache\n " ]
Please provide a description of the function:def retrieve_from_cache(self):
    if self.nocache:
        return None
    if not self.is_derived():
        return None
    return self.get_build_env().get_CacheDir().retrieve(self)
[ "Try to retrieve the node's content from a cache\n\n This method is called from multiple threads in a parallel build,\n so only do thread safe stuff here. Do thread unsafe stuff in\n built().\n\n Returns true if the node was successfully retrieved.\n " ]
Please provide a description of the function:def release_target_info(self):
    if (self.released_target_info or SCons.Node.interactive):
        return

    if not hasattr(self.attributes, 'keep_targetinfo'):
        # Cache some required values, before releasing
        # stuff like env, executor and builder...
        self.changed(allowcache=True)
        self.get_contents_sig()
        self.get_build_env()
        # Now purge unneeded stuff to free memory...
        self.executor = None
        self._memo.pop('rfile', None)
        self.prerequisites = None
        # Cleanup lists, but only if they're empty
        if not len(self.ignore_set):
            self.ignore_set = None
        if not len(self.implicit_set):
            self.implicit_set = None
        if not len(self.depends_set):
            self.depends_set = None
        if not len(self.ignore):
            self.ignore = None
        if not len(self.depends):
            self.depends = None
        # Mark this node as done, we only have to release
        # the memory once...
        self.released_target_info = True
[ "Called just after this node has been marked\n up-to-date or was built completely.\n\n This is where we try to release as many target node infos\n as possible for clean builds and update runs, in order\n to minimize the overall memory consumption.\n\n We'd like to remove a lot more attributes like self.sources\n and self.sources_set, but they might get used\n in a next build step. For example, during configuration\n the source files for a built E{*}.o file are used to figure out\n which linker to use for the resulting Program (gcc vs. g++)!\n That's why we check for the 'keep_targetinfo' attribute,\n config Nodes and the Interactive mode just don't allow\n an early release of most variables.\n\n In the same manner, we can't simply remove the self.attributes\n here. The smart linking relies on the shared flag, and some\n parts of the java Tool use it to transport information\n about nodes...\n\n @see: built() and Node.release_target_info()\n " ]
Please provide a description of the function:def has_src_builder(self):
    try:
        scb = self.sbuilder
    except AttributeError:
        scb = self.sbuilder = self.find_src_builder()
    return scb is not None
[ "Return whether this Node has a source builder or not.\n\n If this Node doesn't have an explicit source code builder, this\n is where we figure out, on the fly, if there's a transparent\n source code builder for it.\n\n Note that if we found a source builder, we also set the\n self.builder attribute, so that all of the methods that actually\n *build* this file don't have to do anything different.\n " ]
Please provide a description of the function:def alter_targets(self):
    if self.is_derived():
        return [], None
    return self.fs.variant_dir_target_climb(self, self.dir, [self.name])
[ "Return any corresponding targets in a variant directory.\n " ]
Please provide a description of the function:def prepare(self):
    SCons.Node.Node.prepare(self)

    if self.get_state() != SCons.Node.up_to_date:
        if self.exists():
            if self.is_derived() and not self.precious:
                self._rmv_existing()
        else:
            try:
                self._createDir()
            except SCons.Errors.StopError as drive:
                raise SCons.Errors.StopError("No drive `{}' for target `{}'.".format(drive, self))
[ "Prepare for this file to be created." ]
Please provide a description of the function:def remove(self):
    if self.exists() or self.islink():
        self.fs.unlink(self.get_internal_path())
        return 1
    return None
[ "Remove this file." ]
Please provide a description of the function:def get_max_drift_csig(self):
    old = self.get_stored_info()
    mtime = self.get_timestamp()

    max_drift = self.fs.max_drift
    if max_drift > 0:
        if (time.time() - mtime) > max_drift:
            try:
                n = old.ninfo
                if n.timestamp and n.csig and n.timestamp == mtime:
                    return n.csig
            except AttributeError:
                pass
    elif max_drift == 0:
        try:
            return old.ninfo.csig
        except AttributeError:
            pass

    return None
[ "\n Returns the content signature currently stored for this node\n if it's been unmodified longer than the max_drift value, or the\n max_drift value is 0. Returns None otherwise.\n " ]
Please provide a description of the function:def get_csig(self):
    ninfo = self.get_ninfo()
    try:
        return ninfo.csig
    except AttributeError:
        pass

    csig = self.get_max_drift_csig()
    if csig is None:
        try:
            if self.get_size() < SCons.Node.FS.File.md5_chunksize:
                contents = self.get_contents()
            else:
                csig = self.get_content_hash()
        except IOError:
            # This can happen if there's actually a directory on-disk,
            # which can be the case if they've disabled disk checks,
            # or if an action with a File target actually happens to
            # create a same-named directory by mistake.
            csig = ''
        else:
            if not csig:
                csig = SCons.Util.MD5signature(contents)

    ninfo.csig = csig
    return csig
[ "\n Generate a node's content signature, the digested signature\n of its content.\n\n node - the node\n cache - alternate node to use for the signature cache\n returns - the content signature\n " ]
Please provide a description of the function:def built(self):
    SCons.Node.Node.built(self)

    if (not SCons.Node.interactive and
        not hasattr(self.attributes, 'keep_targetinfo')):
        # Ensure that the build infos get computed and cached...
        SCons.Node.store_info_map[self.store_info](self)
        # ... then release some more variables.
        self._specific_sources = False
        self._labspath = None
        self._save_str()
        self.cwd = None

    self.scanner_paths = None
[ "Called just after this File node is successfully built.\n\n Just like for 'release_target_info' we try to release\n some more target node attributes in order to minimize the\n overall memory consumption.\n\n @see: release_target_info\n " ]
Please provide a description of the function:def changed(self, node=None, allowcache=False):
    if node is None:
        try:
            return self._memo['changed']
        except KeyError:
            pass

    has_changed = SCons.Node.Node.changed(self, node)
    if allowcache:
        self._memo['changed'] = has_changed
    return has_changed
[ "\n Returns if the node is up-to-date with respect to the BuildInfo\n stored last time it was built.\n\n For File nodes this is basically a wrapper around Node.changed(),\n but we allow the return value to get cached after the reference\n to the Executor got released in release_target_info().\n\n @see: Node.changed()\n " ]
Please provide a description of the function:def get_cachedir_csig(self):
    try:
        return self.cachedir_csig
    except AttributeError:
        pass

    cachedir, cachefile = self.get_build_env().get_CacheDir().cachepath(self)
    if not self.exists() and cachefile and os.path.exists(cachefile):
        self.cachedir_csig = SCons.Util.MD5filesignature(cachefile,
            SCons.Node.FS.File.md5_chunksize * 1024)
    else:
        self.cachedir_csig = self.get_csig()
    return self.cachedir_csig
[ "\n Fetch a Node's content signature for purposes of computing\n another Node's cachesig.\n\n This is a wrapper around the normal get_csig() method that handles\n the somewhat obscure case of using CacheDir with the -n option.\n Any files that don't exist would normally be \"built\" by fetching\n them from the cache, but the normal get_csig() method will try\n to open up the local file, which doesn't exist because the -n\n option meant we didn't actually pull the file from cachedir.\n But since the file *does* actually exist in the cachedir, we\n can use its contents for the csig.\n " ]
Please provide a description of the function:def get_contents_sig(self):
    try:
        return self.contentsig
    except AttributeError:
        pass

    executor = self.get_executor()

    result = self.contentsig = SCons.Util.MD5signature(executor.get_contents())
    return result
[ "\n A helper method for get_cachedir_bsig.\n\n It computes and returns the signature for this\n node's contents.\n " ]
Please provide a description of the function:def get_cachedir_bsig(self):
    try:
        return self.cachesig
    except AttributeError:
        pass

    # Collect signatures for all children
    children = self.children()
    sigs = [n.get_cachedir_csig() for n in children]

    # Append this node's signature...
    sigs.append(self.get_contents_sig())

    # ...and its path
    sigs.append(self.get_internal_path())

    # Merge this all into a single signature
    result = self.cachesig = SCons.Util.MD5collect(sigs)
    return result
[ "\n Return the signature for a cached file, including\n its children.\n\n It adds the path of the cached file to the cache signature,\n because multiple targets built by the same action will all\n have the same build signature, and we have to differentiate\n them somehow.\n " ]
Please provide a description of the function:def filedir_lookup(self, p, fd=None):
    if fd is None:
        fd = self.default_filedir
    dir, name = os.path.split(fd)
    drive, d = _my_splitdrive(dir)
    if not name and d[:1] in ('/', OS_SEP):
        #return p.fs.get_root(drive).dir_on_disk(name)
        return p.fs.get_root(drive)
    if dir:
        p = self.filedir_lookup(p, dir)
        if not p:
            return None
    norm_name = _my_normcase(name)
    try:
        node = p.entries[norm_name]
    except KeyError:
        return p.dir_on_disk(name)
    if isinstance(node, Dir):
        return node
    if isinstance(node, Entry):
        node.must_be_same(Dir)
        return node
    return None
[ "\n A helper method for find_file() that looks up a directory for\n a file we're trying to find. This only creates the Dir Node if\n it exists on-disk, since if the directory doesn't exist we know\n we won't find any files in it... :-)\n\n It would be more compact to just use this as a nested function\n with a default keyword argument (see the commented-out version\n below), but that doesn't work unless you have nested scopes,\n so we define it here just so this work under Python 1.5.2.\n " ]
Please provide a description of the function:def find_file(self, filename, paths, verbose=None):
    memo_key = self._find_file_key(filename, paths)
    try:
        memo_dict = self._memo['find_file']
    except KeyError:
        memo_dict = {}
        self._memo['find_file'] = memo_dict
    else:
        try:
            return memo_dict[memo_key]
        except KeyError:
            pass

    if verbose and not callable(verbose):
        if not SCons.Util.is_String(verbose):
            verbose = "find_file"
        _verbose = u'  %s: ' % verbose
        verbose = lambda s: sys.stdout.write(_verbose + s)

    filedir, filename = os.path.split(filename)
    if filedir:
        self.default_filedir = filedir
        paths = [_f for _f in map(self.filedir_lookup, paths) if _f]

    result = None
    for dir in paths:
        if verbose:
            verbose("looking for '%s' in '%s' ...\n" % (filename, dir))
        node, d = dir.srcdir_find_file(filename)
        if node:
            if verbose:
                verbose("... FOUND '%s' in '%s'\n" % (filename, d))
            result = node
            break

    memo_dict[memo_key] = result
    return result
[ "\n Find a node corresponding to either a derived file or a file that exists already.\n\n Only the first file found is returned, and none is returned if no file is found.\n\n filename: A filename to find\n paths: A list of directory path *nodes* to search in. Can be represented as a list, a tuple, or a callable that is called with no arguments and returns the list or tuple.\n\n returns The node created from the found file.\n\n " ]
Please provide a description of the function:def run(self, resources):
    hwman = resources['connection']
    updater = hwman.hwman.app(name='device_updater')

    updater.run_script(self._script, no_reboot=self._no_reboot)
[ "Actually send the trub script.\n\n Args:\n resources (dict): A dictionary containing the required resources that\n we needed access to in order to perform this step.\n " ]
Please provide a description of the function:def process_gatt_service(services, event):
    length = len(event.payload) - 5
    handle, start, end, uuid = unpack('<BHH%ds' % length, event.payload)
    uuid = process_uuid(uuid)

    services[uuid] = {'uuid_raw': uuid, 'start_handle': start, 'end_handle': end}
[ "Process a BGAPI event containing a GATT service description and add it to a dictionary\n\n Args:\n services (dict): A dictionary of discovered services that is updated with this event\n event (BGAPIPacket): An event containing a GATT service\n\n " ]
Please provide a description of the function:def handle_to_uuid(handle, services):
    for service in services.values():
        for char_uuid, char_def in service['characteristics'].items():
            if char_def['handle'] == handle:
                return char_uuid

    raise ValueError("Handle not found in GATT table")
[ "Find the corresponding UUID for an attribute handle" ]
Please provide a description of the function:def _text2bool(val):
    lval = val.lower()
    if lval in __true_strings:
        return True
    if lval in __false_strings:
        return False
    raise ValueError("Invalid value for boolean option: %s" % val)
[ "\n Converts strings to True/False depending on the 'truth' expressed by\n the string. If the string can't be converted, the original value\n will be returned.\n\n See '__true_strings' and '__false_strings' for values considered\n 'true' or 'false respectively.\n\n This is usable as 'converter' for SCons' Variables.\n " ]
Please provide a description of the function:def _validator(key, val, env):
    if not env[key] in (True, False):
        raise SCons.Errors.UserError(
            'Invalid value for boolean option %s: %s' % (key, env[key]))
[ "\n Validates the given value to be either '0' or '1'.\n \n This is usable as 'validator' for SCons' Variables.\n " ]