Columns: docstring (string, lengths 52–499), function (string, lengths 67–35.2k), __index_level_0__ (int64, values 52.6k–1.16M)
Add a new service to the list of ones we know about. Args: new_service (WindowsService): the service to add.
def AddService(self, new_service):
  for service in self._services:
    if new_service == service:
      # If this service is the same as one we already know about, we
      # just want to add where it came from.
      service.sources.append(new_service.sources[0])
      return

  # We only add a new object to our list if we don't have
  # an identical one already.
  self._services.append(new_service)
289,304
Produces a human readable multi-line string representing the service. Args: service (WindowsService): service to format. Returns: str: human readable representation of a Windows Service.
def _FormatServiceText(self, service):
  string_segments = [
      service.name,
      '\tImage Path = {0:s}'.format(service.image_path),
      '\tService Type = {0:s}'.format(service.HumanReadableType()),
      '\tStart Type = {0:s}'.format(service.HumanReadableStartType()),
      '\tService Dll = {0:s}'.format(service.service_dll),
      '\tObject Name = {0:s}'.format(service.object_name),
      '\tSources:']

  for source in service.sources:
    string_segments.append('\t\t{0:s}:{1:s}'.format(source[0], source[1]))

  return '\n'.join(string_segments)
289,306
Compiles an analysis report. Args: mediator (AnalysisMediator): mediates interactions between analysis plugins and other components, such as storage and dfvfs. Returns: AnalysisReport: report.
def CompileReport(self, mediator):
  # TODO: move YAML representation out of plugin and into serialization.
  lines_of_text = []

  if self._output_format == 'yaml':
    lines_of_text.append(
        yaml.safe_dump_all(self._service_collection.services))
  else:
    lines_of_text.append('Listing Windows Services')
    for service in self._service_collection.services:
      lines_of_text.append(self._FormatServiceText(service))
      lines_of_text.append('')

  lines_of_text.append('')
  report_text = '\n'.join(lines_of_text)
  return reports.AnalysisReport(plugin_name=self.NAME, text=report_text)
289,307
Analyzes an event and creates Windows Services as required. At present, this method only handles events extracted from the Registry. Args: mediator (AnalysisMediator): mediates interactions between analysis plugins and other components, such as storage and dfvfs. event (EventObject): event to examine.
def ExamineEvent(self, mediator, event):
  # TODO: Handle event log entries here also (ie, event id 4697).
  event_data_type = getattr(event, 'data_type', '')
  if event_data_type == 'windows:registry:service':
    # Create and store the service.
    service = WindowsService.FromEvent(event)
    self._service_collection.AddService(service)
289,308
Determines the formatted message strings for an event object. Args: formatter_mediator (FormatterMediator): mediates the interactions between formatters and other components, such as storage and Windows EventLog resources. event (EventObject): event. Returns: tuple(str, str): formatted message string and short message string. Raises: WrongFormatter: if the event object cannot be formatted by the formatter.
def GetMessages(self, formatter_mediator, event):
  if self.DATA_TYPE != event.data_type:
    raise errors.WrongFormatter('Unsupported data type: {0:s}.'.format(
        event.data_type))

  event_values = event.CopyToDict()

  cookie_flags = event_values.get('flags', None)
  if cookie_flags == 0:
    del event_values['flags']
  elif cookie_flags:
    flags = []
    for flag_value, flag_description in iter(self._COOKIE_FLAGS.items()):
      if cookie_flags & flag_value:
        flags.append(flag_description)

    event_values['flags'] = '|'.join(flags)

  return self._ConditionalFormatMessages(event_values)
289,309
Extracts relevant install history entries. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. top_level (dict[str, object]): plist top-level key.
def GetEntries(self, parser_mediator, top_level=None, **unused_kwargs):
  for entry in top_level:
    datetime_value = entry.get('date', None)
    package_identifiers = entry.get('packageIdentifiers', [])
    if not datetime_value or not package_identifiers:
      continue

    display_name = entry.get('displayName', '<UNKNOWN>')
    display_version = entry.get('displayVersion', '<DISPLAY_VERSION>')
    process_name = entry.get('processName', '<PROCESS_NAME>')
    package_identifiers = ', '.join(package_identifiers)

    event_data = plist_event.PlistTimeEventData()
    event_data.desc = (
        'Installation of [{0:s} {1:s}] using [{2:s}]. Packages: '
        '{3:s}.').format(
            display_name, display_version, process_name, package_identifiers)
    event_data.key = ''
    event_data.root = '/item'

    event = time_events.PythonDatetimeEvent(
        datetime_value, definitions.TIME_DESCRIPTION_WRITTEN)
    parser_mediator.ProduceEventWithEventData(event, event_data)
289,310
Initializes a storage reader. Args: path (str): path to the input file.
def __init__(self, path):
  super(SQLiteStorageFileReader, self).__init__(path)
  self._storage_file = sqlite_file.SQLiteStorageFile()
  self._storage_file.Open(path=path)
289,311
Detects which tag file is most appropriate. Args: analysis_mediator (AnalysisMediator): analysis mediator. Returns: bool: True if a tag file is autodetected.
def _AttemptAutoDetectTagFile(self, analysis_mediator):
  self._autodetect_tag_file_attempt = True
  if not analysis_mediator.data_location:
    return False

  operating_system = analysis_mediator.operating_system.lower()
  filename = self._OS_TAG_FILES.get(operating_system, None)
  if not filename:
    return False

  logger.info('Using auto detected tag file: {0:s}'.format(filename))
  tag_file_path = os.path.join(analysis_mediator.data_location, filename)
  self.SetAndLoadTagFile(tag_file_path)
  return True
289,313
Compiles an analysis report. Args: mediator (AnalysisMediator): mediates interactions between analysis plugins and other components, such as storage and dfvfs. Returns: AnalysisReport: analysis report.
def CompileReport(self, mediator):
  report_text = 'Tagging plugin produced {0:d} tags.\n'.format(
      self._number_of_event_tags)
  self._number_of_event_tags = 0
  return reports.AnalysisReport(plugin_name=self.NAME, text=report_text)
289,314
Analyzes an EventObject and tags it according to rules in the tag file. Args: mediator (AnalysisMediator): mediates interactions between analysis plugins and other components, such as storage and dfvfs. event (EventObject): event to examine.
def ExamineEvent(self, mediator, event):
  if self._tagging_rules is None:
    if self._autodetect_tag_file_attempt:
      # There's nothing to tag with, and we've already tried to find a good
      # tag file, so there's nothing we can do with this event (or any other).
      return
    if not self._AttemptAutoDetectTagFile(mediator):
      logger.info(
          'No tag definition file specified, and plaso was not able to '
          'autoselect a tagging file. As no definitions were specified, '
          'no events will be tagged.')
      return

  matched_label_names = []
  for label_name, filter_objects in iter(self._tagging_rules.items()):
    for filter_object in filter_objects:
      if filter_object.Match(event):
        matched_label_names.append(label_name)
        break

  if matched_label_names:
    event_tag = self._CreateEventTag(
        event, self._EVENT_TAG_COMMENT, matched_label_names)

    mediator.ProduceEventTag(event_tag)
    self._number_of_event_tags += 1
289,315
Sets the tag file to be used by the plugin. Args: tagging_file_path (str): path of the tagging file.
def SetAndLoadTagFile(self, tagging_file_path):
  tag_file = tagging_file.TaggingFile(tagging_file_path)
  self._tagging_rules = tag_file.GetEventTaggingRules()
289,316
Writes the body of an event to the output. Args: event (EventObject): event.
def WriteEventBody(self, event):
  latitude = getattr(event, 'latitude', None)
  longitude = getattr(event, 'longitude', None)
  if latitude is not None and longitude is not None:
    placemark_xml_element = ElementTree.Element('Placemark')

    name_xml_element = ElementTree.SubElement(placemark_xml_element, 'name')
    name_xml_element.text = 'PLACEHOLDER FOR EVENT IDENTIFIER'

    description_xml_element = ElementTree.SubElement(
        placemark_xml_element, 'description')
    # TODO: move the description formatting into this output module.
    description_xml_element.text = (
        rawpy.NativePythonFormatterHelper.GetFormattedEventObject(event))

    point_xml_element = ElementTree.SubElement(
        placemark_xml_element, 'Point')

    coordinates_xml_element = ElementTree.SubElement(
        point_xml_element, 'coordinates')
    coordinates_xml_element.text = '{0!s},{1!s}'.format(longitude, latitude)

    # Note that ElementTree.tostring() will appropriately escape
    # the input data.
    xml_string = ElementTree.tostring(placemark_xml_element)

    output_text = codecs.decode(xml_string, self._output_mediator.encoding)
    self._output_writer.Write(output_text)
289,317
Parses a syslog body that matched one of defined grammars. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. key (str): name of the matching grammar. date_time (dfdatetime.DateTimeValues): date and time values. tokens (dict[str, str]): tokens derived from a syslog message based on the defined grammar. Raises: ValueError: If an unknown key is provided.
def ParseMessage(self, parser_mediator, key, date_time, tokens):
  if key != 'task_run':
    raise ValueError('Unknown grammar key: {0:s}'.format(key))

  event_data = CronTaskRunEventData()
  event_data.body = tokens.get('body', None)
  event_data.command = tokens.get('command', None)
  event_data.hostname = tokens.get('hostname', None)
  # TODO: pass line number to offset or remove.
  event_data.offset = 0
  event_data.pid = tokens.get('pid', None)
  event_data.reporter = tokens.get('reporter', None)
  event_data.severity = tokens.get('severity', None)
  event_data.username = tokens.get('username', None)

  event = time_events.DateTimeValuesEvent(
      date_time, definitions.TIME_DESCRIPTION_WRITTEN)
  parser_mediator.ProduceEventWithEventData(event, event_data)
289,320
Analyzes a block of data, attempting to match Yara rules to it. Args: data (bytes): a block of data.
def Analyze(self, data):
  if not self._rules:
    return

  try:
    self._matches = self._rules.match(data=data, timeout=self._MATCH_TIMEOUT)
  except yara.YaraTimeoutError:
    logger.error('Could not process file within timeout: {0:d}'.format(
        self._MATCH_TIMEOUT))
  except yara.YaraError as exception:
    logger.error('Error processing file with Yara: {0!s}.'.format(
        exception))
289,322
Retrieves the table names in a database. Args: database (pyesedb.file): ESE database. Returns: list[str]: table names.
def _GetTableNames(self, database):
  table_names = []
  for esedb_table in database.tables:
    table_names.append(esedb_table.name)

  return table_names
289,324
Parses an ESE database file-like object. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. file_object (dfvfs.FileIO): file-like object.
def ParseFileObject(self, parser_mediator, file_object):
  esedb_file = pyesedb.file()

  try:
    esedb_file.open_file_object(file_object)
  except IOError as exception:
    parser_mediator.ProduceExtractionWarning(
        'unable to open file with error: {0!s}'.format(exception))
    return

  # Compare the list of available plugin objects.
  cache = ESEDBCache()
  try:
    table_names = frozenset(self._GetTableNames(esedb_file))

    for plugin in self._plugins:
      if parser_mediator.abort:
        break

      if not plugin.required_tables.issubset(table_names):
        continue

      try:
        plugin.UpdateChainAndProcess(
            parser_mediator, cache=cache, database=esedb_file)
      except Exception as exception:  # pylint: disable=broad-except
        parser_mediator.ProduceExtractionWarning((
            'plugin: {0:s} unable to parse ESE database with error: '
            '{1!s}').format(plugin.NAME, exception))

  finally:
    # TODO: explicitly clean up cache.
    esedb_file.close()
289,325
Parses an activity log row. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. query (str): query that created the row. row (sqlite3.Row): row.
def ParseActivityLogUncompressedRow(
    self, parser_mediator, query, row, **unused_kwargs):
  query_hash = hash(query)

  event_data = ChromeExtensionActivityEventData()
  event_data.action_type = self._GetRowValue(query_hash, row, 'action_type')
  event_data.activity_id = self._GetRowValue(query_hash, row, 'activity_id')
  event_data.api_name = self._GetRowValue(query_hash, row, 'api_name')
  event_data.arg_url = self._GetRowValue(query_hash, row, 'arg_url')
  event_data.args = self._GetRowValue(query_hash, row, 'args')
  event_data.extension_id = self._GetRowValue(query_hash, row, 'extension_id')
  event_data.other = self._GetRowValue(query_hash, row, 'other')
  event_data.page_title = self._GetRowValue(query_hash, row, 'page_title')
  event_data.page_url = self._GetRowValue(query_hash, row, 'page_url')
  event_data.query = query

  timestamp = self._GetRowValue(query_hash, row, 'time')
  date_time = dfdatetime_webkit_time.WebKitTime(timestamp=timestamp)
  event = time_events.DateTimeValuesEvent(
      date_time, definitions.TIME_DESCRIPTION_UNKNOWN)
  parser_mediator.ProduceEventWithEventData(event, event_data)
289,327
Determines if a specific table exists. Args: table_name (str): table name. Returns: bool: True if the table exists. Raises: RuntimeError: if the database is not opened.
def HasTable(self, table_name):
  if not self._connection:
    raise RuntimeError(
        'Cannot determine if table exists database not opened.')

  sql_query = self._HAS_TABLE_QUERY.format(table_name)

  self._cursor.execute(sql_query)
  if self._cursor.fetchone():
    return True

  return False
289,330
Retrieves values from a table. Args: table_names (list[str]): table names. column_names (list[str]): column names. condition (str): query condition such as "log_source == 'Application Error'". Yields: sqlite3.row: row. Raises: RuntimeError: if the database is not opened.
def GetValues(self, table_names, column_names, condition):
  if not self._connection:
    raise RuntimeError('Cannot retrieve values database not opened.')

  if condition:
    condition = ' WHERE {0:s}'.format(condition)

  sql_query = 'SELECT {1:s} FROM {0:s}{2:s}'.format(
      ', '.join(table_names), ', '.join(column_names), condition)

  self._cursor.execute(sql_query)

  # TODO: have a look at https://docs.python.org/2/library/
  # sqlite3.html#sqlite3.Row
  for row in self._cursor:
    yield {
        column_name: row[column_index]
        for column_index, column_name in enumerate(column_names)}
289,331
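A rough usage sketch of the GetValues generator above. The variable name database and the specific table and column names here are made-up placeholders; they assume an already opened database object of this class:

# Hypothetical usage: iterate rows as dicts keyed by the requested columns.
for row in database.GetValues(
    ['event_log_providers'], ['log_source', 'provider_guid'],
    "log_source == 'Application Error'"):
  print(row['log_source'], row['provider_guid'])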
Opens the database file. Args: filename (str): filename of the database. read_only (Optional[bool]): True if the database should be opened in read-only mode. Since sqlite3 does not support a real read-only mode we fake it by only permitting SELECT queries. Returns: bool: True if successful. Raises: RuntimeError: if the database is already opened.
def Open(self, filename, read_only=False):
  if self._connection:
    raise RuntimeError('Cannot open database already opened.')

  self.filename = filename
  self.read_only = read_only

  try:
    self._connection = sqlite3.connect(filename)
  except sqlite3.OperationalError:
    return False

  if not self._connection:
    return False

  self._cursor = self._connection.cursor()
  if not self._cursor:
    return False

  return True
289,332
Retrieves the Event Log provider key. Args: log_source (str): Event Log source. Returns: str: Event Log provider key or None if not available. Raises: RuntimeError: if more than one value is found in the database.
def _GetEventLogProviderKey(self, log_source):
  table_names = ['event_log_providers']
  column_names = ['event_log_provider_key']

  condition = 'log_source == "{0:s}"'.format(log_source)

  values_list = list(self._database_file.GetValues(
      table_names, column_names, condition))

  number_of_values = len(values_list)
  if number_of_values == 0:
    return None

  if number_of_values == 1:
    values = values_list[0]
    return values['event_log_provider_key']

  raise RuntimeError('More than one value found in database.')
289,333
Retrieves a specific message from a specific message table. Args: message_file_key (int): message file key. lcid (int): language code identifier (LCID). message_identifier (int): message identifier. Returns: str: message string or None if not available. Raises: RuntimeError: if more than one value is found in the database.
def _GetMessage(self, message_file_key, lcid, message_identifier):
  table_name = 'message_table_{0:d}_0x{1:08x}'.format(message_file_key, lcid)

  has_table = self._database_file.HasTable(table_name)
  if not has_table:
    return None

  column_names = ['message_string']
  condition = 'message_identifier == "0x{0:08x}"'.format(message_identifier)

  values = list(self._database_file.GetValues(
      [table_name], column_names, condition))

  number_of_values = len(values)
  if number_of_values == 0:
    return None

  if number_of_values == 1:
    return values[0]['message_string']

  raise RuntimeError('More than one value found in database.')
289,334
Retrieves the message file keys. Args: event_log_provider_key (int): Event Log provider key. Yields: int: message file key.
def _GetMessageFileKeys(self, event_log_provider_key):
  table_names = ['message_file_per_event_log_provider']
  column_names = ['message_file_key']

  condition = 'event_log_provider_key == {0:d}'.format(
      event_log_provider_key)

  generator = self._database_file.GetValues(
      table_names, column_names, condition)
  for values in generator:
    yield values['message_file_key']
289,335
Reformats the message string. Args: message_string (str): message string. Returns: str: message string in Python format() (PEP 3101) style.
def _ReformatMessageString(self, message_string):
  def _PlaceHolderSpecifierReplacer(match_object):
    expanded_groups = []
    for group in match_object.groups():
      try:
        place_holder_number = int(group, 10) - 1
        expanded_group = '{{{0:d}:s}}'.format(place_holder_number)
      except ValueError:
        expanded_group = group

      expanded_groups.append(expanded_group)

    return ''.join(expanded_groups)

  if not message_string:
    return None

  message_string = self._WHITE_SPACE_SPECIFIER_RE.sub(r'', message_string)
  message_string = self._TEXT_SPECIFIER_RE.sub(r'\\\1', message_string)
  message_string = self._CURLY_BRACKETS.sub(r'\1\1', message_string)
  return self._PLACE_HOLDER_SPECIFIER_RE.sub(
      _PlaceHolderSpecifierReplacer, message_string)
289,336
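A minimal standalone sketch of the placeholder rewrite this method performs: WRC-style placeholders such as %1 become zero-based Python format() fields such as {0:s}. The regular expression below is an assumption for illustration, not the class's actual _PLACE_HOLDER_SPECIFIER_RE:

import re

# Assumed pattern for WRC-style placeholders like %1, %2, ...
PLACE_HOLDER_RE = re.compile(r'%([1-9][0-9]?)')

def reformat(message_string):
    """Rewrites %N placeholders into zero-based {N-1:s} format fields."""
    def replace(match_object):
        place_holder_number = int(match_object.group(1), 10) - 1
        return '{{{0:d}:s}}'.format(place_holder_number)
    return PLACE_HOLDER_RE.sub(replace, message_string)

print(reformat('The %1 service entered the %2 state.'))
# The {0:s} service entered the {1:s} state.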
Retrieves a specific message for a specific Event Log source. Args: log_source (str): Event Log source. lcid (int): language code identifier (LCID). message_identifier (int): message identifier. Returns: str: message string or None if not available.
def GetMessage(self, log_source, lcid, message_identifier):
  event_log_provider_key = self._GetEventLogProviderKey(log_source)
  if not event_log_provider_key:
    return None

  generator = self._GetMessageFileKeys(event_log_provider_key)
  if not generator:
    return None

  # TODO: cache a number of message strings.
  message_string = None
  for message_file_key in generator:
    message_string = self._GetMessage(
        message_file_key, lcid, message_identifier)
    if message_string:
      break

  if self._string_format == 'wrc':
    message_string = self._ReformatMessageString(message_string)

  return message_string
289,337
Retrieves the metadata attribute. Args: attribute_name (str): name of the metadata attribute. Returns: str: the metadata attribute or None. Raises: RuntimeError: if more than one value is found in the database.
def GetMetadataAttribute(self, attribute_name):
  table_name = 'metadata'

  has_table = self._database_file.HasTable(table_name)
  if not has_table:
    return None

  column_names = ['value']
  condition = 'name == "{0:s}"'.format(attribute_name)

  values = list(self._database_file.GetValues(
      [table_name], column_names, condition))

  number_of_values = len(values)
  if number_of_values == 0:
    return None

  if number_of_values == 1:
    return values[0]['value']

  raise RuntimeError('More than one value found in database.')
289,338
Opens the database reader object. Args: filename (str): filename of the database. Returns: bool: True if successful. Raises: RuntimeError: if the version or string format of the database is not supported.
def Open(self, filename):
  if not super(WinevtResourcesSqlite3DatabaseReader, self).Open(filename):
    return False

  version = self.GetMetadataAttribute('version')
  if not version or version != '20150315':
    raise RuntimeError('Unsupported version: {0:s}'.format(version))

  string_format = self.GetMetadataAttribute('string_format')
  if not string_format:
    string_format = 'wrc'

  if string_format not in ('pep3101', 'wrc'):
    raise RuntimeError('Unsupported string format: {0:s}'.format(
        string_format))

  self._string_format = string_format
  return True
289,339
Initializes the analysis report. Args: plugin_name (Optional[str]): name of the analysis plugin that generated the report. text (Optional[str]): report text.
def __init__(self, plugin_name=None, text=None):
  super(AnalysisReport, self).__init__()
  self.filter_string = None
  self.plugin_name = plugin_name
  self.report_array = None
  self.report_dict = None
  # TODO: rename text to body?
  self.text = text
  self.time_compiled = None
289,340
Parses and validates options. Args: options (argparse.Namespace): parser options. configuration_object (CLITool): object to be configured by the argument helper. Raises: BadConfigObject: when the configuration object is of the wrong type.
def ParseOptions(cls, options, configuration_object):
  if not isinstance(configuration_object, tools.CLITool):
    raise errors.BadConfigObject(
        'Configuration object is not an instance of CLITool')

  profilers = cls._ParseStringOption(options, 'profilers')
  if not profilers:
    profilers = set()
  elif profilers.lower() != 'list':
    profilers = set(profilers.split(','))

    supported_profilers = set(cls.PROFILERS_INFORMATION.keys())
    unsupported_profilers = profilers.difference(supported_profilers)
    if unsupported_profilers:
      unsupported_profilers = ', '.join(unsupported_profilers)
      raise errors.BadConfigOption(
          'Unsupported profilers: {0:s}'.format(unsupported_profilers))

  profiling_directory = getattr(options, 'profiling_directory', None)
  if profiling_directory and not os.path.isdir(profiling_directory):
    raise errors.BadConfigOption(
        'No such profiling directory: {0:s}'.format(profiling_directory))

  profiling_sample_rate = getattr(options, 'profiling_sample_rate', None)
  if not profiling_sample_rate:
    profiling_sample_rate = cls.DEFAULT_PROFILING_SAMPLE_RATE
  else:
    try:
      profiling_sample_rate = int(profiling_sample_rate, 10)
    except (TypeError, ValueError):
      raise errors.BadConfigOption(
          'Invalid profile sample rate: {0!s}.'.format(profiling_sample_rate))

  setattr(configuration_object, '_profilers', profilers)
  setattr(configuration_object, '_profiling_directory', profiling_directory)
  setattr(
      configuration_object, '_profiling_sample_rate', profiling_sample_rate)
289,343
Parses and validates options. Args: options (argparse.Namespace): parser options. configuration_object (CLITool): object to be configured by the argument helper. Raises: BadConfigObject: when the configuration object is of the wrong type.
def ParseOptions(cls, options, configuration_object):
  if not isinstance(configuration_object, tools.CLITool):
    raise errors.BadConfigObject(
        'Configuration object is not an instance of CLITool')

  data_location = cls._ParseStringOption(options, 'data_location')
  if not data_location:
    # Determine the source root path, which is 3 directories up from
    # the location of the script.
    data_location = os.path.dirname(cls._PATH)
    data_location = os.path.dirname(data_location)
    data_location = os.path.dirname(data_location)
    data_location = os.path.dirname(data_location)

    # There are multiple options to run a tool e.g. running from source or
    # from an egg file.
    data_location_egg = os.path.join(data_location, 'share', 'plaso')
    data_location_source = os.path.join(data_location, 'data')

    data_location = None
    if os.path.exists(data_location_egg) and os.path.isfile(os.path.join(
        data_location_egg, 'plaso-data.README')):
      data_location = data_location_egg
    elif os.path.exists(data_location_source) and os.path.isfile(os.path.join(
        data_location_source, 'plaso-data.README')):
      data_location = data_location_source

    if not data_location or not os.path.exists(data_location):
      data_location = os.path.join(sys.prefix, 'share', 'plaso')
    if not os.path.exists(data_location):
      data_location = os.path.join(sys.prefix, 'local', 'share', 'plaso')

    if sys.prefix != '/usr':
      if not os.path.exists(data_location):
        data_location = os.path.join('/usr', 'share', 'plaso')
      if not os.path.exists(data_location):
        data_location = os.path.join('/usr', 'local', 'share', 'plaso')

    if not os.path.exists(data_location) or not os.path.isfile(os.path.join(
        data_location, 'plaso-data.README')):
      data_location = None

  if not data_location:
    raise errors.BadConfigOption(
        'Unable to determine location of data files.')

  logger.info('Determined data location: {0:s}'.format(data_location))
  setattr(configuration_object, '_data_location', data_location)
289,345
Adds command line arguments the helper supports to an argument group. This function takes an argument parser or an argument group object and adds to it all the command line arguments this helper supports. Args: argument_group (argparse._ArgumentGroup|argparse.ArgumentParser): group to append arguments to.
def AddArguments(cls, argument_group):
  argument_group.add_argument(
      '--nsrlsvr-hash', '--nsrlsvr_hash', dest='nsrlsvr_hash', type=str,
      action='store', choices=nsrlsvr.NsrlsvrAnalyzer.SUPPORTED_HASHES,
      default=cls._DEFAULT_HASH, metavar='HASH', help=(
          'Type of hash to use to query nsrlsvr instance, the default is: '
          '{0:s}. Supported options: {1:s}'.format(
              cls._DEFAULT_HASH, ', '.join(
                  nsrlsvr.NsrlsvrAnalyzer.SUPPORTED_HASHES))))

  argument_group.add_argument(
      '--nsrlsvr-host', '--nsrlsvr_host', dest='nsrlsvr_host', type=str,
      action='store', default=cls._DEFAULT_HOST, metavar='HOST', help=(
          'Hostname or IP address of the nsrlsvr instance to query, the '
          'default is: {0:s}').format(cls._DEFAULT_HOST))

  argument_group.add_argument(
      '--nsrlsvr-label', '--nsrlsvr_label', dest='nsrlsvr_label', type=str,
      action='store', default=cls._DEFAULT_LABEL, metavar='LABEL', help=(
          'Label to apply to events, the default is: '
          '{0:s}.').format(cls._DEFAULT_LABEL))

  argument_group.add_argument(
      '--nsrlsvr-port', '--nsrlsvr_port', dest='nsrlsvr_port', type=int,
      action='store', default=cls._DEFAULT_PORT, metavar='PORT', help=(
          'Port number of the nsrlsvr instance to query, the default is: '
          '{0:d}.').format(cls._DEFAULT_PORT))
289,346
Parses and validates options. Args: options (argparse.Namespace): parser options object. analysis_plugin (NsrlsvrAnalysisPlugin): analysis plugin to configure. Raises: BadConfigObject: when the analysis plugin is the wrong type. BadConfigOption: when unable to connect to nsrlsvr instance.
def ParseOptions(cls, options, analysis_plugin):
  if not isinstance(analysis_plugin, nsrlsvr.NsrlsvrAnalysisPlugin):
    raise errors.BadConfigObject(
        'Analysis plugin is not an instance of NsrlsvrAnalysisPlugin')

  label = cls._ParseStringOption(
      options, 'nsrlsvr_label', default_value=cls._DEFAULT_LABEL)
  analysis_plugin.SetLabel(label)

  lookup_hash = cls._ParseStringOption(
      options, 'nsrlsvr_hash', default_value=cls._DEFAULT_HASH)
  analysis_plugin.SetLookupHash(lookup_hash)

  host = cls._ParseStringOption(
      options, 'nsrlsvr_host', default_value=cls._DEFAULT_HOST)
  analysis_plugin.SetHost(host)

  port = cls._ParseNumericOption(
      options, 'nsrlsvr_port', default_value=cls._DEFAULT_PORT)
  analysis_plugin.SetPort(port)

  if not analysis_plugin.TestConnection():
    raise errors.BadConfigOption(
        'Unable to connect to nsrlsvr {0:s}:{1:d}'.format(host, port))
289,347
Adds command line arguments to an argument group. This function takes an argument parser or an argument group object and adds to it all the command line arguments this helper supports. Args: argument_group (argparse._ArgumentGroup|argparse.ArgumentParser): argparse group.
def AddArguments(cls, argument_group):
  storage_formats = sorted(definitions.STORAGE_FORMATS)

  argument_group.add_argument(
      '--storage_format', '--storage-format', action='store',
      choices=storage_formats, dest='storage_format', type=str,
      metavar='FORMAT', default=definitions.DEFAULT_STORAGE_FORMAT, help=(
          'Format of the storage file, the default is: {0:s}. Supported '
          'options: {1:s}'.format(
              definitions.DEFAULT_STORAGE_FORMAT,
              ', '.join(storage_formats))))
289,348
Parses and validates options. Args: options (argparse.Namespace): parser options. configuration_object (CLITool): object to be configured by the argument helper. Raises: BadConfigObject: when the configuration object is of the wrong type. BadConfigOption: if the storage format is not defined or supported.
def ParseOptions(cls, options, configuration_object):
  if not isinstance(configuration_object, tools.CLITool):
    raise errors.BadConfigObject(
        'Configuration object is not an instance of CLITool')

  storage_format = cls._ParseStringOption(options, 'storage_format')
  if not storage_format:
    raise errors.BadConfigOption('Unable to determine storage format.')

  if storage_format not in definitions.STORAGE_FORMATS:
    raise errors.BadConfigOption(
        'Unsupported storage format: {0:s}'.format(storage_format))

  setattr(configuration_object, '_storage_format', storage_format)
289,349
Configures the logging root logger. Args: debug_output (Optional[bool]): True if the logging should include debug output. filename (Optional[str]): log filename. mode (Optional[str]): log file access mode. quiet_mode (Optional[bool]): True if the logging should not include information output. Note that debug_output takes precedence over quiet_mode.
def ConfigureLogging(
    debug_output=False, filename=None, mode='w', quiet_mode=False):
  # Remove all possible log handlers. The log handlers cannot be reconfigured
  # and therefore must be recreated.
  for handler in logging.root.handlers:
    logging.root.removeHandler(handler)

  logger = logging.getLogger()

  if filename and filename.endswith('.gz'):
    handler = CompressedFileHandler(filename, mode=mode)
  elif filename:
    handler = logging.FileHandler(filename, mode=mode)
  else:
    handler = logging.StreamHandler()

  format_string = (
      '%(asctime)s [%(levelname)s] (%(processName)-10s) PID:%(process)d '
      '<%(module)s> %(message)s')

  formatter = logging.Formatter(format_string)
  handler.setFormatter(formatter)

  if debug_output:
    level = logging.DEBUG
  elif quiet_mode:
    level = logging.WARNING
  else:
    level = logging.INFO

  logger.setLevel(level)
  handler.setLevel(level)

  logger.addHandler(handler)
289,350
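A brief usage sketch of ConfigureLogging as defined above. The log file name is a made-up placeholder; a .gz suffix routes output through the CompressedFileHandler branch:

import logging

# Hypothetical call: write gzip-compressed debug logs to disk.
ConfigureLogging(debug_output=True, filename='plaso_run.log.gz', mode='w')
logging.getLogger(__name__).debug('logging is now configured')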
Initializes a compressed file logging handler. Args: filename (str): name of the log file. mode (Optional[str]): file access mode. encoding (Optional[str]): encoding of the log lines.
def __init__(self, filename, mode='a', encoding='utf-8'):
  if 't' not in mode and encoding and py2to3.PY_3:
    mode = '{0:s}t'.format(mode)

  super(CompressedFileHandler, self).__init__(
      filename, mode=mode, encoding=encoding, delay=True)
289,351
Do a wikipedia geo search for `latitude` and `longitude` using HTTP API described in http://www.mediawiki.org/wiki/Extension:GeoData Arguments: * latitude (float or decimal.Decimal) * longitude (float or decimal.Decimal) Keyword arguments: * title - The title of an article to search for * results - the maximum number of results returned * radius - Search radius in meters. The value must be between 10 and 10000
def geosearch(latitude, longitude, title=None, results=10, radius=1000):
    search_params = {
        'list': 'geosearch',
        'gsradius': radius,
        'gscoord': '{0}|{1}'.format(latitude, longitude),
        'gslimit': results
    }
    if title:
        search_params['titles'] = title

    raw_results = _wiki_request(search_params)

    if 'error' in raw_results:
        if raw_results['error']['info'] in (
                'HTTP request timed out.', 'Pool queue is full'):
            raise HTTPTimeoutError('{0}|{1}'.format(latitude, longitude))
        else:
            raise WikipediaException(raw_results['error']['info'])

    search_pages = raw_results['query'].get('pages', None)
    if search_pages:
        search_results = (
            v['title'] for k, v in search_pages.items() if k != '-1')
    else:
        search_results = (
            d['title'] for d in raw_results['query']['geosearch'])

    return list(search_results)
289,650
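A short usage sketch, assuming this is the geosearch function exposed by the wikipedia package; the coordinates are arbitrary:

import wikipedia

# Articles within 1 km of a point in central Paris.
titles = wikipedia.geosearch(48.8583, 2.2944, results=5, radius=1000)
print(titles)  # list of nearby article titles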
Returns the longest common subsequence (LCS) between x and y. Source: http://www.algorithmist.com/index.php/Longest_Common_Subsequence Args: x: sequence of words y: sequence of words Returns: sequence: LCS of x and y
def _recon_lcs(x, y):
    i, j = len(x), len(y)
    table = _lcs(x, y)

    def _recon(i, j):
        if i == 0 or j == 0:
            return []
        elif x[i - 1] == y[j - 1]:
            return _recon(i - 1, j - 1) + [(x[i - 1], i)]
        elif table[i - 1, j] > table[i, j - 1]:
            return _recon(i - 1, j)
        else:
            return _recon(i, j - 1)

    recon_tuple = tuple(map(lambda x: x[0], _recon(i, j)))
    return recon_tuple
290,226
Computes ROUGE-N of two text collections of sentences. Source: http://research.microsoft.com/en-us/um/people/cyl/download/ papers/rouge-working-note-v1.3.1.pdf Args: evaluated_sentences: The sentences that have been picked by the summarizer reference_sentences: The sentences from the reference set n: Size of ngram. Defaults to 2. Returns: A tuple (f1, precision, recall) for ROUGE-N Raises: ValueError: raises exception if a param has len <= 0
def rouge_n(evaluated_sentences, reference_sentences, n=2):
    if len(evaluated_sentences) <= 0 or len(reference_sentences) <= 0:
        raise ValueError("Collections must contain at least 1 sentence.")

    evaluated_ngrams = _get_word_ngrams(n, evaluated_sentences)
    reference_ngrams = _get_word_ngrams(n, reference_sentences)
    reference_count = len(reference_ngrams)
    evaluated_count = len(evaluated_ngrams)

    # Gets the overlapping ngrams between evaluated and reference
    overlapping_ngrams = evaluated_ngrams.intersection(reference_ngrams)
    overlapping_count = len(overlapping_ngrams)

    # Handle edge case. This isn't mathematically correct, but it's good enough
    if evaluated_count == 0:
        precision = 0.0
    else:
        precision = overlapping_count / evaluated_count

    # Handle edge case for recall, same as for precision
    if reference_count == 0:
        recall = 0.0
    else:
        recall = overlapping_count / reference_count

    f1_score = 2.0 * ((precision * recall) / (precision + recall + 1e-8))

    # return overlapping_count / reference_count
    return f1_score, precision, recall
290,227
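A tiny worked sketch of the same precision/recall/F1 arithmetic on explicit unigram sets; it bypasses _get_word_ngrams and simply mirrors the formulas in rouge_n above:

evaluated_ngrams = {('the',), ('cat',), ('sat',)}
reference_ngrams = {('the',), ('cat',), ('slept',), ('here',)}

overlapping_count = len(evaluated_ngrams & reference_ngrams)  # 2
precision = overlapping_count / len(evaluated_ngrams)         # 2/3
recall = overlapping_count / len(reference_ngrams)            # 2/4
f1_score = 2.0 * precision * recall / (precision + recall + 1e-8)
print(round(precision, 3), round(recall, 3), round(f1_score, 3))
# 0.667 0.5 0.571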
Class method that will return a Droplet object by ID. Args: api_token (str): token droplet_id (int): droplet id
def get_object(cls, api_token, droplet_id):
    droplet = cls(token=api_token, id=droplet_id)
    droplet.load()
    return droplet
291,113
Perform a droplet action. Args: params (dict): parameters of the action Optional Args: return_dict (bool): Return a dict when True (default), otherwise return an Action. Returns dict or Action
def _perform_action(self, params, return_dict=True):
    action = self.get_data(
        "droplets/%s/actions/" % self.id,
        type=POST,
        params=params
    )
    if return_dict:
        return action
    else:
        action = action[u'action']
        return_action = Action(token=self.token)
        # Loading attributes
        for attr in action.keys():
            setattr(return_action, attr, action[attr])

        return return_action
291,118
Resize the droplet to a new size slug. https://developers.digitalocean.com/documentation/v2/#resize-a-droplet Args: new_size_slug (str): name of new size Optional Args: return_dict (bool): Return a dict when True (default), otherwise return an Action. disk (bool): If a permanent resize, with disk changes included. Returns dict or Action
def resize(self, new_size_slug, return_dict=True, disk=True):
    options = {"type": "resize", "size": new_size_slug}
    if disk:
        options["disk"] = "true"

    return self._perform_action(options, return_dict)
291,119
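A quick usage sketch with the python-digitalocean client; the token, droplet ID, and size slug are placeholders:

import digitalocean

# Placeholder credentials/IDs for illustration only.
droplet = digitalocean.Droplet.get_object(api_token='YOUR_TOKEN', droplet_id=12345)
action = droplet.resize('s-2vcpu-4gb', return_dict=False, disk=True)
action.wait()  # block until the resize action completes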
Take a snapshot! Args: snapshot_name (str): name of snapshot Optional Args: return_dict (bool): Return a dict when True (default), otherwise return an Action. power_off (bool): Before taking the snapshot the droplet will be turned off with another API call. It will wait until the droplet will be powered off. Returns dict or Action
def take_snapshot(self, snapshot_name, return_dict=True, power_off=False):
    if power_off is True and self.status != "off":
        action = self.power_off(return_dict=False)
        action.wait()
        self.load()

    return self._perform_action(
        {"type": "snapshot", "name": snapshot_name},
        return_dict
    )
291,120
Restore the droplet to an image (snapshot or backup) Args: image_id (int): id of image Optional Args: return_dict (bool): Return a dict when True (default), otherwise return an Action. Returns dict or Action
def rebuild(self, image_id=None, return_dict=True):
    if not image_id:
        image_id = self.image['id']

    return self._perform_action(
        {"type": "rebuild", "image": image_id},
        return_dict
    )
291,121
Change the kernel to a new one Args: kernel : instance of digitalocean.Kernel.Kernel Optional Args: return_dict (bool): Return a dict when True (default), otherwise return an Action. Returns dict or Action
def change_kernel(self, kernel, return_dict=True):
    if type(kernel) != Kernel:
        raise BadKernelObject("Use Kernel object")

    return self._perform_action(
        {'type': 'change_kernel', 'kernel': kernel.id},
        return_dict
    )
291,122
Returns a specific Action by its ID. Args: action_id (int): id of action
def get_action(self, action_id):
    return Action.get_object(
        api_token=self.token,
        action_id=action_id
    )
291,126
Class method that will return a FloatingIP object by its IP. Args: api_token: str - token ip: str - floating ip address
def get_object(cls, api_token, ip):
    floating_ip = cls(token=api_token, ip=ip)
    floating_ip.load()
    return floating_ip
291,139
Creates a FloatingIP and assigns it to a Droplet. Note: Every argument and parameter given to this method will be assigned to the object. Args: droplet_id: int - droplet id
def create(self, *args, **kwargs):
    data = self.get_data('floating_ips/',
                         type=POST,
                         params={'droplet_id': self.droplet_id})

    if data:
        self.ip = data['floating_ip']['ip']
        self.region = data['floating_ip']['region']

    return self
291,141
Creates a FloatingIP in a region without assigning it to a specific Droplet. Note: Every argument and parameter given to this method will be assigned to the object. Args: region_slug: str - region's slug (e.g. 'nyc3')
def reserve(self, *args, **kwargs):
    data = self.get_data('floating_ips/',
                         type=POST,
                         params={'region': self.region_slug})

    if data:
        self.ip = data['floating_ip']['ip']
        self.region = data['floating_ip']['region']

    return self
291,142
Assign a FloatingIP to a Droplet. Args: droplet_id: int - droplet id
def assign(self, droplet_id):
    return self.get_data(
        "floating_ips/%s/actions/" % self.ip,
        type=POST,
        params={"type": "assign", "droplet_id": droplet_id}
    )
291,143
Returns a Load Balancer object by its ID. Args: id (str): Load Balancer ID
def get_load_balancer(self, id):
    return LoadBalancer.get_object(api_token=self.token, id=id)
291,176
Returns a Certificate object by its ID. Args: id (str): Certificate ID
def get_certificate(self, id):
    return Certificate.get_object(api_token=self.token, cert_id=id)
291,177
Class method that will return a LoadBalancer object by its ID. Args: api_token (str): DigitalOcean API token id (str): Load Balancer ID
def get_object(cls, api_token, id):
    load_balancer = cls(token=api_token, id=id)
    load_balancer.load()
    return load_balancer
291,191
Assign a LoadBalancer to a Droplet. Args: droplet_ids (obj:`list` of `int`): A list of Droplet IDs
def add_droplets(self, droplet_ids):
    return self.get_data(
        "load_balancers/%s/droplets/" % self.id,
        type=POST,
        params={"droplet_ids": droplet_ids}
    )
291,195
Unassign a LoadBalancer. Args: droplet_ids (obj:`list` of `int`): A list of Droplet IDs
def remove_droplets(self, droplet_ids):
    return self.get_data(
        "load_balancers/%s/droplets/" % self.id,
        type=DELETE,
        params={"droplet_ids": droplet_ids}
    )
291,196
Adds new forwarding rules to a LoadBalancer. Args: forwarding_rules (obj:`list`): A list of `ForwardingRule` objects
def add_forwarding_rules(self, forwarding_rules):
    rules_dict = [rule.__dict__ for rule in forwarding_rules]

    return self.get_data(
        "load_balancers/%s/forwarding_rules/" % self.id,
        type=POST,
        params={"forwarding_rules": rules_dict}
    )
291,197
Removes existing forwarding rules from a LoadBalancer. Args: forwarding_rules (obj:`list`): A list of `ForwardingRule` objects
def remove_forwarding_rules(self, forwarding_rules):
    rules_dict = [rule.__dict__ for rule in forwarding_rules]

    return self.get_data(
        "load_balancers/%s/forwarding_rules/" % self.id,
        type=DELETE,
        params={"forwarding_rules": rules_dict}
    )
291,198
Attach a Volume to a Droplet. Args: droplet_id: int - droplet id region: string - slug identifier for the region
def attach(self, droplet_id, region):
    return self.get_data(
        "volumes/%s/actions/" % self.id,
        type=POST,
        params={"type": "attach",
                "droplet_id": droplet_id,
                "region": region}
    )
291,219
Resize the Volume. Args: size_gigabytes: int - size of the Block Storage volume in GiB region: string - slug identifier for the region
def resize(self, size_gigabytes, region):
    return self.get_data(
        "volumes/%s/actions/" % self.id,
        type=POST,
        params={"type": "resize",
                "size_gigabytes": size_gigabytes,
                "region": region}
    )
291,220
Create a snapshot of the volume. Args: name: string - a human-readable name for the snapshot
def snapshot(self, name):
    return self.get_data(
        "volumes/%s/snapshots/" % self.id,
        type=POST,
        params={"name": name}
    )
291,221
Construct the analogy test set by mapping the words to their word vector ids. Arguments: - test_examples: iterable of 4-word iterables - dictionary: a mapping from words to ids - boolean ignore_missing: if True, words in the test set that are not in the dictionary will be dropped. Returns: - a N by 4 numpy matrix.
def construct_analogy_test_set(test_examples, dictionary, ignore_missing=False):
    test = []

    for example in test_examples:
        try:
            test.append([dictionary[word] for word in example])
        except KeyError:
            if ignore_missing:
                pass
            else:
                raise

    try:
        test = np.array(test, dtype=np.int32)
    except ValueError as e:
        # This should use raise ... from ... in Python 3.
        raise ValueError('Each row of the test set should contain '
                         '4 integer word ids', e)

    return test
293,066
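A minimal usage sketch of construct_analogy_test_set above with a made-up word-to-id dictionary; it assumes numpy and the function are in scope:

import numpy as np

dictionary = {'king': 0, 'queen': 1, 'man': 2, 'woman': 3}
examples = [('king', 'queen', 'man', 'woman'),
            ('king', 'queen', 'man', 'monarch')]  # 'monarch' is not in the dictionary

test = construct_analogy_test_set(examples, dictionary, ignore_missing=True)
print(test)  # [[0 1 2 3]] -- the row with the unknown word was dropped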
Estimate the word embeddings. Parameters: - scipy.sparse.coo_matrix matrix: coocurrence matrix - int epochs: number of training epochs - int no_threads: number of training threads - bool verbose: print progress messages if True
def fit(self, matrix, epochs=5, no_threads=2, verbose=False):
    shape = matrix.shape

    if (len(shape) != 2 or
            shape[0] != shape[1]):
        raise Exception('Coocurrence matrix must be square')

    if not sp.isspmatrix_coo(matrix):
        raise Exception('Coocurrence matrix must be in the COO format')

    random_state = check_random_state(self.random_state)

    self.word_vectors = ((random_state.rand(shape[0], self.no_components)
                          - 0.5) / self.no_components)
    self.word_biases = np.zeros(shape[0], dtype=np.float64)

    self.vectors_sum_gradients = np.ones_like(self.word_vectors)
    self.biases_sum_gradients = np.ones_like(self.word_biases)

    shuffle_indices = np.arange(matrix.nnz, dtype=np.int32)

    if verbose:
        print('Performing %s training epochs '
              'with %s threads' % (epochs, no_threads))

    for epoch in range(epochs):
        if verbose:
            print('Epoch %s' % epoch)

        # Shuffle the coocurrence matrix
        random_state.shuffle(shuffle_indices)

        fit_vectors(self.word_vectors,
                    self.vectors_sum_gradients,
                    self.word_biases,
                    self.biases_sum_gradients,
                    matrix.row,
                    matrix.col,
                    matrix.data,
                    shuffle_indices,
                    self.learning_rate,
                    self.max_count,
                    self.alpha,
                    self.max_loss,
                    int(no_threads))

        if not np.isfinite(self.word_vectors).all():
            raise Exception('Non-finite values in word vectors. '
                            'Try reducing the learning rate or the '
                            'max_loss parameter.')
293,074
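A short usage sketch, assuming this is the fit method of the Glove class from the glove-python package and using its Corpus helper to build the sparse COO co-occurrence matrix; the toy sentences and hyperparameters are arbitrary:

from glove import Corpus, Glove

sentences = [['the', 'cat', 'sat'], ['the', 'dog', 'sat']]

corpus = Corpus()
corpus.fit(sentences, window=2)          # builds the co-occurrence matrix

glove = Glove(no_components=50, learning_rate=0.05)
glove.fit(corpus.matrix, epochs=5, no_threads=2, verbose=True)
glove.add_dictionary(corpus.dictionary)  # attach word -> id mapping for lookups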
Scale the DNB data using a histogram equalization method. Args: dnb_data (ndarray): Day/Night Band data array sza_data (ndarray): Solar Zenith Angle data array
def _run_dnb_normalization(self, dnb_data, sza_data):
    # convert dask arrays to DataArray objects
    dnb_data = xr.DataArray(dnb_data, dims=('y', 'x'))
    sza_data = xr.DataArray(sza_data, dims=('y', 'x'))

    good_mask = ~(dnb_data.isnull() | sza_data.isnull())
    output_dataset = dnb_data.where(good_mask)
    # we only need the numpy array
    output_dataset = output_dataset.values.copy()
    dnb_data = dnb_data.values
    sza_data = sza_data.values

    day_mask, mixed_mask, night_mask = make_day_night_masks(
        sza_data,
        good_mask.values,
        self.high_angle_cutoff,
        self.low_angle_cutoff,
        stepsDegrees=self.mixed_degree_step)

    did_equalize = False
    if day_mask.any():
        LOG.debug("Histogram equalizing DNB day data...")
        histogram_equalization(dnb_data, day_mask, out=output_dataset)
        did_equalize = True
    if mixed_mask:
        for mask in mixed_mask:
            if mask.any():
                LOG.debug("Histogram equalizing DNB mixed data...")
                histogram_equalization(dnb_data, mask, out=output_dataset)
                did_equalize = True
    if night_mask.any():
        LOG.debug("Histogram equalizing DNB night data...")
        histogram_equalization(dnb_data, night_mask, out=output_dataset)
        did_equalize = True

    if not did_equalize:
        raise RuntimeError("No valid data found to histogram equalize")

    return output_dataset
293,140
Scale the DNB data using an adaptive histogram equalization method. Args: dnb_data (ndarray): Day/Night Band data array sza_data (ndarray): Solar Zenith Angle data array
def _run_dnb_normalization(self, dnb_data, sza_data):
    # convert dask arrays to DataArray objects
    dnb_data = xr.DataArray(dnb_data, dims=('y', 'x'))
    sza_data = xr.DataArray(sza_data, dims=('y', 'x'))

    good_mask = ~(dnb_data.isnull() | sza_data.isnull())
    # good_mask = ~(dnb_data.mask | sza_data.mask)
    output_dataset = dnb_data.where(good_mask)
    # we only need the numpy array
    output_dataset = output_dataset.values.copy()
    dnb_data = dnb_data.values
    sza_data = sza_data.values

    day_mask, mixed_mask, night_mask = make_day_night_masks(
        sza_data,
        good_mask.values,
        self.high_angle_cutoff,
        self.low_angle_cutoff,
        stepsDegrees=self.mixed_degree_step)

    did_equalize = False
    has_multi_times = len(mixed_mask) > 0
    if day_mask.any():
        did_equalize = True
        if self.adaptive_day == "always" or (
                has_multi_times and self.adaptive_day == "multiple"):
            LOG.debug("Adaptive histogram equalizing DNB day data...")
            local_histogram_equalization(
                dnb_data, day_mask,
                valid_data_mask=good_mask.values,
                local_radius_px=self.day_radius_pixels,
                out=output_dataset)
        else:
            LOG.debug("Histogram equalizing DNB day data...")
            histogram_equalization(dnb_data, day_mask, out=output_dataset)
    if mixed_mask:
        for mask in mixed_mask:
            if mask.any():
                did_equalize = True
                if self.adaptive_mixed == "always" or (
                        has_multi_times and self.adaptive_mixed == "multiple"):
                    LOG.debug(
                        "Adaptive histogram equalizing DNB mixed data...")
                    local_histogram_equalization(
                        dnb_data, mask,
                        valid_data_mask=good_mask.values,
                        local_radius_px=self.mixed_radius_pixels,
                        out=output_dataset)
                else:
                    LOG.debug("Histogram equalizing DNB mixed data...")
                    # equalize the current mixed-angle mask
                    histogram_equalization(dnb_data, mask, out=output_dataset)
    if night_mask.any():
        did_equalize = True
        if self.adaptive_night == "always" or (
                has_multi_times and self.adaptive_night == "multiple"):
            LOG.debug("Adaptive histogram equalizing DNB night data...")
            local_histogram_equalization(
                dnb_data, night_mask,
                valid_data_mask=good_mask.values,
                local_radius_px=self.night_radius_pixels,
                out=output_dataset)
        else:
            LOG.debug("Histogram equalizing DNB night data...")
            histogram_equalization(dnb_data, night_mask, out=output_dataset)

    if not did_equalize:
        raise RuntimeError("No valid data found to histogram equalize")

    return output_dataset
293,143
Evaluate a Chebyshev Polynomial Args: coefs (list, np.array): Coefficients defining the polynomial time (int, float): Time where to evaluate the polynomial domain (list, tuple): Domain (or time interval) for which the polynomial is defined: [left, right] Reference: Appendix A in the MSG Level 1.5 Image Data Format Description.
def chebyshev(coefs, time, domain):
    return Chebyshev(coefs, domain=domain)(time) - 0.5 * coefs[0]
293,181
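A quick numeric sketch of the same evaluation using numpy's Chebyshev class; the coefficients and domain below are arbitrary, not real MSG orbit coefficients:

import numpy as np
from numpy.polynomial.chebyshev import Chebyshev

def chebyshev(coefs, time, domain):
    return Chebyshev(coefs, domain=domain)(time) - 0.5 * coefs[0]

coefs = [1.0, 0.5, -0.2]       # arbitrary Chebyshev coefficients
domain = [0.0, 100.0]          # time interval the polynomial is defined on
print(chebyshev(coefs, 50.0, domain))  # value at the domain midpoint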
Filter the provided key iterable by the provided `DatasetID`. Note: The `modifiers` attribute of `did` should be `None` to allow for **any** modifier in the results. Args: did (DatasetID): Query parameters to match in the `key_container`. key_container (iterable): Set, list, tuple, or dict of `DatasetID` keys. Returns (list): List of keys matching the provided parameters in no specific order.
def filter_keys_by_dataset_id(did, key_container):
    keys = iter(key_container)
    for key in DATASET_KEYS:
        if getattr(did, key) is not None:
            if key == "wavelength":
                keys = [k for k in keys
                        if (getattr(k, key) is not None and
                            DatasetID.wavelength_match(getattr(k, key),
                                                       getattr(did, key)))]
            else:
                keys = [k for k in keys
                        if getattr(k, key) is not None and
                        getattr(k, key) == getattr(did, key)]

    return keys
293,205
Generator of reader configuration files for one or more readers Args: reader (Optional[str]): Yield configs only for this reader ppp_config_dir (Optional[str]): Additional configuration directory to search for reader configuration files. Returns: Generator of lists of configuration files
def configs_for_reader(reader=None, ppp_config_dir=None):
    search_paths = (ppp_config_dir,) if ppp_config_dir else tuple()
    if reader is not None:
        if not isinstance(reader, (list, tuple)):
            reader = [reader]
        # check for old reader names
        new_readers = []
        for reader_name in reader:
            if reader_name.endswith('.yaml') or reader_name not in OLD_READER_NAMES:
                new_readers.append(reader_name)
                continue

            new_name = OLD_READER_NAMES[reader_name]
            # Satpy 0.11 only displays a warning
            # Satpy 0.13 will raise an exception
            raise ValueError(
                "Reader name '{}' has been deprecated, "
                "use '{}' instead.".format(reader_name, new_name))
            # Satpy 0.15 or 1.0, remove exception and mapping

        reader = new_readers
        # given a config filename or reader name
        config_files = [r if r.endswith('.yaml') else r + '.yaml' for r in reader]
    else:
        reader_configs = glob_config(os.path.join('readers', '*.yaml'),
                                     *search_paths)
        config_files = set(reader_configs)

    for config_file in config_files:
        config_basename = os.path.basename(config_file)
        reader_configs = config_search_paths(
            os.path.join("readers", config_basename), *search_paths)

        if not reader_configs:
            # either the reader they asked for does not exist
            # or satpy is improperly configured and can't find its own readers
            raise ValueError("No reader(s) named: {}".format(reader))

        yield reader_configs
293,209
Available readers based on current configuration. Args: as_dict (bool): Optionally return reader information as a dictionary. Default: False Returns: List of available reader names. If `as_dict` is `True` then a list of dictionaries including additionally reader information is returned.
def available_readers(as_dict=False):
    readers = []
    for reader_configs in configs_for_reader():
        try:
            reader_info = read_reader_config(reader_configs)
        except (KeyError, IOError, yaml.YAMLError):
            LOG.warning("Could not import reader config from: %s", reader_configs)
            LOG.debug("Error loading YAML", exc_info=True)
            continue
        readers.append(reader_info if as_dict else reader_info['name'])
    return readers
293,210
Collect custom configuration values. Args: max_sza (float): Maximum solar zenith angle in degrees that is considered valid and correctable. Default 95.0.
def __init__(self, max_sza=95.0, **kwargs):
    self.max_sza = max_sza
    self.max_sza_cos = np.cos(np.deg2rad(max_sza)) if max_sza is not None else None
    super(SunZenithCorrectorBase, self).__init__(**kwargs)
293,237
Collect custom configuration values. Args: correction_limit (float): Maximum solar zenith angle to apply the correction in degrees. Pixels beyond this limit have a constant correction applied. Default 88. max_sza (float): Maximum solar zenith angle in degrees that is considered valid and correctable. Default 95.0.
def __init__(self, correction_limit=88., **kwargs):
    self.correction_limit = correction_limit
    super(SunZenithCorrector, self).__init__(**kwargs)
293,239
Collect custom configuration values. Args: correction_limit (float): Maximum solar zenith angle to apply the correction in degrees. Pixels beyond this limit have a constant correction applied. Default 88. max_sza (float): Maximum solar zenith angle in degrees that is considered valid and correctable. Default 95.0.
def __init__(self, correction_limit=88., **kwargs):
    self.correction_limit = correction_limit
    super(EffectiveSolarPathLengthCorrector, self).__init__(**kwargs)
293,241
Collect custom configuration values. Args: common_channel_mask (bool): If True, mask all the channels with a mask that combines all the invalid areas of the given data.
def __init__(self, name, common_channel_mask=True, **kwargs):
    self.common_channel_mask = common_channel_mask
    super(GenericCompositor, self).__init__(name, **kwargs)
293,250
Collect custom configuration values. Args: lim_low (float): lower limit of Sun zenith angle for the blending of the given channels lim_high (float): upper limit of Sun zenith angle for the blending of the given channels
def __init__(self, name, lim_low=85., lim_high=95., **kwargs):
    self.lim_low = lim_low
    self.lim_high = lim_high
    super(DayNightCompositor, self).__init__(name, **kwargs)
293,260
Collect custom configuration values. Args: transition_min (float): Values below or equal to this are clouds -> opaque white transition_max (float): Values above this are cloud free -> transparent transition_gamma (float): Gamma correction to apply at the end
def __init__(self, name, transition_min=258.15, transition_max=298.15,
             transition_gamma=3.0, **kwargs):
    self.transition_min = transition_min
    self.transition_max = transition_max
    self.transition_gamma = transition_gamma
    super(CloudCompositor, self).__init__(name, **kwargs)
293,264
Initialize resampler with geolocation information. Args: source_geo_def (SwathDefinition, AreaDefinition): Geolocation definition for the data to be resampled target_geo_def (CoordinateDefinition, AreaDefinition): Geolocation definition for the area to resample data to.
def __init__(self, source_geo_def, target_geo_def):
    self.source_geo_def = source_geo_def
    self.target_geo_def = target_geo_def
293,280
Generator of writer configuration files for one or more writers Args: writer (Optional[str]): Yield configs only for this writer ppp_config_dir (Optional[str]): Additional configuration directory to search for writer configuration files. Returns: Generator of lists of configuration files
def configs_for_writer(writer=None, ppp_config_dir=None):
    search_paths = (ppp_config_dir,) if ppp_config_dir else tuple()
    if writer is not None:
        if not isinstance(writer, (list, tuple)):
            writer = [writer]
        # given a config filename or writer name
        config_files = [w if w.endswith('.yaml') else w + '.yaml' for w in writer]
    else:
        writer_configs = glob_config(os.path.join('writers', '*.yaml'),
                                     *search_paths)
        config_files = set(writer_configs)

    for config_file in config_files:
        config_basename = os.path.basename(config_file)
        writer_configs = config_search_paths(
            os.path.join("writers", config_basename), *search_paths)

        if not writer_configs:
            LOG.warning("No writer configs found for '%s'", writer)
            continue

        yield writer_configs
293,369
Available writers based on current configuration. Args: as_dict (bool): Optionally return writer information as a dictionary. Default: False Returns: List of available writer names. If `as_dict` is `True` then a list of dictionaries including additionally writer information is returned.
def available_writers(as_dict=False):
    writers = []
    for writer_configs in configs_for_writer():
        try:
            writer_info = read_writer_config(writer_configs)
        except (KeyError, IOError, yaml.YAMLError):
            LOG.warning("Could not import writer config from: %s", writer_configs)
            LOG.debug("Error loading YAML", exc_info=True)
            continue
        writers.append(writer_info if as_dict else writer_info['name'])
    return writers
293,370
convert ``dataset`` into a :class:`~trollimage.xrimage.XRImage` instance. Convert the ``dataset`` into an instance of the :class:`~trollimage.xrimage.XRImage` class. This function makes no other changes. To get an enhanced image, possibly with overlays and decoration, see :func:`~get_enhanced_image`. Args: dataset (xarray.DataArray): Data to be converted to an image. Returns: Instance of :class:`~trollimage.xrimage.XRImage`.
def to_image(dataset):
    dataset = dataset.squeeze()

    if dataset.ndim < 2:
        raise ValueError("Need at least a 2D array to make an image.")
    else:
        return XRImage(dataset)
293,377
Compute all the given dask graphs `results` so that the files are saved. Args: results (iterable): Iterable of dask graphs resulting from calls to `scn.save_datasets(..., compute=False)`
def compute_writer_results(results):
    if not results:
        return

    sources, targets, delayeds = split_results(results)

    # one or more writers have targets that we need to close in the future
    if targets:
        delayeds.append(da.store(sources, targets, compute=False))

    if delayeds:
        da.compute(delayeds)

    if targets:
        for target in targets:
            if hasattr(target, 'close'):
                target.close()
293,379
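A brief usage sketch with Satpy's Scene API; the file path, reader name, and dataset name are placeholders to adapt to real data:

from satpy import Scene
from satpy.writers import compute_writer_results

# Placeholder filenames/reader/dataset names.
scn = Scene(filenames=['/path/to/granule.nc'], reader='some_reader')
scn.load(['some_dataset'])

res1 = scn.save_datasets(writer='geotiff', compute=False)
res2 = scn.save_datasets(writer='simple_image', compute=False)
compute_writer_results([res1, res2])  # run both writer graphs in one pass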
Create a filename where output data will be saved. Args: kwargs (dict): Attributes and other metadata to use for formatting the previously provided `filename`.
def get_filename(self, **kwargs):
    if self.filename_parser is None:
        raise RuntimeError("No filename pattern or specific filename provided")

    output_filename = self.filename_parser.compose(kwargs)

    dirname = os.path.dirname(output_filename)
    if dirname and not os.path.isdir(dirname):
        LOG.info("Creating output directory: {}".format(dirname))
        os.makedirs(dirname)

    return output_filename
293,383
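A hedged sketch of how get_filename composes an output path from a trollsift-style pattern; `w`, the pattern, and the attribute values below are assumptions for illustration.

from datetime import datetime

# assuming `w` is a writer instance created with base_dir='/tmp' and
# filename='{platform_name}_{start_time:%Y%m%d_%H%M}.tif'
path = w.get_filename(platform_name='Meteosat-11',
                      start_time=datetime(2019, 1, 1, 12, 0))
# expected result (under the assumptions above): '/tmp/Meteosat-11_20190101_1200.tif'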
Initialize an Enhancer instance. Args: ppp_config_dir: Points to the base configuration directory enhancement_config_file: The enhancement configuration to apply, False to leave as is.
def __init__(self, ppp_config_dir=None, enhancement_config_file=None):
    self.ppp_config_dir = ppp_config_dir or get_environ_config_dir()
    self.enhancement_config_file = enhancement_config_file
    # Set enhancement_config_file to False for no enhancements
    if self.enhancement_config_file is None:
        # it wasn't specified in the config or in the kwargs, we should
        # provide a default
        config_fn = os.path.join("enhancements", "generic.yaml")
        self.enhancement_config_file = config_search_paths(config_fn, self.ppp_config_dir)

    if not self.enhancement_config_file:
        # They don't want any automatic enhancements
        self.enhancement_tree = None
    else:
        if not isinstance(self.enhancement_config_file, (list, tuple)):
            self.enhancement_config_file = [self.enhancement_config_file]
        self.enhancement_tree = EnhancementDecisionTree(*self.enhancement_config_file)

    self.sensor_enhancement_configs = []
293,398
Check the satpy readers and writers for correct installation. Args: readers (list or None): Limit readers checked to those specified writers (list or None): Limit writers checked to those specified extras (list or None): Limit extras checked to those specified Returns: None. The result for each reader, writer and extra module is printed to standard output.
def check_satpy(readers=None, writers=None, extras=None):
    from satpy.readers import configs_for_reader
    from satpy.writers import configs_for_writer

    print('Readers')
    print('=======')
    for reader, res in sorted(check_yaml_configs(configs_for_reader(reader=readers), 'reader').items()):
        print(reader + ': ', res)
    print()

    print('Writers')
    print('=======')
    for writer, res in sorted(check_yaml_configs(configs_for_writer(writer=writers), 'writer').items()):
        print(writer + ': ', res)
    print()

    print('Extras')
    print('======')
    module_names = extras if extras is not None else ('cartopy', 'geoviews')
    for module_name, res in sorted(_check_import(module_names).items()):
        print(module_name + ': ', res)
    print()
293,493
Make sure sensor and platform are consistent. Args: sensor (str): Sensor name from YAML dataset definition Logs an error if they do not match.
def _check_sensor_platform_consistency(self, sensor):
    ref_sensor = SENSORS.get(self.platform, None)
    if ref_sensor and not sensor == ref_sensor:
        logger.error('Sensor-Platform mismatch: {} is not a payload '
                     'of {}. Did you choose the correct reader?'
                     .format(sensor, self.platform))
293,564
Get a dataset. Args: dsid: Dataset ID dsinfo: Dataset information Returns: Dask DataArray: Data
def get_dataset(self, dsid, dsinfo):
    data = self[dsinfo.get('file_key', dsid.name)]
    data.attrs.update(dsinfo)
    data.attrs["platform_name"] = self['/attr/satellite_name']
    data.attrs["sensor"] = self['/attr/instrument_name']
    return data
293,635
Make sure the file path is valid, then read the data into a Dask DataFrame. Args: filename: Filename filename_info: Filename information filetype_info: Filetype information
def __init__(self, filename, filename_info, filetype_info):
    super(VIIRSActiveFiresTextFileHandler, self).__init__(filename, filename_info, filetype_info)
    if not os.path.isfile(filename):
        return
    self.file_content = dd.read_csv(filename, skiprows=15, header=None,
                                    names=["latitude", "longitude", "T13",
                                           "Along-scan", "Along-track",
                                           "detection_confidence", "power"])
293,636
Convert a `numpy.string_` to str. Args: value (ndarray): scalar or 1-element numpy array to convert Raises: ValueError: if value is an array larger than one element, is not of type `numpy.string_`, or is not a numpy array
def np2str(value):
    if hasattr(value, 'dtype') and \
            issubclass(value.dtype.type, (np.string_, np.object_)) and value.size == 1:
        value = np.asscalar(value)
        if not isinstance(value, str):
            # python 3 - was scalar numpy array of bytes
            # otherwise python 2 - scalar numpy array of 'str'
            value = value.decode()
        return value
    else:
        raise ValueError("Array is not a string type or is larger than 1")
293,638
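A small sketch of the conversion above, using the np2str defined there; the satellite name is illustrative.

import numpy as np

print(np2str(np.array(b'NOAA-20')))   # -> 'NOAA-20' (1-element bytes array is decoded)
try:
    np2str(np.array([b'a', b'b']))    # more than one element
except ValueError as err:
    print(err)                        # -> Array is not a string type or is larger than 1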
Compute a mask of the earth's shape as seen by a geostationary satellite. Args: area (pyresample.geometry.AreaDefinition): Corresponding area definition Returns: Boolean mask, True inside the earth's shape, False outside.
def get_geostationary_mask(area):
    # Compute projection coordinates at the earth's limb
    h = area.proj_dict['h']
    xmax, ymax = get_geostationary_angle_extent(area)
    xmax *= h
    ymax *= h

    # Compute projection coordinates at the centre of each pixel
    x, y = area.get_proj_coords_dask()

    # Compute mask of the earth's elliptical shape
    return ((x / xmax) ** 2 + (y / ymax) ** 2) <= 1
293,640
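A hedged sketch with a full-disk geostationary area; the SEVIRI-like projection parameters and grid size are illustrative and assume a pyresample version providing `get_proj_coords_dask`.

from pyresample.geometry import AreaDefinition

area = AreaDefinition(
    'full_disk', 'Geostationary full disk', 'geos',
    {'proj': 'geos', 'lon_0': 0.0, 'h': 35785831.0,
     'a': 6378169.0, 'b': 6356583.8, 'units': 'm'},
    3712, 3712,
    (-5570248.7, -5567248.1, 5567248.1, 5570248.7))
mask = get_geostationary_mask(area)  # boolean dask array, True on the earth disk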
Get the bbox in lon/lats of the valid pixels inside *geos_area*. Args: nb_points: Number of points on the polygon
def get_geostationary_bounding_box(geos_area, nb_points=50):
    xmax, ymax = get_geostationary_angle_extent(geos_area)

    # generate points around the north hemisphere in satellite projection
    # make it a bit smaller so that we stay inside the valid area
    # (integer division so np.linspace gets an integer number of points)
    x = np.cos(np.linspace(-np.pi, 0, nb_points // 2)) * (xmax - 0.001)
    y = -np.sin(np.linspace(-np.pi, 0, nb_points // 2)) * (ymax - 0.001)

    # clip the projection coordinates to fit the area extent of geos_area
    ll_x, ll_y, ur_x, ur_y = (np.array(geos_area.area_extent) /
                              geos_area.proj_dict['h'])

    x = np.clip(np.concatenate([x, x[::-1]]), min(ll_x, ur_x), max(ll_x, ur_x))
    y = np.clip(np.concatenate([y, -y]), min(ll_y, ur_y), max(ll_y, ur_y))

    return _lonlat_from_geos_angle(x, y, geos_area)
293,642
Create a copy of the Scene including dependency information. Args: datasets (list, tuple): `DatasetID` objects for the datasets to include in the new Scene object.
def copy(self, datasets=None):
    new_scn = self.__class__()
    new_scn.attrs = self.attrs.copy()
    new_scn.dep_tree = self.dep_tree.copy()

    for ds_id in (datasets or self.keys()):
        # NOTE: Must use `.datasets` or side effects of `__setitem__`
        #       could hurt us with regards to the wishlist
        new_scn.datasets[ds_id] = self[ds_id]

    if not datasets:
        new_scn.wishlist = self.wishlist.copy()
    else:
        new_scn.wishlist = set([DatasetID.from_dict(ds.attrs)
                                for ds in new_scn])
    return new_scn
293,662
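A small sketch of copy, assuming `scn` is a Scene with datasets already loaded; the copy shares the underlying data arrays.

# keep only the first loaded product in the copy
first_id = list(scn.keys())[0]
partial_scn = scn.copy(datasets=[first_id])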
Collect all composite prereqs and create the specified composite. Args: comp_node (Node): Composite Node to generate a Dataset for keepables (set): `set` to update if any datasets are needed when generation is continued later. This can happen if generation is delayed because of incompatible areas which would require resampling first.
def _generate_composite(self, comp_node, keepables):
    if comp_node.name in self.datasets:
        # already loaded
        return
    compositor, prereqs, optional_prereqs = comp_node.data

    try:
        prereq_datasets = self._get_prereq_datasets(
            comp_node.name,
            prereqs,
            keepables,
        )
    except KeyError:
        return

    optional_datasets = self._get_prereq_datasets(
        comp_node.name,
        optional_prereqs,
        keepables,
        skip=True
    )

    try:
        composite = compositor(prereq_datasets,
                               optional_datasets=optional_datasets,
                               **self.attrs)
        cid = DatasetID.from_dict(composite.attrs)
        self.datasets[cid] = composite

        # update the node with the computed DatasetID
        if comp_node.name in self.wishlist:
            self.wishlist.remove(comp_node.name)
            self.wishlist.add(cid)
        comp_node.name = cid
    except IncompatibleAreas:
        LOG.debug("Delaying generation of %s because of incompatible areas",
                  str(compositor.id))
        preservable_datasets = set(self.datasets.keys())
        prereq_ids = set(p.name for p in prereqs)
        opt_prereq_ids = set(p.name for p in optional_prereqs)
        keepables |= preservable_datasets & (prereq_ids | opt_prereq_ids)
        # even though it wasn't generated keep a list of what
        # might be needed in other compositors
        keepables.add(comp_node.name)
        return
293,675
Load datasets from the necessary reader. Args: nodes (iterable): DependencyTree Node objects **kwargs: Keyword arguments to pass to the reader's `load` method. Returns: DatasetDict of loaded datasets
def read(self, nodes=None, **kwargs):
    if nodes is None:
        required_nodes = self.wishlist - set(self.datasets.keys())
        nodes = self.dep_tree.leaves(nodes=required_nodes)
    return self._read_datasets(nodes, **kwargs)
293,677
Unload all unneeded datasets. Datasets are considered unneeded if they weren't directly requested or added to the Scene by the user or they are no longer needed to generate composites that have yet to be generated. Args: keepables (iterable): DatasetIDs to keep whether they are needed or not.
def unload(self, keepables=None):
    to_del = [ds_id for ds_id, projectable in self.datasets.items()
              if ds_id not in self.wishlist and
              (not keepables or ds_id not in keepables)]
    for ds_id in to_del:
        LOG.debug("Unloading dataset: %r", ds_id)
        del self.datasets[ds_id]
293,680
Merge all xr.DataArrays of a scene into one xr.Dataset. Parameters: datasets (list): List of products to include in the :class:`xarray.Dataset` Returns: :class:`xarray.Dataset`
def to_xarray_dataset(self, datasets=None):
    if datasets is not None:
        datasets = [self[ds] for ds in datasets]
    else:
        datasets = [self.datasets.get(ds) for ds in self.wishlist]
        datasets = [ds for ds in datasets if ds is not None]

    ds_dict = {i.attrs['name']: i.rename(i.attrs['name'])
               for i in datasets if i.attrs.get('area') is not None}
    mdata = combine_metadata(*tuple(i.attrs for i in datasets))
    if mdata.get('area') is None or not isinstance(mdata['area'], SwathDefinition):
        # either don't know what the area is or we have an AreaDefinition
        ds = xr.merge(ds_dict.values())
    else:
        # we have a swath definition and should use lon/lat values
        lons, lats = mdata['area'].get_lonlats()
        if not isinstance(lons, DataArray):
            lons = DataArray(lons, dims=('y', 'x'))
            lats = DataArray(lats, dims=('y', 'x'))
        # ds_dict['longitude'] = lons
        # ds_dict['latitude'] = lats
        ds = xr.Dataset(ds_dict, coords={"latitude": (["y", "x"], lats),
                                         "longitude": (["y", "x"], lons)})

    ds.attrs = mdata
    return ds
293,687
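A short sketch, assuming `scn` is a Scene on which the named band was loaded; the product name is illustrative.

ds = scn.to_xarray_dataset()                        # merge everything on the wishlist
subset = scn.to_xarray_dataset(datasets=['I04'])    # only the named product
print(subset.data_vars, subset.coords)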
Obtain GCPs and construct longitude, latitude and altitude arrays. The GCPs of the measurement band are interpolated to the full data array shape. Returns: coordinates (tuple): Longitude, latitude and altitude arrays with GCP and CRS information attached as attributes.
def get_lonlatalts(self):
    band = self.filehandle

    (xpoints, ypoints), (gcp_lons, gcp_lats, gcp_alts), (gcps, crs) = self.get_gcps()

    # FIXME: do interpolation on cartesian coordinates if the area is
    # problematic.

    longitudes = interpolate_xarray(xpoints, ypoints, gcp_lons, band.shape)
    latitudes = interpolate_xarray(xpoints, ypoints, gcp_lats, band.shape)
    altitudes = interpolate_xarray(xpoints, ypoints, gcp_alts, band.shape)

    longitudes.attrs['gcps'] = gcps
    longitudes.attrs['crs'] = crs
    latitudes.attrs['gcps'] = gcps
    latitudes.attrs['crs'] = crs
    altitudes.attrs['gcps'] = gcps
    altitudes.attrs['crs'] = crs

    return longitudes, latitudes, altitudes
293,706
Read GCPs from the GDAL band. Returns: points (tuple): Pixel and line index 1-d arrays gcp_coords (tuple): Longitude, latitude and altitude 2-d arrays gcps: Raw GCP metadata from the file handle
def get_gcps(self):
    gcps = self.filehandle.gcps

    gcp_array = np.array([(p.row, p.col, p.x, p.y, p.z) for p in gcps[0]])

    ypoints = np.unique(gcp_array[:, 0])
    xpoints = np.unique(gcp_array[:, 1])

    gcp_lons = gcp_array[:, 2].reshape(ypoints.shape[0], xpoints.shape[0])
    gcp_lats = gcp_array[:, 3].reshape(ypoints.shape[0], xpoints.shape[0])
    gcp_alts = gcp_array[:, 4].reshape(ypoints.shape[0], xpoints.shape[0])

    return (xpoints, ypoints), (gcp_lons, gcp_lats, gcp_alts), gcps
293,707
Initialize file reader and adjust geolocation preferences. Args: config_files (iterable): yaml config files passed to base class use_tc (boolean): If `True` use the terrain corrected files. If `False`, switch to non-TC files. If `None` (default), use TC if available, non-TC otherwise.
def __init__(self, config_files, use_tc=None, **kwargs):
    super(VIIRSSDRReader, self).__init__(config_files, **kwargs)
    self.use_tc = use_tc
293,728
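A hedged sketch of how the `use_tc` switch is typically reached from the Scene API via `reader_kwargs`; the file glob is illustrative.

from glob import glob
from satpy import Scene

scn = Scene(filenames=glob('/data/viirs/SVI01*.h5'),
            reader='viirs_sdr',
            reader_kwargs={'use_tc': False})  # force non-terrain-corrected geolocation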
Flatten tree structure to a one level dictionary. Args: d (dict, optional): output dictionary to update Returns: dict: Node.name -> Node. The returned dictionary includes the current Node and all its children.
def flatten(self, d=None):
    if d is None:
        d = {}
    if self.name is not None:
        d[self.name] = self
    for child in self.children:
        child.flatten(d=d)
    return d
293,749
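A tiny sketch with satpy's Node class, assuming it is importable from `satpy.node`; the node names are illustrative.

from satpy.node import Node

root = Node(None)              # dependency trees use a None-named root
child = Node('overview')
child.add_child(Node('VIS006'))
root.add_child(child)
print(sorted(root.flatten()))  # -> ['VIS006', 'overview']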
Get the leaves of the tree starting at this root. Args: nodes (iterable): limit leaves for these node names unique: only include individual leaf nodes once Returns: list of leaf nodes
def leaves(self, nodes=None, unique=True):
    if nodes is None:
        return super(DependencyTree, self).leaves(unique=unique)

    res = list()
    for child_id in nodes:
        for sub_child in self._all_nodes[child_id].leaves(unique=unique):
            if not unique or sub_child not in res:
                res.append(sub_child)
    return res
293,756
Find the dependencies for *dataset_key*. Args: dataset_key (str, float, DatasetID): Dataset identifier to locate and find any additional dependencies for. **dfilter (dict): Additional filter parameters. See `satpy.readers.get_key` for more details.
def _find_dependencies(self, dataset_key, **dfilter):
    # 0 check if the *exact* dataset is already loaded
    try:
        node = self.getitem(dataset_key)
        LOG.trace("Found exact dataset already loaded: {}".format(node.name))
        return node, set()
    except KeyError:
        # exact dataset isn't loaded, let's load it below
        LOG.trace("Exact dataset {} isn't loaded, will try reader...".format(dataset_key))

    # 1 try to get *best* dataset from reader
    try:
        node = self._find_reader_dataset(dataset_key, **dfilter)
    except TooManyResults:
        LOG.warning("Too many possible datasets to load for {}".format(dataset_key))
        return None, set([dataset_key])
    if node is not None:
        LOG.trace("Found reader provided dataset:\n\tRequested: {}\n\tFound: {}".format(dataset_key, node.name))
        return node, set()
    LOG.trace("Could not find dataset in reader: {}".format(dataset_key))

    # 2 try to find a composite by name (any version of it is good enough)
    try:
        # assume that there is no such thing as a "better" composite
        # version so if we find any DatasetIDs already loaded then
        # we want to use them
        node = self[dataset_key]
        LOG.trace("Composite already loaded:\n\tRequested: {}\n\tFound: {}".format(dataset_key, node.name))
        return node, set()
    except KeyError:
        # composite hasn't been loaded yet, let's load it below
        LOG.trace("Composite hasn't been loaded yet, will load: {}".format(dataset_key))

    # 3 try to find a composite that matches
    try:
        node, unknowns = self._find_compositor(dataset_key, **dfilter)
        LOG.trace("Found composite:\n\tRequested: {}\n\tFound: {}".format(dataset_key, node and node.name))
    except KeyError:
        node = None
        unknowns = set([dataset_key])
        LOG.trace("Composite not found: {}".format(dataset_key))

    return node, unknowns
293,766
Create the dependency tree. Args: dataset_keys (iterable): Strings or DatasetIDs to find dependencies for **dfilter (dict): Additional filter parameters. See `satpy.readers.get_key` for more details. Returns: (Node, set): Root node of the dependency tree and a set of unknown datasets
def find_dependencies(self, dataset_keys, **dfilter):
    unknown_datasets = set()
    for key in dataset_keys.copy():
        n, unknowns = self._find_dependencies(key, **dfilter)
        dataset_keys.discard(key)  # remove old non-DatasetID
        if n is not None:
            dataset_keys.add(n.name)  # add equivalent DatasetID
        if unknowns:
            unknown_datasets.update(unknowns)
            continue
        self.add_child(self, n)

    return unknown_datasets
293,767