<SYSTEM_TASK:> Adds an enumeration value. <END_TASK> <USER_TASK:> Description: def AddValue(self, name, number, aliases=None, description=None): """Adds an enumeration value. Args: name (str): name. number (int): number. aliases (Optional[list[str]]): aliases. description (Optional[str]): description. Raises: KeyError: if the enumeration value already exists. """
  if name in self.values_per_name:
    raise KeyError('Value with name: {0:s} already exists.'.format(name))

  if number in self.values_per_number:
    raise KeyError('Value with number: {0!s} already exists.'.format(number))

  for alias in aliases or []:
    if alias in self.values_per_alias:
      raise KeyError('Value with alias: {0:s} already exists.'.format(alias))

  enumeration_value = EnumerationValue(
      name, number, aliases=aliases, description=description)

  self.values.append(enumeration_value)
  self.values_per_name[name] = enumeration_value
  self.values_per_number[number] = enumeration_value

  for alias in aliases or []:
    self.values_per_alias[alias] = enumeration_value
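A minimal usage sketch of the method above; the enumeration object and the value names are invented for illustration:

enumeration.AddValue('ERROR_SUCCESS', 0, aliases=['NO_ERROR'])
enumeration.AddValue('ERROR_FAILURE', 1)

try:
  # A duplicate name, number or alias raises KeyError.
  enumeration.AddValue('ERROR_SUCCESS', 2)
except KeyError as exception:
  print(exception)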
<SYSTEM_TASK:> Reads a constant data type definition. <END_TASK> <USER_TASK:> Description: def _ReadConstantDataTypeDefinition( self, definitions_registry, definition_values, definition_name, is_member=False): """Reads a constant data type definition. Args: definitions_registry (DataTypeDefinitionsRegistry): data type definitions registry. definition_values (dict[str, object]): definition values. definition_name (str): name of the definition. is_member (Optional[bool]): True if the data type definition is a member data type definition. Returns: ConstantDataTypeDefinition: constant data type definition. Raises: DefinitionReaderError: if the definitions values are missing or if the format is incorrect. """
  if is_member:
    error_message = 'data type not supported as member'
    raise errors.DefinitionReaderError(definition_name, error_message)

  value = definition_values.get('value', None)
  if value is None:
    error_message = 'missing value'
    raise errors.DefinitionReaderError(definition_name, error_message)

  definition_object = self._ReadSemanticDataTypeDefinition(
      definitions_registry, definition_values, data_types.ConstantDefinition,
      definition_name, self._SUPPORTED_DEFINITION_VALUES_CONSTANT)
  definition_object.value = value

  return definition_object
<SYSTEM_TASK:> Reads a data type definition with members. <END_TASK> <USER_TASK:> Description: def _ReadDataTypeDefinitionWithMembers( self, definitions_registry, definition_values, data_type_definition_class, definition_name, supports_conditions=False): """Reads a data type definition with members. Args: definitions_registry (DataTypeDefinitionsRegistry): data type definitions registry. definition_values (dict[str, object]): definition values. data_type_definition_class (str): data type definition class. definition_name (str): name of the definition. supports_conditions (Optional[bool]): True if conditions are supported by the data type definition. Returns: DataTypeDefinition: data type definition with members. Raises: DefinitionReaderError: if the definitions values are missing or if the format is incorrect. """
  members = definition_values.get('members', None)
  if not members:
    error_message = 'missing members'
    raise errors.DefinitionReaderError(definition_name, error_message)

  supported_definition_values = (
      self._SUPPORTED_DEFINITION_VALUES_STORAGE_DATA_TYPE_WITH_MEMBERS)
  definition_object = self._ReadDataTypeDefinition(
      definitions_registry, definition_values, data_type_definition_class,
      definition_name, supported_definition_values)

  attributes = definition_values.get('attributes', None)
  if attributes:
    unsupported_attributes = set(attributes.keys()).difference(
        self._SUPPORTED_ATTRIBUTES_STORAGE_DATA_TYPE)
    if unsupported_attributes:
      error_message = 'unsupported attributes: {0:s}'.format(
          ', '.join(unsupported_attributes))
      raise errors.DefinitionReaderError(definition_name, error_message)

    byte_order = attributes.get('byte_order', definitions.BYTE_ORDER_NATIVE)
    if byte_order not in definitions.BYTE_ORDERS:
      error_message = 'unsupported byte-order attribute: {0!s}'.format(
          byte_order)
      raise errors.DefinitionReaderError(definition_name, error_message)

    definition_object.byte_order = byte_order

  for member in members:
    section = member.get('section', None)
    if section:
      member_section_definition = data_types.MemberSectionDefinition(section)
      definition_object.AddSectionDefinition(member_section_definition)
    else:
      member_data_type_definition = self._ReadMemberDataTypeDefinitionMember(
          definitions_registry, member, definition_object.name,
          supports_conditions=supports_conditions)
      definition_object.AddMemberDefinition(member_data_type_definition)

  return definition_object
<SYSTEM_TASK:> Reads an enumeration data type definition. <END_TASK> <USER_TASK:> Description: def _ReadEnumerationDataTypeDefinition( self, definitions_registry, definition_values, definition_name, is_member=False): """Reads an enumeration data type definition. Args: definitions_registry (DataTypeDefinitionsRegistry): data type definitions registry. definition_values (dict[str, object]): definition values. definition_name (str): name of the definition. is_member (Optional[bool]): True if the data type definition is a member data type definition. Returns: EnumerationDataTypeDefinition: enumeration data type definition. Raises: DefinitionReaderError: if the definitions values are missing or if the format is incorrect. """
  if is_member:
    error_message = 'data type not supported as member'
    raise errors.DefinitionReaderError(definition_name, error_message)

  values = definition_values.get('values')
  if not values:
    error_message = 'missing values'
    raise errors.DefinitionReaderError(definition_name, error_message)

  definition_object = self._ReadSemanticDataTypeDefinition(
      definitions_registry, definition_values,
      data_types.EnumerationDefinition, definition_name,
      self._SUPPORTED_DEFINITION_VALUES_ENUMERATION)

  last_name = None
  for enumeration_value in values:
    aliases = enumeration_value.get('aliases', None)
    description = enumeration_value.get('description', None)
    name = enumeration_value.get('name', None)
    number = enumeration_value.get('number', None)

    if not name or number is None:
      if last_name:
        error_location = 'after: {0:s}'.format(last_name)
      else:
        error_location = 'at start'

      error_message = '{0:s} missing name or number'.format(error_location)
      raise errors.DefinitionReaderError(definition_name, error_message)

    else:
      try:
        definition_object.AddValue(
            name, number, aliases=aliases, description=description)
      except KeyError as exception:
        error_message = '{0!s}'.format(exception)
        raise errors.DefinitionReaderError(definition_name, error_message)

    last_name = name

  return definition_object
<SYSTEM_TASK:> Reads an element sequence data type definition. <END_TASK> <USER_TASK:> Description: def _ReadElementSequenceDataTypeDefinition( self, definitions_registry, definition_values, data_type_definition_class, definition_name, supported_definition_values): """Reads an element sequence data type definition. Args: definitions_registry (DataTypeDefinitionsRegistry): data type definitions registry. definition_values (dict[str, object]): definition values. data_type_definition_class (str): data type definition class. definition_name (str): name of the definition. supported_definition_values (set[str]): names of the supported definition values. Returns: SequenceDefinition: sequence data type definition. Raises: DefinitionReaderError: if the definitions values are missing or if the format is incorrect. """
  unsupported_definition_values = set(definition_values.keys()).difference(
      supported_definition_values)
  if unsupported_definition_values:
    error_message = 'unsupported definition values: {0:s}'.format(
        ', '.join(unsupported_definition_values))
    raise errors.DefinitionReaderError(definition_name, error_message)

  element_data_type = definition_values.get('element_data_type', None)
  if not element_data_type:
    error_message = 'missing element data type'
    raise errors.DefinitionReaderError(definition_name, error_message)

  elements_data_size = definition_values.get('elements_data_size', None)
  elements_terminator = definition_values.get('elements_terminator', None)
  number_of_elements = definition_values.get('number_of_elements', None)

  size_values = (elements_data_size, elements_terminator, number_of_elements)
  size_values = [value for value in size_values if value is not None]
  if not size_values:
    error_message = (
        'missing element data size, elements terminator and number of '
        'elements')
    raise errors.DefinitionReaderError(definition_name, error_message)

  if len(size_values) > 1:
    error_message = (
        'element data size, elements terminator and number of elements '
        'not allowed to be set at the same time')
    raise errors.DefinitionReaderError(definition_name, error_message)

  element_data_type_definition = definitions_registry.GetDefinitionByName(
      element_data_type)
  if not element_data_type_definition:
    error_message = 'undefined element data type: {0:s}.'.format(
        element_data_type)
    raise errors.DefinitionReaderError(definition_name, error_message)

  element_byte_size = element_data_type_definition.GetByteSize()
  element_type_indicator = element_data_type_definition.TYPE_INDICATOR
  if not element_byte_size and element_type_indicator != (
      definitions.TYPE_INDICATOR_STRING):
    error_message = (
        'unsupported variable size element data type: {0:s}'.format(
            element_data_type))
    raise errors.DefinitionReaderError(definition_name, error_message)

  aliases = definition_values.get('aliases', None)
  description = definition_values.get('description', None)
  urls = definition_values.get('urls', None)

  definition_object = data_type_definition_class(
      definition_name, element_data_type_definition, aliases=aliases,
      data_type=element_data_type, description=description, urls=urls)

  if elements_data_size is not None:
    try:
      definition_object.elements_data_size = int(elements_data_size)
    except ValueError:
      definition_object.elements_data_size_expression = elements_data_size

  elif elements_terminator is not None:
    if isinstance(elements_terminator, py2to3.UNICODE_TYPE):
      elements_terminator = elements_terminator.encode('ascii')
    definition_object.elements_terminator = elements_terminator

  elif number_of_elements is not None:
    try:
      definition_object.number_of_elements = int(number_of_elements)
    except ValueError:
      definition_object.number_of_elements_expression = number_of_elements

  return definition_object
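For orientation, a hedged sketch of the definition values this reader expects, written as the Python mapping it would receive after YAML parsing (the definition itself is invented). Exactly one of elements_data_size, elements_terminator and number_of_elements may be present, otherwise a DefinitionReaderError is raised:

# Hypothetical sequence definition values: one and only one sizing value.
definition_values = {
    'element_data_type': 'uint32',  # must resolve via GetDefinitionByName
    'number_of_elements': 3,        # alternatively an expression string
}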
<SYSTEM_TASK:> Reads a fixed-size data type definition. <END_TASK> <USER_TASK:> Description: def _ReadFixedSizeDataTypeDefinition( self, definitions_registry, definition_values, data_type_definition_class, definition_name, supported_attributes, default_size=definitions.SIZE_NATIVE, default_units='bytes', is_member=False, supported_size_values=None): """Reads a fixed-size data type definition. Args: definitions_registry (DataTypeDefinitionsRegistry): data type definitions registry. definition_values (dict[str, object]): definition values. data_type_definition_class (str): data type definition class. definition_name (str): name of the definition. supported_attributes (set[str]): names of the supported attributes. default_size (Optional[int]): default size. default_units (Optional[str]): default units. is_member (Optional[bool]): True if the data type definition is a member data type definition. supported_size_values (Optional[tuple[int]]): supported size values, or None if not set. Returns: FixedSizeDataTypeDefinition: fixed-size data type definition. Raises: DefinitionReaderError: if the definitions values are missing or if the format is incorrect. """
  definition_object = self._ReadStorageDataTypeDefinition(
      definitions_registry, definition_values, data_type_definition_class,
      definition_name, supported_attributes, is_member=is_member)

  attributes = definition_values.get('attributes', None)
  if attributes:
    size = attributes.get('size', default_size)
    if size != definitions.SIZE_NATIVE:
      try:
        int(size)
      except ValueError:
        error_message = 'unsupported size attribute: {0!s}'.format(size)
        raise errors.DefinitionReaderError(definition_name, error_message)

      if supported_size_values and size not in supported_size_values:
        error_message = 'unsupported size value: {0!s}'.format(size)
        raise errors.DefinitionReaderError(definition_name, error_message)

    definition_object.size = size
    definition_object.units = attributes.get('units', default_units)

  return definition_object
<SYSTEM_TASK:> Reads a format data type definition. <END_TASK> <USER_TASK:> Description: def _ReadFormatDataTypeDefinition( self, definitions_registry, definition_values, definition_name, is_member=False): """Reads a format data type definition. Args: definitions_registry (DataTypeDefinitionsRegistry): data type definitions registry. definition_values (dict[str, object]): definition values. definition_name (str): name of the definition. is_member (Optional[bool]): True if the data type definition is a member data type definition. Returns: FormatDefinition: format definition. Raises: DefinitionReaderError: if the definitions values are missing or if the format is incorrect. """
  if is_member:
    error_message = 'data type not supported as member'
    raise errors.DefinitionReaderError(definition_name, error_message)

  definition_object = self._ReadLayoutDataTypeDefinition(
      definitions_registry, definition_values, data_types.FormatDefinition,
      definition_name, self._SUPPORTED_DEFINITION_VALUES_FORMAT)

  # TODO: disabled for now
  # layout = definition_values.get('layout', None)
  # if layout is None:
  #   error_message = 'missing layout'
  #   raise errors.DefinitionReaderError(definition_name, error_message)

  definition_object.metadata = definition_values.get('metadata', {})

  attributes = definition_values.get('attributes', None)
  if attributes:
    unsupported_attributes = set(attributes.keys()).difference(
        self._SUPPORTED_ATTRIBUTES_FORMAT)
    if unsupported_attributes:
      error_message = 'unsupported attributes: {0:s}'.format(
          ', '.join(unsupported_attributes))
      raise errors.DefinitionReaderError(definition_name, error_message)

    byte_order = attributes.get('byte_order', definitions.BYTE_ORDER_NATIVE)
    if byte_order not in definitions.BYTE_ORDERS:
      error_message = 'unsupported byte-order attribute: {0!s}'.format(
          byte_order)
      raise errors.DefinitionReaderError(definition_name, error_message)

    definition_object.byte_order = byte_order

  return definition_object
<SYSTEM_TASK:> Reads an integer data type definition. <END_TASK> <USER_TASK:> Description: def _ReadIntegerDataTypeDefinition( self, definitions_registry, definition_values, definition_name, is_member=False): """Reads an integer data type definition. Args: definitions_registry (DataTypeDefinitionsRegistry): data type definitions registry. definition_values (dict[str, object]): definition values. definition_name (str): name of the definition. is_member (Optional[bool]): True if the data type definition is a member data type definition. Returns: IntegerDataTypeDefinition: integer data type definition. Raises: DefinitionReaderError: if the definitions values are missing or if the format is incorrect. """
  definition_object = self._ReadFixedSizeDataTypeDefinition(
      definitions_registry, definition_values, data_types.IntegerDefinition,
      definition_name, self._SUPPORTED_ATTRIBUTES_INTEGER,
      is_member=is_member, supported_size_values=(1, 2, 4, 8))

  attributes = definition_values.get('attributes', None)
  if attributes:
    format_attribute = attributes.get('format', definitions.FORMAT_SIGNED)
    if format_attribute not in self._INTEGER_FORMAT_ATTRIBUTES:
      error_message = 'unsupported format attribute: {0!s}'.format(
          format_attribute)
      raise errors.DefinitionReaderError(definition_name, error_message)

    definition_object.format = format_attribute

  return definition_object
<SYSTEM_TASK:> Reads a padding data type definition. <END_TASK> <USER_TASK:> Description: def _ReadPaddingDataTypeDefinition( self, definitions_registry, definition_values, definition_name, is_member=False): """Reads a padding data type definition. Args: definitions_registry (DataTypeDefinitionsRegistry): data type definitions registry. definition_values (dict[str, object]): definition values. definition_name (str): name of the definition. is_member (Optional[bool]): True if the data type definition is a member data type definition. Returns: PaddingDefinition: padding definition. Raises: DefinitionReaderError: if the definitions values are missing or if the format is incorrect. """
  if not is_member:
    error_message = 'data type only supported as member'
    raise errors.DefinitionReaderError(definition_name, error_message)

  definition_object = self._ReadDataTypeDefinition(
      definitions_registry, definition_values, data_types.PaddingDefinition,
      definition_name, self._SUPPORTED_DEFINITION_VALUES_PADDING)

  alignment_size = definition_values.get('alignment_size', None)
  if not alignment_size:
    error_message = 'missing alignment_size'
    raise errors.DefinitionReaderError(definition_name, error_message)

  try:
    int(alignment_size)
  except ValueError:
    error_message = 'unsupported alignment size attribute: {0!s}'.format(
        alignment_size)
    raise errors.DefinitionReaderError(definition_name, error_message)

  if alignment_size not in (2, 4, 8, 16):
    error_message = 'unsupported alignment size value: {0!s}'.format(
        alignment_size)
    raise errors.DefinitionReaderError(definition_name, error_message)

  definition_object.alignment_size = alignment_size

  return definition_object
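The alignment size read here drives the usual padding arithmetic when members are laid out; a standalone sketch of that formula (not code from this module):

def padding_byte_count(offset, alignment_size):
  """Number of padding bytes to reach the next alignment_size boundary."""
  return (alignment_size - (offset % alignment_size)) % alignment_size

# Example: an offset of 5 needs 3 padding bytes for 8-byte alignment.
assert padding_byte_count(5, 8) == 3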
<SYSTEM_TASK:> Reads a sequence data type definition. <END_TASK> <USER_TASK:> Description: def _ReadSequenceDataTypeDefinition( self, definitions_registry, definition_values, definition_name, is_member=False): """Reads a sequence data type definition. Args: definitions_registry (DataTypeDefinitionsRegistry): data type definitions registry. definition_values (dict[str, object]): definition values. definition_name (str): name of the definition. is_member (Optional[bool]): True if the data type definition is a member data type definition. Returns: SequenceDefinition: sequence data type definition. Raises: DefinitionReaderError: if the definitions values are missing or if the format is incorrect. """
  if is_member:
    supported_definition_values = (
        self._SUPPORTED_DEFINITION_VALUES_ELEMENTS_MEMBER_DATA_TYPE)
  else:
    supported_definition_values = (
        self._SUPPORTED_DEFINITION_VALUES_ELEMENTS_DATA_TYPE)

  return self._ReadElementSequenceDataTypeDefinition(
      definitions_registry, definition_values, data_types.SequenceDefinition,
      definition_name, supported_definition_values)
<SYSTEM_TASK:> Reads a storage data type definition. <END_TASK> <USER_TASK:> Description: def _ReadStorageDataTypeDefinition( self, definitions_registry, definition_values, data_type_definition_class, definition_name, supported_attributes, is_member=False): """Reads a storage data type definition. Args: definitions_registry (DataTypeDefinitionsRegistry): data type definitions registry. definition_values (dict[str, object]): definition values. data_type_definition_class (str): data type definition class. definition_name (str): name of the definition. supported_attributes (set[str]): names of the supported attributes. is_member (Optional[bool]): True if the data type definition is a member data type definition. Returns: StorageDataTypeDefinition: storage data type definition. Raises: DefinitionReaderError: if the definitions values are missing or if the format is incorrect. """
  if is_member:
    supported_definition_values = (
        self._SUPPORTED_DEFINITION_VALUES_MEMBER_DATA_TYPE)
  else:
    supported_definition_values = (
        self._SUPPORTED_DEFINITION_VALUES_STORAGE_DATA_TYPE)

  definition_object = self._ReadDataTypeDefinition(
      definitions_registry, definition_values, data_type_definition_class,
      definition_name, supported_definition_values)

  attributes = definition_values.get('attributes', None)
  if attributes:
    unsupported_attributes = set(attributes.keys()).difference(
        supported_attributes)
    if unsupported_attributes:
      error_message = 'unsupported attributes: {0:s}'.format(
          ', '.join(unsupported_attributes))
      raise errors.DefinitionReaderError(definition_name, error_message)

    byte_order = attributes.get('byte_order', definitions.BYTE_ORDER_NATIVE)
    if byte_order not in definitions.BYTE_ORDERS:
      error_message = 'unsupported byte-order attribute: {0!s}'.format(
          byte_order)
      raise errors.DefinitionReaderError(definition_name, error_message)

    definition_object.byte_order = byte_order

  return definition_object
<SYSTEM_TASK:> Reads a stream data type definition. <END_TASK> <USER_TASK:> Description: def _ReadStreamDataTypeDefinition( self, definitions_registry, definition_values, definition_name, is_member=False): """Reads a stream data type definition. Args: definitions_registry (DataTypeDefinitionsRegistry): data type definitions registry. definition_values (dict[str, object]): definition values. definition_name (str): name of the definition. is_member (Optional[bool]): True if the data type definition is a member data type definition. Returns: StreamDefinition: stream data type definition. Raises: DefinitionReaderError: if the definitions values are missing or if the format is incorrect. """
  if is_member:
    supported_definition_values = (
        self._SUPPORTED_DEFINITION_VALUES_ELEMENTS_MEMBER_DATA_TYPE)
  else:
    supported_definition_values = (
        self._SUPPORTED_DEFINITION_VALUES_ELEMENTS_DATA_TYPE)

  return self._ReadElementSequenceDataTypeDefinition(
      definitions_registry, definition_values, data_types.StreamDefinition,
      definition_name, supported_definition_values)
<SYSTEM_TASK:> Reads a string data type definition. <END_TASK> <USER_TASK:> Description: def _ReadStringDataTypeDefinition( self, definitions_registry, definition_values, definition_name, is_member=False): """Reads a string data type definition. Args: definitions_registry (DataTypeDefinitionsRegistry): data type definitions registry. definition_values (dict[str, object]): definition values. definition_name (str): name of the definition. is_member (Optional[bool]): True if the data type definition is a member data type definition. Returns: StringDefinition: string data type definition. Raises: DefinitionReaderError: if the definitions values are missing or if the format is incorrect. """
  if is_member:
    supported_definition_values = (
        self._SUPPORTED_DEFINITION_VALUES_STRING_MEMBER)
  else:
    supported_definition_values = self._SUPPORTED_DEFINITION_VALUES_STRING

  definition_object = self._ReadElementSequenceDataTypeDefinition(
      definitions_registry, definition_values, data_types.StringDefinition,
      definition_name, supported_definition_values)

  encoding = definition_values.get('encoding', None)
  if not encoding:
    error_message = 'missing encoding'
    raise errors.DefinitionReaderError(definition_name, error_message)

  definition_object.encoding = encoding

  return definition_object
<SYSTEM_TASK:> Reads a structure family data type definition. <END_TASK> <USER_TASK:> Description: def _ReadStructureFamilyDataTypeDefinition( self, definitions_registry, definition_values, definition_name, is_member=False): """Reads a structure family data type definition. Args: definitions_registry (DataTypeDefinitionsRegistry): data type definitions registry. definition_values (dict[str, object]): definition values. definition_name (str): name of the definition. is_member (Optional[bool]): True if the data type definition is a member data type definition. Returns: StructureDefinition: structure data type definition. Raises: DefinitionReaderError: if the definitions values are missing or if the format is incorrect. """
  if is_member:
    error_message = 'data type not supported as member'
    raise errors.DefinitionReaderError(definition_name, error_message)

  definition_object = self._ReadLayoutDataTypeDefinition(
      definitions_registry, definition_values,
      data_types.StructureFamilyDefinition, definition_name,
      self._SUPPORTED_DEFINITION_VALUES_STRUCTURE_FAMILY)

  runtime = definition_values.get('runtime', None)
  if not runtime:
    error_message = 'missing runtime'
    raise errors.DefinitionReaderError(definition_name, error_message)

  runtime_data_type_definition = definitions_registry.GetDefinitionByName(
      runtime)
  if not runtime_data_type_definition:
    error_message = 'undefined runtime: {0:s}.'.format(runtime)
    raise errors.DefinitionReaderError(definition_name, error_message)

  if runtime_data_type_definition.family_definition:
    error_message = 'runtime: {0:s} already part of a family.'.format(runtime)
    raise errors.DefinitionReaderError(definition_name, error_message)

  definition_object.AddRuntimeDefinition(runtime_data_type_definition)

  members = definition_values.get('members', None)
  if not members:
    error_message = 'missing members'
    raise errors.DefinitionReaderError(definition_name, error_message)

  for member in members:
    member_data_type_definition = definitions_registry.GetDefinitionByName(
        member)
    if not member_data_type_definition:
      error_message = 'undefined member: {0:s}.'.format(member)
      raise errors.DefinitionReaderError(definition_name, error_message)

    if member_data_type_definition.family_definition:
      error_message = 'member: {0:s} already part of a family.'.format(member)
      raise errors.DefinitionReaderError(definition_name, error_message)

    definition_object.AddMemberDefinition(member_data_type_definition)

  return definition_object
<SYSTEM_TASK:> Reads a union data type definition. <END_TASK> <USER_TASK:> Description: def _ReadUnionDataTypeDefinition( self, definitions_registry, definition_values, definition_name, is_member=False): """Reads a union data type definition. Args: definitions_registry (DataTypeDefinitionsRegistry): data type definitions registry. definition_values (dict[str, object]): definition values. definition_name (str): name of the definition. is_member (Optional[bool]): True if the data type definition is a member data type definition. Returns: UnionDefinition: union data type definition. Raises: DefinitionReaderError: if the definitions values are missing or if the format is incorrect. """
  return self._ReadDataTypeDefinitionWithMembers(
      definitions_registry, definition_values, data_types.UnionDefinition,
      definition_name, supports_conditions=False)
<SYSTEM_TASK:> Reads data type definitions from a file into the registry. <END_TASK> <USER_TASK:> Description: def ReadFile(self, definitions_registry, path): """Reads data type definitions from a file into the registry. Args: definitions_registry (DataTypeDefinitionsRegistry): data type definitions registry. path (str): path of the file to read from. """
  with open(path, 'r') as file_object:
    self.ReadFileObject(definitions_registry, file_object)
<SYSTEM_TASK:> Reads data type definitions from a file-like object into the registry. <END_TASK> <USER_TASK:> Description: def ReadFileObject(self, definitions_registry, file_object): """Reads data type definitions from a file-like object into the registry. Args: definitions_registry (DataTypeDefinitionsRegistry): data type definitions registry. file_object (file): file-like object to read from. Raises: FormatError: if the definitions values are missing or if the format is incorrect. """
  last_definition_object = None
  error_location = None
  error_message = None

  try:
    yaml_generator = yaml.safe_load_all(file_object)

    for yaml_definition in yaml_generator:
      definition_object = self._ReadDefinition(
          definitions_registry, yaml_definition)
      if not definition_object:
        error_location = self._GetFormatErrorLocation(
            yaml_definition, last_definition_object)
        error_message = '{0:s} Missing definition object.'.format(
            error_location)
        raise errors.FormatError(error_message)

      definitions_registry.RegisterDefinition(definition_object)
      last_definition_object = definition_object

  except errors.DefinitionReaderError as exception:
    error_message = 'in: {0:s} {1:s}'.format(
        exception.name or '<NAMELESS>', exception.message)
    raise errors.FormatError(error_message)

  except (yaml.reader.ReaderError, yaml.scanner.ScannerError) as exception:
    error_location = self._GetFormatErrorLocation({}, last_definition_object)
    error_message = '{0:s} {1!s}'.format(error_location, exception)
    raise errors.FormatError(error_message)
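End to end, the reader above is used roughly like this. A hedged sketch: the registry class name comes from the docstrings above, while the reader class name is assumed and may differ from the importable name:

registry = DataTypeDefinitionsRegistry()
reader = YAMLDataTypeDefinitionsFileReader()  # assumed reader class name

with open('definitions.yaml', 'r') as file_object:
  reader.ReadFileObject(registry, file_object)

uint32_definition = registry.GetDefinitionByName('uint32')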
<SYSTEM_TASK:> Reads the organization given by identifier from HDX and returns Organization object <END_TASK> <USER_TASK:> Description: def read_from_hdx(identifier, configuration=None): # type: (str, Optional[Configuration]) -> Optional['Organization'] """Reads the organization given by identifier from HDX and returns Organization object Args: identifier (str): Identifier of organization configuration (Optional[Configuration]): HDX configuration. Defaults to global configuration. Returns: Optional[Organization]: Organization object if successful read, None if not """
    organization = Organization(configuration=configuration)
    result = organization._load_from_hdx('organization', identifier)
    if result:
        return organization
    return None
<SYSTEM_TASK:> Returns the organization's users. <END_TASK> <USER_TASK:> Description: def get_users(self, capacity=None): # type: (Optional[str]) -> List[User] """Returns the organization's users. Args: capacity (Optional[str]): Filter by capacity eg. member, admin. Defaults to None. Returns: List[User]: Organization's users. """
    users = list()
    usersdicts = self.data.get('users')
    if usersdicts is not None:
        for userdata in usersdicts:
            if capacity is not None and userdata['capacity'] != capacity:
                continue
            id = userdata.get('id')
            if id is None:
                id = userdata['name']
            user = hdx.data.user.User.read_from_hdx(
                id, configuration=self.configuration)
            user['capacity'] = userdata['capacity']
            users.append(user)
    return users
<SYSTEM_TASK:> Get list of datasets in organization <END_TASK> <USER_TASK:> Description: def get_datasets(self, query='*:*', **kwargs): # type: (str, Any) -> List[hdx.data.dataset.Dataset] """Get list of datasets in organization Args: query (str): Restrict datasets returned to this query (in Solr format). Defaults to '*:*'. **kwargs: See below sort (string): Sorting of the search results. Defaults to 'relevance asc, metadata_modified desc'. rows (int): Number of matching rows to return. Defaults to all datasets (sys.maxsize). start (int): Offset in the complete result for where the set of returned datasets should begin facet (string): Whether to enable faceted results. Default to True. facet.mincount (int): Minimum counts for facet fields should be included in the results facet.limit (int): Maximum number of values the facet fields return (- = unlimited). Defaults to 50. facet.field (List[str]): Fields to facet upon. Default is empty. use_default_schema (bool): Use default package schema instead of custom schema. Defaults to False. Returns: List[Dataset]: List of datasets in organization """
    return hdx.data.dataset.Dataset.search_in_hdx(
        query=query, configuration=self.configuration,
        fq='organization:%s' % self.data['name'], **kwargs)
<SYSTEM_TASK:> Get all organization names in HDX <END_TASK> <USER_TASK:> Description: def get_all_organization_names(configuration=None, **kwargs): # type: (Optional[Configuration], Any) -> List[str] """Get all organization names in HDX Args: configuration (Optional[Configuration]): HDX configuration. Defaults to global configuration. **kwargs: See below sort (str): Sort the search results according to field name and sort-order. Allowed fields are ‘name’, ‘package_count’ and ‘title’. Defaults to 'name asc'. organizations (List[str]): List of names of the groups to return. all_fields (bool): Return group dictionaries instead of just names. Only core fields are returned - get some more using the include_* options. Defaults to False. include_extras (bool): If all_fields, include the group extra fields. Defaults to False. include_tags (bool): If all_fields, include the group tags. Defaults to False. include_groups: If all_fields, include the groups the groups are in. Defaults to False. Returns: List[str]: List of all organization names in HDX """
    organization = Organization(configuration=configuration)
    organization['id'] = 'all organizations'  # only for error message if produced
    return organization._write_to_hdx('list', kwargs, 'id')
<SYSTEM_TASK:> Makes a read call to HDX passing in given parameter. <END_TASK> <USER_TASK:> Description: def _read_from_hdx(self, object_type, value, fieldname='id', action=None, **kwargs): # type: (str, str, str, Optional[str], Any) -> Tuple[bool, Union[Dict, str]] """Makes a read call to HDX passing in given parameter. Args: object_type (str): Description of HDX object type (for messages) value (str): Value of HDX field fieldname (str): HDX field name. Defaults to id. action (Optional[str]): Replacement CKAN action url to use. Defaults to None. **kwargs: Other fields to pass to CKAN. Returns: Tuple[bool, Union[Dict, str]]: (True/False, HDX object metadata/Error) """
    if not fieldname:
        raise HDXError('Empty %s field name!' % object_type)
    if action is None:
        action = self.actions()['show']
    data = {fieldname: value}
    data.update(kwargs)
    try:
        result = self.configuration.call_remoteckan(action, data)
        return True, result
    except NotFound:
        return False, '%s=%s: not found!' % (fieldname, value)
    except Exception as e:
        raisefrom(HDXError, 'Failed when trying to read: %s=%s! (POST)' % (
            fieldname, value), e)
<SYSTEM_TASK:> Helper method to load the HDX object given by identifier from HDX <END_TASK> <USER_TASK:> Description: def _load_from_hdx(self, object_type, id_field): # type: (str, str) -> bool """Helper method to load the HDX object given by identifier from HDX Args: object_type (str): Description of HDX object type (for messages) id_field (str): HDX object identifier Returns: bool: True if loaded, False if not """
    success, result = self._read_from_hdx(object_type, id_field)
    if success:
        self.old_data = self.data
        self.data = result
        return True
    logger.debug(result)
    return False
<SYSTEM_TASK:> Check metadata exists and contains HDX object identifier, and if so load HDX object <END_TASK> <USER_TASK:> Description: def _check_load_existing_object(self, object_type, id_field_name, operation='update'): # type: (str, str, str) -> None """Check metadata exists and contains HDX object identifier, and if so load HDX object Args: object_type (str): Description of HDX object type (for messages) id_field_name (str): Name of field containing HDX object identifier operation (str): Operation to report if error. Defaults to update. Returns: None """
    self._check_existing_object(object_type, id_field_name)
    if not self._load_from_hdx(object_type, self.data[id_field_name]):
        raise HDXError('No existing %s to %s!' % (object_type, operation))
<SYSTEM_TASK:> Helper method to check that metadata for HDX object is complete <END_TASK> <USER_TASK:> Description: def _check_required_fields(self, object_type, ignore_fields): # type: (str, List[str]) -> None """Helper method to check that metadata for HDX object is complete Args: object_type (str): Description of HDX object type (for messages) ignore_fields (List[str]): Any fields to ignore in the check Returns: None """
    for field in self.configuration[object_type]['required_fields']:
        if field not in self.data and field not in ignore_fields:
            raise HDXError('Field %s is missing in %s!' % (field, object_type))
<SYSTEM_TASK:> Helper method to check if HDX object exists and update it <END_TASK> <USER_TASK:> Description: def _merge_hdx_update(self, object_type, id_field_name, file_to_upload=None, **kwargs): # type: (str, str, Optional[str], Any) -> None """Helper method to check if HDX object exists and update it Args: object_type (str): Description of HDX object type (for messages) id_field_name (str): Name of field containing HDX object identifier file_to_upload (Optional[str]): File to upload to HDX **kwargs: See below operation (string): Operation to perform eg. patch. Defaults to update. Returns: None """
    merge_two_dictionaries(self.data, self.old_data)
    if 'batch_mode' in kwargs:
        # Whether or not CKAN should change groupings of datasets on /datasets page
        self.data['batch_mode'] = kwargs['batch_mode']
    if 'skip_validation' in kwargs:
        # Whether or not CKAN should perform validation steps (checking fields present)
        self.data['skip_validation'] = kwargs['skip_validation']
    ignore_field = self.configuration['%s' % object_type].get('ignore_on_update')
    self.check_required_fields(ignore_fields=[ignore_field])
    operation = kwargs.get('operation', 'update')
    self._save_to_hdx(operation, id_field_name, file_to_upload)
<SYSTEM_TASK:> Helper method to check if HDX object exists in HDX and if so, update it <END_TASK> <USER_TASK:> Description: def _update_in_hdx(self, object_type, id_field_name, file_to_upload=None, **kwargs): # type: (str, str, Optional[str], Any) -> None """Helper method to check if HDX object exists in HDX and if so, update it Args: object_type (str): Description of HDX object type (for messages) id_field_name (str): Name of field containing HDX object identifier file_to_upload (Optional[str]): File to upload to HDX **kwargs: See below operation (string): Operation to perform eg. patch. Defaults to update. Returns: None """
    self._check_load_existing_object(object_type, id_field_name)
    # We load an existing object even though it may well have been loaded already
    # to prevent an admittedly unlikely race condition where someone has updated
    # the object in the intervening time
    self._merge_hdx_update(object_type, id_field_name, file_to_upload, **kwargs)
<SYSTEM_TASK:> Creates or updates an HDX object in HDX and return HDX object metadata dict <END_TASK> <USER_TASK:> Description: def _write_to_hdx(self, action, data, id_field_name, file_to_upload=None): # type: (str, Dict, str, Optional[str]) -> Dict """Creates or updates an HDX object in HDX and return HDX object metadata dict Args: action (str): Action to perform eg. 'create', 'update' data (Dict): Data to write to HDX id_field_name (str): Name of field containing HDX object identifier or None file_to_upload (Optional[str]): File to upload to HDX Returns: Dict: HDX object metadata """
    file = None
    try:
        if file_to_upload:
            file = open(file_to_upload, 'rb')
            files = [('upload', file)]
        else:
            files = None
        return self.configuration.call_remoteckan(
            self.actions()[action], data, files=files)
    except Exception as e:
        raisefrom(HDXError, 'Failed when trying to %s %s! (POST)' % (
            action, data[id_field_name]), e)
    finally:
        if file_to_upload and file:
            file.close()
<SYSTEM_TASK:> Creates or updates an HDX object in HDX, saving current data and replacing with returned HDX object data <END_TASK> <USER_TASK:> Description: def _save_to_hdx(self, action, id_field_name, file_to_upload=None): # type: (str, str, Optional[str]) -> None """Creates or updates an HDX object in HDX, saving current data and replacing with returned HDX object data from HDX Args: action (str): Action to perform: 'create' or 'update' id_field_name (str): Name of field containing HDX object identifier file_to_upload (Optional[str]): File to upload to HDX Returns: None """
    result = self._write_to_hdx(action, self.data, id_field_name, file_to_upload)
    self.old_data = self.data
    self.data = result
<SYSTEM_TASK:> Helper method to check if resource exists in HDX and if so, update it, otherwise create it <END_TASK> <USER_TASK:> Description: def _create_in_hdx(self, object_type, id_field_name, name_field_name, file_to_upload=None): # type: (str, str, str, Optional[str]) -> None """Helper method to check if resource exists in HDX and if so, update it, otherwise create it Args: object_type (str): Description of HDX object type (for messages) id_field_name (str): Name of field containing HDX object identifier name_field_name (str): Name of field containing HDX object name file_to_upload (Optional[str]): File to upload to HDX (if url not supplied) Returns: None """
    self.check_required_fields()
    if id_field_name in self.data and self._load_from_hdx(
            object_type, self.data[id_field_name]):
        logger.warning('%s exists. Updating %s' % (
            object_type, self.data[id_field_name]))
        self._merge_hdx_update(object_type, id_field_name, file_to_upload)
    else:
        self._save_to_hdx('create', name_field_name, file_to_upload)
<SYSTEM_TASK:> Helper method to delete a resource from HDX <END_TASK> <USER_TASK:> Description: def _delete_from_hdx(self, object_type, id_field_name): # type: (str, str) -> None """Helper method to delete a resource from HDX Args: object_type (str): Description of HDX object type (for messages) id_field_name (str): Name of field containing HDX object identifier Returns: None """
    if id_field_name not in self.data:
        raise HDXError('No %s field (mandatory) in %s!' % (
            id_field_name, object_type))
    self._save_to_hdx('delete', id_field_name)
<SYSTEM_TASK:> Helper function to add a new HDX object to a supplied list of HDX objects or update existing metadata if the object <END_TASK> <USER_TASK:> Description: def _addupdate_hdxobject(self, hdxobjects, id_field, new_hdxobject): # type: (List[HDXObjectUpperBound], str, HDXObjectUpperBound) -> HDXObjectUpperBound """Helper function to add a new HDX object to a supplied list of HDX objects or update existing metadata if the object already exists in the list Args: hdxobjects (List[T <= HDXObject]): list of HDX objects to which to add new objects or update existing ones id_field (str): Field on which to match to determine if object already exists in list new_hdxobject (T <= HDXObject): The HDX object to be added/updated Returns: T <= HDXObject: The HDX object which was added or updated """
    for hdxobject in hdxobjects:
        if hdxobject[id_field] == new_hdxobject[id_field]:
            merge_two_dictionaries(hdxobject, new_hdxobject)
            return hdxobject
    hdxobjects.append(new_hdxobject)
    return new_hdxobject
<SYSTEM_TASK:> Remove an HDX object from a list within the parent HDX object <END_TASK> <USER_TASK:> Description: def _remove_hdxobject(self, objlist, obj, matchon='id', delete=False): # type: (List[Union[HDXObjectUpperBound,Dict]], Union[HDXObjectUpperBound,Dict,str], str, bool) -> bool """Remove an HDX object from a list within the parent HDX object Args: objlist (List[Union[T <= HDXObject,Dict]]): list of HDX objects obj (Union[T <= HDXObject,Dict,str]): Either an id or hdx object metadata either from an HDX object or a dictionary matchon (str): Field to match on. Defaults to id. delete (bool): Whether to delete HDX object. Defaults to False. Returns: bool: True if object removed, False if not """
    if objlist is None:
        return False
    if isinstance(obj, six.string_types):
        obj_id = obj
    elif isinstance(obj, dict) or isinstance(obj, HDXObject):
        obj_id = obj.get(matchon)
    else:
        raise HDXError('Type of object not a string, dict or T<=HDXObject')
    if not obj_id:
        return False
    for i, objdata in enumerate(objlist):
        objid = objdata.get(matchon)
        if objid and objid == obj_id:
            if delete:
                objlist[i].delete_from_hdx()
            del objlist[i]
            return True
    return False
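A behavioral sketch of the helper above, called on some HDX object; the list contents and the dataset variable are fabricated for illustration:

# Fabricated metadata dicts; matching defaults to the 'id' field.
resources = [{'id': 'abc'}, {'id': 'def'}]

dataset._remove_hdxobject(resources, 'abc')          # True, removed by id string
dataset._remove_hdxobject(resources, {'id': 'def'})  # True, removed by dict
dataset._remove_hdxobject(resources, 'missing')      # False, nothing matched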
<SYSTEM_TASK:> Helper function to convert supplied list of HDX objects to a list of dict <END_TASK> <USER_TASK:> Description: def _convert_hdxobjects(self, hdxobjects): # type: (List[HDXObjectUpperBound]) -> List[HDXObjectUpperBound] """Helper function to convert supplied list of HDX objects to a list of dict Args: hdxobjects (List[T <= HDXObject]): List of HDX objects to convert Returns: List[Dict]: List of HDX objects converted to simple dictionaries """
    newhdxobjects = list()
    for hdxobject in hdxobjects:
        newhdxobjects.append(hdxobject.data)
    return newhdxobjects
<SYSTEM_TASK:> Helper function to make a deep copy of a supplied list of HDX objects <END_TASK> <USER_TASK:> Description: def _copy_hdxobjects(self, hdxobjects, hdxobjectclass, attribute_to_copy=None): # type: (List[HDXObjectUpperBound], type, Optional[str]) -> List[HDXObjectUpperBound] """Helper function to make a deep copy of a supplied list of HDX objects Args: hdxobjects (List[T <= HDXObject]): list of HDX objects to copy hdxobjectclass (type): Type of the HDX Objects to be copied attribute_to_copy (Optional[str]): An attribute to copy over from the HDX object. Defaults to None. Returns: List[T <= HDXObject]: Deep copy of list of HDX objects """
    newhdxobjects = list()
    for hdxobject in hdxobjects:
        newhdxobjectdata = copy.deepcopy(hdxobject.data)
        newhdxobject = hdxobjectclass(
            newhdxobjectdata, configuration=self.configuration)
        if attribute_to_copy:
            value = getattr(hdxobject, attribute_to_copy)
            setattr(newhdxobject, attribute_to_copy, value)
        newhdxobjects.append(newhdxobject)
    return newhdxobjects
<SYSTEM_TASK:> Helper function to take a list of HDX objects contained in the internal dictionary and add them to a <END_TASK> <USER_TASK:> Description: def _separate_hdxobjects(self, hdxobjects, hdxobjects_name, id_field, hdxobjectclass): # type: (List[HDXObjectUpperBound], str, str, type) -> None """Helper function to take a list of HDX objects contained in the internal dictionary and add them to a supplied list of HDX objects or update existing metadata if any objects already exist in the list. The list in the internal dictionary is then deleted. Args: hdxobjects (List[T <= HDXObject]): list of HDX objects to which to add new objects or update existing ones hdxobjects_name (str): Name of key in internal dictionary from which to obtain list of HDX objects id_field (str): Field on which to match to determine if object already exists in list hdxobjectclass (type): Type of the HDX Object to be added/updated Returns: None """
    new_hdxobjects = self.data.get(hdxobjects_name, list())
    """:type : List[HDXObjectUpperBound]"""
    if new_hdxobjects:
        hdxobject_names = set()
        for hdxobject in hdxobjects:
            hdxobject_name = hdxobject[id_field]
            hdxobject_names.add(hdxobject_name)
            for new_hdxobject in new_hdxobjects:
                if hdxobject_name == new_hdxobject[id_field]:
                    merge_two_dictionaries(hdxobject, new_hdxobject)
                    break
        for new_hdxobject in new_hdxobjects:
            if not new_hdxobject[id_field] in hdxobject_names:
                hdxobjects.append(hdxobjectclass(
                    new_hdxobject, configuration=self.configuration))
        del self.data[hdxobjects_name]
<SYSTEM_TASK:> Return the dataset's list of tags <END_TASK> <USER_TASK:> Description: def _get_tags(self): # type: () -> List[str] """Return the dataset's list of tags Returns: List[str]: list of tags or [] if there are none """
    tags = self.data.get('tags', None)
    if not tags:
        return list()
    return [x['name'] for x in tags]
<SYSTEM_TASK:> Add a tag <END_TASK> <USER_TASK:> Description: def _add_tag(self, tag): # type: (str) -> bool """Add a tag Args: tag (str): Tag to add Returns: bool: True if tag added or False if tag already present """
    tags = self.data.get('tags', None)
    if tags:
        if tag in [x['name'] for x in tags]:
            return False
    else:
        tags = list()
    tags.append({'name': tag})
    self.data['tags'] = tags
    return True
<SYSTEM_TASK:> Add a list of tags <END_TASK> <USER_TASK:> Description: def _add_tags(self, tags): # type: (List[str]) -> bool """Add a list of tags Args: tags (List[str]): list of tags to add Returns: bool: True if all tags added or False if any already present. """
    alltagsadded = True
    for tag in tags:
        if not self._add_tag(tag):
            alltagsadded = False
    return alltagsadded
<SYSTEM_TASK:> Return list of strings from comma separated list <END_TASK> <USER_TASK:> Description: def _get_stringlist_from_commastring(self, field): # type: (str) -> List[str] """Return list of strings from comma separated list Args: field (str): Field containing comma separated list Returns: List[str]: List of strings """
    strings = self.data.get(field)
    if strings:
        return strings.split(',')
    else:
        return list()
<SYSTEM_TASK:> Add a string to a comma separated list of strings <END_TASK> <USER_TASK:> Description: def _add_string_to_commastring(self, field, string): # type: (str, str) -> bool """Add a string to a comma separated list of strings Args: field (str): Field containing comma separated list string (str): String to add Returns: bool: True if string added or False if string already present """
    if string in self._get_stringlist_from_commastring(field):
        return False
    strings = '%s,%s' % (self.data.get(field, ''), string)
    if strings[0] == ',':
        strings = strings[1:]
    self.data[field] = strings
    return True
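Taken together with _get_stringlist_from_commastring, the comma-string helpers behave like this; the field name and values are fabricated, and dataset stands in for any HDX object:

dataset._add_string_to_commastring('myfield', 'iso3')  # True, field now 'iso3'
dataset._add_string_to_commastring('myfield', 'name')  # True, field now 'iso3,name'
dataset._add_string_to_commastring('myfield', 'iso3')  # False, already present
dataset._get_stringlist_from_commastring('myfield')    # ['iso3', 'name']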
<SYSTEM_TASK:> Add a list of strings to a comma separated list of strings <END_TASK> <USER_TASK:> Description: def _add_strings_to_commastring(self, field, strings): # type: (str, List[str]) -> bool """Add a list of strings to a comma separated list of strings Args: field (str): Field containing comma separated list strings (List[str]): list of strings to add Returns: bool: True if all strings added or False if any already present. """
    allstringsadded = True
    for string in strings:
        if not self._add_string_to_commastring(field, string):
            allstringsadded = False
    return allstringsadded
<SYSTEM_TASK:> Remove a string from a comma separated list of strings <END_TASK> <USER_TASK:> Description: def _remove_string_from_commastring(self, field, string): # type: (str, str) -> bool """Remove a string from a comma separated list of strings Args: field (str): Field containing comma separated list string (str): String to remove Returns: bool: True if string removed or False if not """
    commastring = self.data.get(field, '')
    if string in commastring:
        self.data[field] = commastring.replace(string, '')
        return True
    return False
<SYSTEM_TASK:> Reads the resource given by identifier from HDX and returns Resource object <END_TASK> <USER_TASK:> Description: def read_from_hdx(identifier, configuration=None): # type: (str, Optional[Configuration]) -> Optional['Resource'] """Reads the resource given by identifier from HDX and returns Resource object Args: identifier (str): Identifier of resource configuration (Optional[Configuration]): HDX configuration. Defaults to global configuration. Returns: Optional[Resource]: Resource object if successful read, None if not """
    if is_valid_uuid(identifier) is False:
        raise HDXError('%s is not a valid resource id!' % identifier)
    resource = Resource(configuration=configuration)
    result = resource._load_from_hdx('resource', identifier)
    if result:
        return resource
    return None
<SYSTEM_TASK:> Delete any existing url and set the file uploaded to the local path provided <END_TASK> <USER_TASK:> Description: def set_file_to_upload(self, file_to_upload): # type: (str) -> None """Delete any existing url and set the file uploaded to the local path provided Args: file_to_upload (str): Local path to file to upload Returns: None """
    if 'url' in self.data:
        del self.data['url']
    self.file_to_upload = file_to_upload
<SYSTEM_TASK:> Check if url or file to upload provided for resource and add resource_type and url_type if not supplied <END_TASK> <USER_TASK:> Description: def check_url_filetoupload(self): # type: () -> None """Check if url or file to upload provided for resource and add resource_type and url_type if not supplied Returns: None """
    if self.file_to_upload is None:
        if 'url' in self.data:
            if 'resource_type' not in self.data:
                self.data['resource_type'] = 'api'
            if 'url_type' not in self.data:
                self.data['url_type'] = 'api'
        else:
            raise HDXError('Either a url or a file to upload must be supplied!')
    else:
        if 'url' in self.data:
            if self.data['url'] != hdx.data.dataset.Dataset.temporary_url:
                raise HDXError(
                    'Either a url or a file to upload must be supplied not both!')
        if 'resource_type' not in self.data:
            self.data['resource_type'] = 'file.upload'
        if 'url_type' not in self.data:
            self.data['url_type'] = 'upload'
        if 'tracking_summary' in self.data:
            del self.data['tracking_summary']
<SYSTEM_TASK:> Check if resource exists in HDX and if so, update it <END_TASK> <USER_TASK:> Description: def update_in_hdx(self, **kwargs): # type: (Any) -> None """Check if resource exists in HDX and if so, update it Args: **kwargs: See below operation (string): Operation to perform eg. patch. Defaults to update. Returns: None """
    self._check_load_existing_object('resource', 'id')
    if self.file_to_upload and 'url' in self.data:
        del self.data['url']
    self._merge_hdx_update('resource', 'id', self.file_to_upload, **kwargs)
<SYSTEM_TASK:> Check if resource exists in HDX and if so, update it, otherwise create it <END_TASK> <USER_TASK:> Description: def create_in_hdx(self): # type: () -> None """Check if resource exists in HDX and if so, update it, otherwise create it Returns: None """
    self.check_required_fields()
    id = self.data.get('id')
    if id and self._load_from_hdx('resource', id):
        logger.warning('%s exists. Updating %s' % ('resource', id))
        if self.file_to_upload and 'url' in self.data:
            del self.data['url']
        self._merge_hdx_update('resource', 'id', self.file_to_upload)
    else:
        self._save_to_hdx('create', 'name', self.file_to_upload)
<SYSTEM_TASK:> Download resource store to provided folder or temporary folder if no folder supplied <END_TASK> <USER_TASK:> Description: def download(self, folder=None): # type: (Optional[str]) -> Tuple[str, str] """Download resource store to provided folder or temporary folder if no folder supplied Args: folder (Optional[str]): Folder to download resource to. Defaults to None. Returns: Tuple[str, str]: (URL downloaded, Path to downloaded file) """
    # Download the resource
    url = self.data.get('url', None)
    if not url:
        raise HDXError('No URL to download!')
    logger.debug('Downloading %s' % url)
    filename = self.data['name']
    format = '.%s' % self.data['format']
    if format not in filename:
        filename = '%s%s' % (filename, format)
    with Download(full_agent=self.configuration.get_user_agent()) as downloader:
        path = downloader.download_file(url, folder, filename)
        return url, path
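Typical usage of the download path, sketched with a fabricated resource identifier:

resource = Resource.read_from_hdx(
    'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee')  # fabricated id for illustration
if resource is not None:
    url, path = resource.download(folder='/tmp')
    print('Downloaded %s to %s' % (url, path))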
<SYSTEM_TASK:> Get list of resources that have a datastore returning their ids. <END_TASK> <USER_TASK:> Description: def get_all_resource_ids_in_datastore(configuration=None): # type: (Optional[Configuration]) -> List[str] """Get list of resources that have a datastore returning their ids. Args: configuration (Optional[Configuration]): HDX configuration. Defaults to global configuration. Returns: List[str]: List of resource ids that are in the datastore """
    resource = Resource(configuration=configuration)
    success, result = resource._read_from_hdx(
        'datastore', '_table_metadata', 'resource_id',
        Resource.actions()['datastore_search'], limit=10000)
    resource_ids = list()
    if not success:
        logger.debug(result)
    else:
        for record in result['records']:
            resource_ids.append(record['name'])
    return resource_ids
<SYSTEM_TASK:> Check if the resource has a datastore. <END_TASK> <USER_TASK:> Description: def has_datastore(self): # type: () -> bool """Check if the resource has a datastore. Returns: bool: Whether the resource has a datastore or not """
    success, result = self._read_from_hdx(
        'datastore', self.data['id'], 'resource_id',
        self.actions()['datastore_search'])
    if not success:
        logger.debug(result)
    else:
        if result:
            return True
    return False
<SYSTEM_TASK:> Delete a resource from the HDX datastore <END_TASK> <USER_TASK:> Description: def delete_datastore(self): # type: () -> None """Delete a resource from the HDX datastore Returns: None """
    success, result = self._read_from_hdx(
        'datastore', self.data['id'], 'resource_id',
        self.actions()['datastore_delete'], force=True)
    if not success:
        logger.debug(result)
<SYSTEM_TASK:> For tabular data, create a resource in the HDX datastore which enables data preview in HDX. If no schema is provided <END_TASK> <USER_TASK:> Description: def create_datastore(self, schema=None, primary_key=None, delete_first=0, path=None): # type: (Optional[List[Dict]], Optional[str], int, Optional[str]) -> None """For tabular data, create a resource in the HDX datastore which enables data preview in HDX. If no schema is provided all fields are assumed to be text. If path is not supplied, the file is first downloaded from HDX. Args: schema (List[Dict]): List of fields and types of form {'id': 'FIELD', 'type': 'TYPE'}. Defaults to None. primary_key (Optional[str]): Primary key of schema. Defaults to None. delete_first (int): Delete datastore before creation. 0 = No, 1 = Yes, 2 = If no primary key. Defaults to 0. path (Optional[str]): Local path to file that was uploaded. Defaults to None. Returns: None """
    if delete_first == 0:
        pass
    elif delete_first == 1:
        self.delete_datastore()
    elif delete_first == 2:
        if primary_key is None:
            self.delete_datastore()
    else:
        raise HDXError(
            'delete_first must be 0, 1 or 2! (0 = No, 1 = Yes, '
            '2 = Delete if no primary key)')
    if path is None:
        # Download the resource
        url, path = self.download()
        delete_after_download = True
    else:
        url = path
        delete_after_download = False

    def convert_to_text(extended_rows):
        for number, headers, row in extended_rows:
            for i, val in enumerate(row):
                row[i] = str(val)
            yield (number, headers, row)

    with Download(full_agent=self.configuration.get_user_agent()) as downloader:
        try:
            stream = downloader.get_tabular_stream(
                path, headers=1, post_parse=[convert_to_text],
                bytes_sample_size=1000000)
            nonefieldname = False
            if schema is None:
                schema = list()
                for fieldname in stream.headers:
                    if fieldname is not None:
                        schema.append({'id': fieldname, 'type': 'text'})
                    else:
                        nonefieldname = True
            data = {'resource_id': self.data['id'], 'force': True,
                    'fields': schema, 'primary_key': primary_key}
            self._write_to_hdx('datastore_create', data, 'resource_id')
            if primary_key is None:
                method = 'insert'
            else:
                method = 'upsert'
            logger.debug('Uploading data from %s to datastore' % url)
            offset = 0
            chunksize = 100
            rowset = stream.read(keyed=True, limit=chunksize)
            while len(rowset) != 0:
                if nonefieldname:
                    for row in rowset:
                        del row[None]
                data = {'resource_id': self.data['id'], 'force': True,
                        'method': method, 'records': rowset}
                self._write_to_hdx('datastore_upsert', data, 'resource_id')
                rowset = stream.read(keyed=True, limit=chunksize)
                logger.debug('Uploading: %s' % offset)
                offset += chunksize
        except Exception as e:
            raisefrom(HDXError, 'Upload to datastore of %s failed!' % url, e)
        finally:
            if delete_after_download:
                remove(path)
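Driving the datastore creation with an explicit schema might look like this; a hedged sketch where the field names are invented, and recall that every field falls back to type 'text' when no schema is given:

schema = [
    {'id': 'iso3', 'type': 'text'},
    {'id': 'population', 'type': 'int'},
]
# delete_first=2 deletes an existing datastore first only when no
# primary key is given; here a primary key is set, so nothing is deleted.
resource.create_datastore(schema=schema, primary_key='iso3', delete_first=2)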
<SYSTEM_TASK:>
For tabular data, create a resource in the HDX datastore which enables data preview in HDX using the built in
<END_TASK>
<USER_TASK:>
Description:
def create_datastore_for_topline(self, delete_first=0, path=None):
    # type: (int, Optional[str]) -> None
    """For tabular data, create a resource in the HDX datastore which enables data preview in HDX using the
    built in YAML definition for a topline. If path is not supplied, the file is first downloaded from HDX.

    Args:
        delete_first (int): Delete datastore before creation. 0 = No, 1 = Yes, 2 = If no primary key. Defaults to 0.
        path (Optional[str]): Local path to file that was uploaded. Defaults to None.

    Returns:
        None
    """
data = load_yaml(script_dir_plus_file(join('..', 'hdx_datasource_topline.yml'), Resource))
self.create_datastore_from_dict_schema(data, delete_first, path=path)
<SYSTEM_TASK:>
For tabular data, update a resource in the HDX datastore which enables data preview in HDX. If no schema is provided
<END_TASK>
<USER_TASK:>
Description:
def update_datastore(self, schema=None, primary_key=None, path=None):
    # type: (Optional[List[Dict]], Optional[str], Optional[str]) -> None
    """For tabular data, update a resource in the HDX datastore which enables data preview in HDX. If no schema
    is provided all fields are assumed to be text. If path is not supplied, the file is first downloaded from HDX.

    Args:
        schema (Optional[List[Dict]]): List of fields and types of form {'id': 'FIELD', 'type': 'TYPE'}. Defaults to None.
        primary_key (Optional[str]): Primary key of schema. Defaults to None.
        path (Optional[str]): Local path to file that was uploaded. Defaults to None.

    Returns:
        None
    """
self.create_datastore(schema, primary_key, 2, path=path)
<SYSTEM_TASK:>
Parses the output stem lines to produce a list with possible stems
<END_TASK>
<USER_TASK:>
Description:
def parse_for_simple_stems(output, skip_empty=False, skip_same_stems=True):
    """
    Parses the output stem lines to produce a list with possible stems
    for each word in the output.

    :param skip_empty: set True to skip lines without stems (default is False)
    :param skip_same_stems: set True to drop duplicate stems for a word (default is True)
    :returns: a list of tuples, each containing an original text word and a list
        of stems for the given word
    """
lines_with_stems = _get_lines_with_stems(output)
stems = list()
last_word = None
for line in lines_with_stems:
    word, stem, _ = line.split("\t")
    stem = stem if stem != '-' else None
    if skip_empty and (stem is None):
        continue
    if last_word != word:
        stems.append((word, []))
    # append new stem only if not already on the list
    stem = None if skip_same_stems and stem in stems[-1][1] else stem
    if stem is not None:
        stems[-1][1].append(stem)
    last_word = word
return stems
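A sketch of the expected shape, assuming _get_lines_with_stems yields tab-separated word<TAB>stem<TAB>tag lines (the sample data is illustrative):

output = 'Ala\tAla\tsubst\nma\tmiec\tfin\nkota\tkot\tsubst'
parse_for_simple_stems(output)
# -> [('Ala', ['Ala']), ('ma', ['miec']), ('kota', ['kot'])]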
<SYSTEM_TASK:>
Checks if a string contains an identifier.
<END_TASK>
<USER_TASK:>
Description:
def _IsIdentifier(cls, string):
    """Checks if a string contains an identifier.

    Args:
        string (str): string to check.

    Returns:
        bool: True if the string contains an identifier, False otherwise.
    """
return (
    string and not string[0].isdigit() and
    all(character.isalnum() or character == '_' for character in string))
<SYSTEM_TASK:>
Deregisters a data type definition.
<END_TASK>
<USER_TASK:>
Description:
def DeregisterDefinition(self, data_type_definition):
    """Deregisters a data type definition.

    The data type definitions are identified based on their lower case name.

    Args:
        data_type_definition (DataTypeDefinition): data type definition.

    Raises:
        KeyError: if a data type definition is not set for the corresponding
            name.
    """
name = data_type_definition.name.lower()
if name not in self._definitions:
    raise KeyError('Definition not set for name: {0:s}.'.format(
        data_type_definition.name))

del self._definitions[name]
<SYSTEM_TASK:>
Registers a data type definition.
<END_TASK>
<USER_TASK:>
Description:
def RegisterDefinition(self, data_type_definition):
    """Registers a data type definition.

    The data type definitions are identified based on their lower case name.

    Args:
        data_type_definition (DataTypeDefinition): data type definitions.

    Raises:
        KeyError: if data type definition is already set for the corresponding
            name.
    """
name_lower = data_type_definition.name.lower()
if name_lower in self._definitions:
    raise KeyError('Definition already set for name: {0:s}.'.format(
        data_type_definition.name))

if data_type_definition.name in self._aliases:
    raise KeyError('Alias already set for name: {0:s}.'.format(
        data_type_definition.name))

for alias in data_type_definition.aliases:
    if alias in self._aliases:
        raise KeyError('Alias already set for name: {0:s}.'.format(alias))

self._definitions[name_lower] = data_type_definition

for alias in data_type_definition.aliases:
    self._aliases[alias] = name_lower

if data_type_definition.TYPE_INDICATOR == definitions.TYPE_INDICATOR_FORMAT:
    self._format_definitions.append(name_lower)
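A registration round trip, sketched under the assumption of a simple data type definition class with a name and aliases (the constructor details are illustrative):

registry = DataTypeDefinitionsRegistry()
definition = data_types.IntegerDefinition('int32le', aliases=['DWORD'])
registry.RegisterDefinition(definition)
# Registering the same (case-insensitive) name or alias again raises KeyError.
registry.DeregisterDefinition(definition)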
<SYSTEM_TASK:>
Splits a given string by comma, trims whitespace on the resulting strings and applies a given ```func``` to
<END_TASK>
<USER_TASK:>
Description:
def apply_on_csv_string(rules_str, func):
    """
    Splits a given string by comma, trims whitespace on the resulting strings
    and applies a given ```func``` to each item.
    """
splitted = rules_str.split(",")
for item in splitted:
    func(item.strip())
<SYSTEM_TASK:>
Set default theme name in the config file.
<END_TASK>
<USER_TASK:>
Description:
def set_default_theme(theme):
    """ Set default theme name in the config file. """
pref_init()  # make sure config files exist
parser = cp.ConfigParser()
parser.read(PREFS_FILE)
# Do we need to create a section?
if not parser.has_section("theme"):
    parser.add_section("theme")
parser.set("theme", "default", theme)
# Write to a temp copy first so a failed write cannot truncate the prefs file
with open("%s.2" % PREFS_FILE, "w") as fp:
    parser.write(fp)
copy("%s.2" % PREFS_FILE, PREFS_FILE)
unlink("%s.2" % PREFS_FILE)
<SYSTEM_TASK:>
Return theme name based on manual input, prefs file, or default to "plain".
<END_TASK>
<USER_TASK:>
Description:
def pick_theme(manual):
    """ Return theme name based on manual input, prefs file, or default to "plain". """
if manual:
    return manual
pref_init()
parser = cp.ConfigParser()
parser.read(PREFS_FILE)
try:
    theme = parser.get("theme", "default")
except (cp.NoSectionError, cp.NoOptionError):
    theme = "plain"
return theme
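A round trip of the two preference helpers above (the theme name is illustrative):

set_default_theme('swiss')   # persisted under [theme] default in the prefs file
theme = pick_theme(None)     # -> 'swiss'; falls back to 'plain' if no prefs entry
theme = pick_theme('dark')   # a manual choice always wins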
<SYSTEM_TASK:>
Pass a path to a theme file which will be extracted to the themes directory.
<END_TASK>
<USER_TASK:>
Description:
def install_theme(path_to_theme):
    """ Pass a path to a theme file which will be extracted to the themes directory. """
pref_init()
# cp the file
filename = basename(path_to_theme)
dest = join(THEMES_DIR, filename)
copy(path_to_theme, dest)
# unzip
zf = zipfile.ZipFile(dest)
# should make sure the zipfile contains only the themename folder and that it
# doesn't conflict with an existing themename - or some other sanity check
zf.extractall(THEMES_DIR)  # plus this is a potential security flaw pre 2.7.4
# remove the copied zipfile
unlink(dest)
<SYSTEM_TASK:>
Entry point for choosing what subcommand to run. Really should be using asciidocapi
<END_TASK>
<USER_TASK:>
Description:
def main():
    """ Entry point for choosing what subcommand to run. Really should be using asciidocapi """
# Try parsing command line args and flags with docopt
args = docopt(__doc__, version="cdk")
# Am I going to need validation? No Schema for the moment...
if args['FILE']:
    out = output_file(args['FILE'])
    # Great! Run asciidoc with appropriate flags
    theme = pick_theme(args['--theme'])
    if theme not in listdir(THEMES_DIR):
        exit('Selected theme "%s" not found. Check ~/.cdk/prefs' % theme)
    cmd = create_command(theme, args['--bare'], args['--toc'], args['--notransition'], args['--logo'])
    run_command(cmd, args)
    if args['--toc']:
        add_css(out, '.deck-container .deck-toc li a span{color: #888;display:inline;}')
    if args['--custom-css']:
        add_css_file(out, args['--custom-css'])
    if args['--open']:
        webbrowser.open("file://" + abspath(out))
# other commands
elif args['--generate']:
    if isfile(args['--generate']):
        exit("%s already exists!" % args['--generate'])
    with open(args['--generate'], "w") as fp:
        sample = join(LOCATION, "custom", "sample.asc")
        fp.write(open(sample).read())
    print("Created sample slide deck in %s..." % args['--generate'])
    exit()
elif args['--install-theme']:
    path = args['--install-theme']
    if not isfile(path):
        exit("Theme file not found.")
    if not path.endswith(".zip"):
        exit("Theme installation currently only supports theme install from .zip files.")
    install_theme(path)
elif args['--default-theme']:
    set_default_theme(args['--default-theme'])
<SYSTEM_TASK:>
Move contents of resources key in internal dictionary into self.resources
<END_TASK>
<USER_TASK:>
Description:
def separate_resources(self):
    # type: () -> None
    """Move contents of resources key in internal dictionary into self.resources

    Returns:
        None
    """
self._separate_hdxobjects(self.resources, 'resources', 'name', hdx.data.resource.Resource)
<SYSTEM_TASK:>
Delete a resource from the dataset and also from HDX by default
<END_TASK>
<USER_TASK:>
Description:
def delete_resource(self, resource, delete=True):
    # type: (Union[hdx.data.resource.Resource,Dict,str], bool) -> bool
    """Delete a resource from the dataset and also from HDX by default

    Args:
        resource (Union[hdx.data.resource.Resource,Dict,str]): Either resource id or resource metadata from a Resource object or a dictionary
        delete (bool): Whether to delete the resource from HDX (not just the dataset). Defaults to True.

    Returns:
        bool: True if resource removed or False if not
    """
if isinstance(resource, str):
    if is_valid_uuid(resource) is False:
        raise HDXError('%s is not a valid resource id!' % resource)
return self._remove_hdxobject(self.resources, resource, delete=delete)
<SYSTEM_TASK:>
Reorder resources in dataset according to provided list.
<END_TASK>
<USER_TASK:>
Description:
def reorder_resources(self, resource_ids, hxl_update=True):
    # type: (List[str], bool) -> None
    """Reorder resources in dataset according to provided list. If only some resource ids are supplied then these
    are assumed to be first and the other resources will stay in their original order.

    Args:
        resource_ids (List[str]): List of resource ids
        hxl_update (bool): Whether to call package_hxl_update. Defaults to True.

    Returns:
        None
    """
dataset_id = self.data.get('id')
if not dataset_id:
    raise HDXError('Dataset has no id! It must be read, created or updated first.')
data = {'id': dataset_id, 'order': resource_ids}
self._write_to_hdx('reorder', data, 'package_id')
if hxl_update:
    self.hxl_update()
<SYSTEM_TASK:>
Loads the dataset given by either id or name from HDX
<END_TASK>
<USER_TASK:>
Description:
def _dataset_load_from_hdx(self, id_or_name):
    # type: (str) -> bool
    """Loads the dataset given by either id or name from HDX

    Args:
        id_or_name (str): Either id or name of dataset

    Returns:
        bool: True if loaded, False if not
    """
if not self._load_from_hdx('dataset', id_or_name):
    return False
self._dataset_create_resources()
return True
<SYSTEM_TASK:>
Check that metadata for dataset and its resources is complete. The parameter ignore_fields
<END_TASK>
<USER_TASK:>
Description:
def check_required_fields(self, ignore_fields=list(), allow_no_resources=False):
    # type: (List[str], bool) -> None
    """Check that metadata for dataset and its resources is complete. The parameter ignore_fields should
    be set if required to any fields that should be ignored for the particular operation.

    Args:
        ignore_fields (List[str]): Fields to ignore. Default is [].
        allow_no_resources (bool): Whether to allow no resources. Defaults to False.

    Returns:
        None
    """
if self.is_requestable():
    self._check_required_fields('dataset-requestable', ignore_fields)
else:
    self._check_required_fields('dataset', ignore_fields)
if len(self.resources) == 0 and not allow_no_resources:
    raise HDXError('There are no resources! Please add at least one resource!')
for resource in self.resources:
    ignore_fields = ['package_id']
    resource.check_required_fields(ignore_fields=ignore_fields)
<SYSTEM_TASK:>
Helper method to merge updated resource from dataset into HDX resource read from HDX including filestore.
<END_TASK>
<USER_TASK:>
Description:
def _dataset_merge_filestore_resource(self, resource, updated_resource, filestore_resources, ignore_fields):
    # type: (hdx.data.Resource, hdx.data.Resource, List[hdx.data.Resource], List[str]) -> None
    """Helper method to merge updated resource from dataset into HDX resource read from HDX including filestore.

    Args:
        resource (hdx.data.Resource): Resource read from HDX
        updated_resource (hdx.data.Resource): Updated resource from dataset
        filestore_resources (List[hdx.data.Resource]): List of resources that use filestore (to be appended to)
        ignore_fields (List[str]): List of fields to ignore when checking resource

    Returns:
        None
    """
if updated_resource.get_file_to_upload():
    resource.set_file_to_upload(updated_resource.get_file_to_upload())
    filestore_resources.append(resource)
merge_two_dictionaries(resource, updated_resource)
resource.check_required_fields(ignore_fields=ignore_fields)
if resource.get_file_to_upload():
    resource['url'] = Dataset.temporary_url
<SYSTEM_TASK:>
Helper method to add new resource from dataset including filestore.
<END_TASK>
<USER_TASK:>
Description:
def _dataset_merge_filestore_newresource(self, new_resource, ignore_fields, filestore_resources):
    # type: (hdx.data.Resource, List[str], List[hdx.data.Resource]) -> None
    """Helper method to add new resource from dataset including filestore.

    Args:
        new_resource (hdx.data.Resource): New resource from dataset
        ignore_fields (List[str]): List of fields to ignore when checking resource
        filestore_resources (List[hdx.data.Resource]): List of resources that use filestore (to be appended to)

    Returns:
        None
    """
new_resource.check_required_fields(ignore_fields=ignore_fields)
self.resources.append(new_resource)
if new_resource.get_file_to_upload():
    filestore_resources.append(new_resource)
    new_resource['url'] = Dataset.temporary_url
<SYSTEM_TASK:>
Helper method to create files in filestore by updating resources.
<END_TASK>
<USER_TASK:>
Description:
def _add_filestore_resources(self, filestore_resources, create_default_views, hxl_update):
    # type: (List[hdx.data.Resource], bool, bool) -> None
    """Helper method to create files in filestore by updating resources.

    Args:
        filestore_resources (List[hdx.data.Resource]): List of resources that use filestore
        create_default_views (bool): Whether to call package_create_default_resource_views.
        hxl_update (bool): Whether to call package_hxl_update.

    Returns:
        None
    """
for resource in filestore_resources:
    for created_resource in self.data['resources']:
        if resource['name'] == created_resource['name']:
            merge_two_dictionaries(resource.data, created_resource)
            del resource['url']
            resource.update_in_hdx()
            merge_two_dictionaries(created_resource, resource.data)
            break
self.init_resources()
self.separate_resources()
if create_default_views:
    self.create_default_views()
if hxl_update:
    self.hxl_update()
<SYSTEM_TASK:>
Helper method to check if dataset or its resources exist and update them
<END_TASK>
<USER_TASK:>
Description:
def _dataset_merge_hdx_update(self, update_resources, update_resources_by_name,
                              remove_additional_resources, create_default_views, hxl_update):
    # type: (bool, bool, bool, bool, bool) -> None
    """Helper method to check if dataset or its resources exist and update them

    Args:
        update_resources (bool): Whether to update resources
        update_resources_by_name (bool): Compare resource names rather than position in list
        remove_additional_resources (bool): Remove additional resources found in dataset (if updating)
        create_default_views (bool): Whether to call package_create_default_resource_views.
        hxl_update (bool): Whether to call package_hxl_update.

    Returns:
        None
    """
# 'old_data' here is the data we want to use for updating while 'data' is the data read from HDX
merge_two_dictionaries(self.data, self.old_data)
if 'resources' in self.data:
    del self.data['resources']
updated_resources = self.old_data.get('resources', None)
filestore_resources = list()
if update_resources and updated_resources:
    ignore_fields = ['package_id']
    if update_resources_by_name:
        resource_names = set()
        for resource in self.resources:
            resource_name = resource['name']
            resource_names.add(resource_name)
            for updated_resource in updated_resources:
                if resource_name == updated_resource['name']:
                    logger.warning('Resource exists. Updating %s' % resource_name)
                    self._dataset_merge_filestore_resource(resource, updated_resource,
                                                           filestore_resources, ignore_fields)
                    break
        updated_resource_names = set()
        for updated_resource in updated_resources:
            updated_resource_name = updated_resource['name']
            updated_resource_names.add(updated_resource_name)
            if updated_resource_name not in resource_names:
                self._dataset_merge_filestore_newresource(updated_resource, ignore_fields,
                                                          filestore_resources)
        if remove_additional_resources:
            resources_to_delete = list()
            for i, resource in enumerate(self.resources):
                resource_name = resource['name']
                if resource_name not in updated_resource_names:
                    logger.warning('Removing additional resource %s!' % resource_name)
                    resources_to_delete.append(i)
            for i in sorted(resources_to_delete, reverse=True):
                del self.resources[i]
    else:  # update resources by position
        for i, updated_resource in enumerate(updated_resources):
            if len(self.resources) > i:
                updated_resource_name = updated_resource['name']
                resource = self.resources[i]
                resource_name = resource['name']
                logger.warning('Resource exists. Updating %s' % resource_name)
                if resource_name != updated_resource_name:
                    logger.warning('Changing resource name to: %s' % updated_resource_name)
                self._dataset_merge_filestore_resource(resource, updated_resource,
                                                       filestore_resources, ignore_fields)
            else:
                self._dataset_merge_filestore_newresource(updated_resource, ignore_fields,
                                                          filestore_resources)
        if remove_additional_resources:
            resources_to_delete = list()
            for i, resource in enumerate(self.resources):
                if len(updated_resources) <= i:
                    logger.warning('Removing additional resource %s!' % resource['name'])
                    resources_to_delete.append(i)
            for i in sorted(resources_to_delete, reverse=True):
                del self.resources[i]
if self.resources:
    self.data['resources'] = self._convert_hdxobjects(self.resources)
ignore_field = self.configuration['dataset'].get('ignore_on_update')
self.check_required_fields(ignore_fields=[ignore_field])
self._save_to_hdx('update', 'id')
self._add_filestore_resources(filestore_resources, create_default_views, hxl_update)
<SYSTEM_TASK:>
Check if dataset exists in HDX and if so, update it
<END_TASK>
<USER_TASK:>
Description:
def update_in_hdx(self, update_resources=True, update_resources_by_name=True,
                  remove_additional_resources=False, create_default_views=True, hxl_update=True):
    # type: (bool, bool, bool, bool, bool) -> None
    """Check if dataset exists in HDX and if so, update it

    Args:
        update_resources (bool): Whether to update resources. Defaults to True.
        update_resources_by_name (bool): Compare resource names rather than position in list. Defaults to True.
        remove_additional_resources (bool): Remove additional resources found in dataset. Defaults to False.
        create_default_views (bool): Whether to call package_create_default_resource_views. Defaults to True.
        hxl_update (bool): Whether to call package_hxl_update. Defaults to True.

    Returns:
        None
    """
loaded = False
if 'id' in self.data:
    self._check_existing_object('dataset', 'id')
    if self._dataset_load_from_hdx(self.data['id']):
        loaded = True
    else:
        logger.warning('Failed to load dataset with id %s' % self.data['id'])
if not loaded:
    self._check_existing_object('dataset', 'name')
    if not self._dataset_load_from_hdx(self.data['name']):
        raise HDXError('No existing dataset to update!')
self._dataset_merge_hdx_update(update_resources=update_resources,
                               update_resources_by_name=update_resources_by_name,
                               remove_additional_resources=remove_additional_resources,
                               create_default_views=create_default_views,
                               hxl_update=hxl_update)
<SYSTEM_TASK:>
Check if dataset exists in HDX and if so, update it, otherwise create it
<END_TASK>
<USER_TASK:>
Description:
def create_in_hdx(self, allow_no_resources=False, update_resources=True, update_resources_by_name=True,
                  remove_additional_resources=False, create_default_views=True, hxl_update=True):
    # type: (bool, bool, bool, bool, bool, bool) -> None
    """Check if dataset exists in HDX and if so, update it, otherwise create it

    Args:
        allow_no_resources (bool): Whether to allow no resources. Defaults to False.
        update_resources (bool): Whether to update resources (if updating). Defaults to True.
        update_resources_by_name (bool): Compare resource names rather than position in list. Defaults to True.
        remove_additional_resources (bool): Remove additional resources found in dataset (if updating). Defaults to False.
        create_default_views (bool): Whether to call package_create_default_resource_views (if updating). Defaults to True.
        hxl_update (bool): Whether to call package_hxl_update. Defaults to True.

    Returns:
        None
    """
self.check_required_fields(allow_no_resources=allow_no_resources)
loadedid = None
if 'id' in self.data:
    if self._dataset_load_from_hdx(self.data['id']):
        loadedid = self.data['id']
    else:
        logger.warning('Failed to load dataset with id %s' % self.data['id'])
if not loadedid:
    if self._dataset_load_from_hdx(self.data['name']):
        loadedid = self.data['name']
if loadedid:
    logger.warning('Dataset exists. Updating %s' % loadedid)
    self._dataset_merge_hdx_update(update_resources=update_resources,
                                   update_resources_by_name=update_resources_by_name,
                                   remove_additional_resources=remove_additional_resources,
                                   create_default_views=create_default_views,
                                   hxl_update=hxl_update)
    return

filestore_resources = list()
if self.resources:
    ignore_fields = ['package_id']
    for resource in self.resources:
        resource.check_required_fields(ignore_fields=ignore_fields)
        if resource.get_file_to_upload():
            filestore_resources.append(resource)
            resource['url'] = Dataset.temporary_url
    self.data['resources'] = self._convert_hdxobjects(self.resources)
self._save_to_hdx('create', 'name')
self._add_filestore_resources(filestore_resources, False, hxl_update)
<SYSTEM_TASK:>
Searches for datasets in HDX
<END_TASK>
<USER_TASK:>
Description:
def search_in_hdx(cls, query='*:*', configuration=None, page_size=1000, **kwargs):
    # type: (Optional[str], Optional[Configuration], int, Any) -> List['Dataset']
    """Searches for datasets in HDX

    Args:
        query (Optional[str]): Query (in Solr format). Defaults to '*:*'.
        configuration (Optional[Configuration]): HDX configuration. Defaults to global configuration.
        page_size (int): Size of page to return. Defaults to 1000.
        **kwargs: See below
        fq (string): Any filter queries to apply
        sort (string): Sorting of the search results. Defaults to 'relevance asc, metadata_modified desc'.
        rows (int): Number of matching rows to return. Defaults to all datasets (sys.maxsize).
        start (int): Offset in the complete result for where the set of returned datasets should begin
        facet (string): Whether to enable faceted results. Defaults to True.
        facet.mincount (int): Minimum counts for facet fields should be included in the results
        facet.limit (int): Maximum number of values the facet fields return (- = unlimited). Defaults to 50.
        facet.field (List[str]): Fields to facet upon. Default is empty.
        use_default_schema (bool): Use default package schema instead of custom schema. Defaults to False.

    Returns:
        List[Dataset]: list of datasets resulting from query
    """
dataset = Dataset(configuration=configuration)
total_rows = kwargs.get('rows', cls.max_int)
start = kwargs.get('start', 0)
all_datasets = None
attempts = 0
while attempts < cls.max_attempts and all_datasets is None:
    # if the count values vary for multiple calls, then must redo query
    all_datasets = list()
    counts = set()
    for page in range(total_rows // page_size + 1):
        pagetimespagesize = page * page_size
        kwargs['start'] = start + pagetimespagesize
        rows_left = total_rows - pagetimespagesize
        rows = min(rows_left, page_size)
        kwargs['rows'] = rows
        _, result = dataset._read_from_hdx('dataset', query, 'q', Dataset.actions()['search'], **kwargs)
        datasets = list()
        if result:
            count = result.get('count', None)
            if count:
                counts.add(count)
                no_results = len(result['results'])
                for datasetdict in result['results']:
                    dataset = Dataset(configuration=configuration)
                    dataset.old_data = dict()
                    dataset.data = datasetdict
                    dataset._dataset_create_resources()
                    datasets.append(dataset)
                all_datasets += datasets
                if no_results < rows:
                    break
            else:
                break
        else:
            logger.debug(result)
    if all_datasets and len(counts) != 1:  # Make sure counts are all same for multiple calls to HDX
        all_datasets = None
        attempts += 1
    else:
        ids = [dataset['id'] for dataset in all_datasets]  # check for duplicates (shouldn't happen)
        if len(ids) != len(set(ids)):
            all_datasets = None
            attempts += 1
if attempts == cls.max_attempts and all_datasets is None:
    raise HDXError('Maximum attempts reached for searching for datasets!')
return all_datasets
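A usage sketch of the paged search (the query and kwargs are illustrative and assume a configured HDX connection):

datasets = Dataset.search_in_hdx('health', rows=10, sort='metadata_modified desc')
for dataset in datasets:
    print(dataset['name'])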
<SYSTEM_TASK:>
Get all dataset names in HDX
<END_TASK>
<USER_TASK:>
Description:
def get_all_dataset_names(configuration=None, **kwargs):
    # type: (Optional[Configuration], Any) -> List[str]
    """Get all dataset names in HDX

    Args:
        configuration (Optional[Configuration]): HDX configuration. Defaults to global configuration.
        **kwargs: See below
        limit (int): Number of rows to return. Defaults to all dataset names.
        offset (int): Offset in the complete result for where the set of returned dataset names should begin

    Returns:
        List[str]: list of all dataset names in HDX
    """
dataset = Dataset(configuration=configuration)
dataset['id'] = 'all dataset names'  # only for error message if produced
return dataset._write_to_hdx('list', kwargs, 'id')
<SYSTEM_TASK:>
Get all datasets in HDX
<END_TASK>
<USER_TASK:>
Description:
def get_all_datasets(cls, configuration=None, page_size=1000, check_duplicates=True, **kwargs):
    # type: (Optional[Configuration], int, bool, Any) -> List['Dataset']
    """Get all datasets in HDX

    Args:
        configuration (Optional[Configuration]): HDX configuration. Defaults to global configuration.
        page_size (int): Size of page to return. Defaults to 1000.
        check_duplicates (bool): Whether to check for duplicate datasets. Defaults to True.
        **kwargs: See below
        limit (int): Number of rows to return. Defaults to all datasets (sys.maxsize)
        offset (int): Offset in the complete result for where the set of returned datasets should begin

    Returns:
        List[Dataset]: list of all datasets in HDX
    """
dataset = Dataset(configuration=configuration)
dataset['id'] = 'all datasets'  # only for error message if produced
total_rows = kwargs.get('limit', cls.max_int)
start = kwargs.get('offset', 0)
all_datasets = None
attempts = 0
while attempts < cls.max_attempts and all_datasets is None:
    # if the dataset names vary for multiple calls, then must redo query
    all_datasets = list()
    for page in range(total_rows // page_size + 1):
        pagetimespagesize = page * page_size
        kwargs['offset'] = start + pagetimespagesize
        rows_left = total_rows - pagetimespagesize
        rows = min(rows_left, page_size)
        kwargs['limit'] = rows
        result = dataset._write_to_hdx('all', kwargs, 'id')
        datasets = list()
        if isinstance(result, list):
            no_results = len(result)
            if no_results == 0 and page == 0:
                all_datasets = None
                break
            for datasetdict in result:
                dataset = Dataset(configuration=configuration)
                dataset.old_data = dict()
                dataset.data = datasetdict
                dataset._dataset_create_resources()
                datasets.append(dataset)
            all_datasets += datasets
            if no_results < rows:
                break
        else:
            logger.debug(result)
    if all_datasets is None:
        attempts += 1
    elif check_duplicates:
        names_list = [dataset['name'] for dataset in all_datasets]
        names = set(names_list)
        if len(names_list) != len(names):  # check for duplicates (shouldn't happen)
            all_datasets = None
            attempts += 1
    # This check is no longer valid because of showcases being returned by package_list!
    # elif total_rows == max_int:
    #     all_names = set(Dataset.get_all_dataset_names())  # check dataset names match package_list
    #     if names != all_names:
    #         all_datasets = None
    #         attempts += 1
if attempts == cls.max_attempts and all_datasets is None:
    raise HDXError('Maximum attempts reached for getting all datasets!')
return all_datasets
<SYSTEM_TASK:>
Get supplied dataset date as string in specified format.
<END_TASK>
<USER_TASK:>
Description:
def _get_formatted_date(dataset_date, date_format=None):
    # type: (Optional[datetime], Optional[str]) -> Optional[str]
    """Get supplied dataset date as string in specified format.
    If no format is supplied, an ISO 8601 string is returned.

    Args:
        dataset_date (Optional[datetime.datetime]): dataset date in datetime.datetime format
        date_format (Optional[str]): Date format. None is taken to be ISO 8601. Defaults to None.

    Returns:
        Optional[str]: Dataset date string or None if no date is set
    """
if dataset_date:
    if date_format:
        return dataset_date.strftime(date_format)
    else:
        return dataset_date.date().isoformat()
else:
    return None
<SYSTEM_TASK:>
Parse dataset date from string using specified format. If no format is supplied, the function will guess.
<END_TASK>
<USER_TASK:>
Description:
def _parse_date(dataset_date, date_format):
    # type: (str, Optional[str]) -> datetime
    """Parse dataset date from string using specified format. If no format is supplied, the function will guess.
    For unambiguous formats, this should be fine.

    Args:
        dataset_date (str): Dataset date string
        date_format (Optional[str]): Date format. If None is given, will attempt to guess. Defaults to None.

    Returns:
        datetime.datetime
    """
if date_format is None:
    try:
        return parser.parse(dataset_date)
    except (ValueError, OverflowError) as e:
        raisefrom(HDXError, 'Invalid dataset date!', e)
else:
    try:
        return datetime.strptime(dataset_date, date_format)
    except ValueError as e:
        raisefrom(HDXError, 'Invalid dataset date!', e)
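For example, under the logic above both calls would be expected to return the same datetime (the dates are illustrative):

Dataset._parse_date('2020-06-01', None)        # format guessed by dateutil's parser
Dataset._parse_date('01/06/2020', '%d/%m/%Y')  # explicit format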
<SYSTEM_TASK:>
Set dataset date from string using specified format. If no format is supplied, the function will guess.
<END_TASK>
<USER_TASK:>
Description:
def set_dataset_date(self, dataset_date, dataset_end_date=None, date_format=None):
    # type: (str, Optional[str], Optional[str]) -> None
    """Set dataset date from string using specified format. If no format is supplied, the function will guess.
    For unambiguous formats, this should be fine.

    Args:
        dataset_date (str): Dataset date string
        dataset_end_date (Optional[str]): Dataset end date string
        date_format (Optional[str]): Date format. If None is given, will attempt to guess. Defaults to None.

    Returns:
        None
    """
parsed_date = self._parse_date(dataset_date, date_format)
if dataset_end_date is None:
    self.set_dataset_date_from_datetime(parsed_date)
else:
    parsed_end_date = self._parse_date(dataset_end_date, date_format)
    self.set_dataset_date_from_datetime(parsed_date, parsed_end_date)
<SYSTEM_TASK:>
Set dataset date as a range from year or start and end year.
<END_TASK>
<USER_TASK:>
Description:
def set_dataset_year_range(self, dataset_year, dataset_end_year=None):
    # type: (Union[str, int], Optional[Union[str, int]]) -> None
    """Set dataset date as a range from year or start and end year.

    Args:
        dataset_year (Union[str, int]): Dataset year given as string or int
        dataset_end_year (Optional[Union[str, int]]): Dataset end year given as string or int

    Returns:
        None
    """
if isinstance(dataset_year, int):
    dataset_date = '01/01/%d' % dataset_year
elif isinstance(dataset_year, str):
    dataset_date = '01/01/%s' % dataset_year
else:
    raise hdx.data.hdxobject.HDXError('dataset_year has type %s which is not supported!' %
                                      type(dataset_year).__name__)
if dataset_end_year is None:
    dataset_end_year = dataset_year
if isinstance(dataset_end_year, int):
    dataset_end_date = '31/12/%d' % dataset_end_year
elif isinstance(dataset_end_year, str):
    dataset_end_date = '31/12/%s' % dataset_end_year
else:
    raise hdx.data.hdxobject.HDXError('dataset_end_year has type %s which is not supported!' %
                                      type(dataset_end_year).__name__)
self.set_dataset_date(dataset_date, dataset_end_date)
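For instance, assuming dataset is a Dataset object, a single-year call expands to a full calendar-year range (illustrative):

dataset.set_dataset_year_range(2017)        # 01/01/2017 - 31/12/2017
dataset.set_dataset_year_range(2015, 2017)  # 01/01/2015 - 31/12/2017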
<SYSTEM_TASK:>
Remove a tag
<END_TASK>
<USER_TASK:>
Description:
def remove_tag(self, tag):
    # type: (str) -> bool
    """Remove a tag

    Args:
        tag (str): Tag to remove

    Returns:
        bool: True if tag removed or False if not
    """
return self._remove_hdxobject(self.data.get('tags'), tag, matchon='name')
<SYSTEM_TASK:>
Return the dataset's location
<END_TASK>
<USER_TASK:>
Description:
def get_location(self, locations=None):
    # type: (Optional[List[str]]) -> List[str]
    """Return the dataset's location

    Args:
        locations (Optional[List[str]]): Valid locations list. Defaults to list downloaded from HDX.

    Returns:
        List[str]: list of locations or [] if there are none
    """
countries = self.data.get('groups', None)
if not countries:
    return list()
return [Locations.get_location_from_HDX_code(x['name'], locations=locations,
                                             configuration=self.configuration) for x in countries]
<SYSTEM_TASK:>
Add a country. If an iso 3 code is not provided, value is parsed and if it is a valid country name,
<END_TASK>
<USER_TASK:>
Description:
def add_country_location(self, country, exact=True, locations=None, use_live=True):
    # type: (str, bool, Optional[List[str]], bool) -> bool
    """Add a country. If an iso 3 code is not provided, value is parsed and if it is a valid country name,
    converted to an iso 3 code. If the country is already added, it is ignored.

    Args:
        country (str): Country to add
        exact (bool): True for exact matching or False to allow fuzzy matching. Defaults to True.
        locations (Optional[List[str]]): Valid locations list. Defaults to list downloaded from HDX.
        use_live (bool): Try to use the latest country data from the web rather than the file in the package. Defaults to True.

    Returns:
        bool: True if country added or False if country already present
    """
iso3, match = Country.get_iso3_country_code_fuzzy(country, use_live=use_live)
if iso3 is None:
    raise HDXError('Country: %s - cannot find iso3 code!' % country)
return self.add_other_location(iso3, exact=exact,
                               alterror='Country: %s with iso3: %s could not be found in HDX list!' %
                                        (country, iso3),
                               locations=locations)
<SYSTEM_TASK:>
Add a list of countries. If iso 3 codes are not provided, values are parsed and where they are valid country
<END_TASK>
<USER_TASK:>
Description:
def add_country_locations(self, countries, locations=None, use_live=True):
    # type: (List[str], Optional[List[str]], bool) -> bool
    """Add a list of countries. If iso 3 codes are not provided, values are parsed and where they are valid
    country names, converted to iso 3 codes. If any country is already added, it is ignored.

    Args:
        countries (List[str]): list of countries to add
        locations (Optional[List[str]]): Valid locations list. Defaults to list downloaded from HDX.
        use_live (bool): Try to use the latest country data from the web rather than the file in the package. Defaults to True.

    Returns:
        bool: True if all countries added or False if any already present.
    """
allcountriesadded = True
for country in countries:
    if not self.add_country_location(country, locations=locations, use_live=use_live):
        allcountriesadded = False
return allcountriesadded
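A sketch of adding locations by name or iso3 code, assuming dataset is a Dataset object (values are illustrative):

dataset.add_country_location('Kenya')                 # resolved to iso3 KEN
dataset.add_country_locations(['KEN', 'UGA', 'TZA'])  # False if any already present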
<SYSTEM_TASK:>
Add all countries in a region. If a 3 digit UNStats M49 region code is not provided, value is parsed as a
<END_TASK>
<USER_TASK:>
Description:
def add_region_location(self, region, locations=None, use_live=True):
    # type: (str, Optional[List[str]], bool) -> bool
    """Add all countries in a region. If a 3 digit UNStats M49 region code is not provided, value is parsed
    as a region name. If any country is already added, it is ignored.

    Args:
        region (str): M49 region, intermediate region or subregion to add
        locations (Optional[List[str]]): Valid locations list. Defaults to list downloaded from HDX.
        use_live (bool): Try to use the latest country data from the web rather than the file in the package. Defaults to True.

    Returns:
        bool: True if all countries in region added or False if any already present.
    """
return self.add_country_locations(Country.get_countries_in_region(region, exception=HDXError,
                                                                  use_live=use_live), locations=locations)
<SYSTEM_TASK:>
Add a location which is not a country or region. Value is parsed and compared to existing locations in
<END_TASK>
<USER_TASK:>
Description:
def add_other_location(self, location, exact=True, alterror=None, locations=None):
    # type: (str, bool, Optional[str], Optional[List[str]]) -> bool
    """Add a location which is not a country or region. Value is parsed and compared to existing locations
    in HDX. If the location is already added, it is ignored.

    Args:
        location (str): Location to add
        exact (bool): True for exact matching or False to allow fuzzy matching. Defaults to True.
        alterror (Optional[str]): Alternative error message to builtin if location not found. Defaults to None.
        locations (Optional[List[str]]): Valid locations list. Defaults to list downloaded from HDX.

    Returns:
        bool: True if location added or False if location already present
    """
hdx_code, match = Locations.get_HDX_code_from_location_partial(location, locations=locations,
                                                               configuration=self.configuration)
if hdx_code is None or (exact is True and match is False):
    if alterror is None:
        raise HDXError('Location: %s - cannot find in HDX!' % location)
    else:
        raise HDXError(alterror)
groups = self.data.get('groups', None)
hdx_code = hdx_code.lower()
if groups:
    if hdx_code in [x['name'] for x in groups]:
        return False
else:
    groups = list()
groups.append({'name': hdx_code})
self.data['groups'] = groups
return True
<SYSTEM_TASK:>
Remove a location. If the location is not present, it is ignored.
<END_TASK>
<USER_TASK:>
Description:
def remove_location(self, location):
    # type: (str) -> bool
    """Remove a location. If the location is not present, it is ignored.

    Args:
        location (str): Location to remove

    Returns:
        bool: True if location removed or False if not
    """
res = self._remove_hdxobject(self.data.get('groups'), location, matchon='name')
if not res:
    res = self._remove_hdxobject(self.data.get('groups'), location.upper(), matchon='name')
if not res:
    res = self._remove_hdxobject(self.data.get('groups'), location.lower(), matchon='name')
return res
<SYSTEM_TASK:>
Get the dataset's maintainer.
<END_TASK>
<USER_TASK:>
Description:
def get_maintainer(self):
    # type: () -> hdx.data.user.User
    """Get the dataset's maintainer.

    Returns:
        User: Dataset's maintainer
    """
return hdx.data.user.User.read_from_hdx(self.data['maintainer'], configuration=self.configuration)
<SYSTEM_TASK:>
Set the dataset's maintainer.
<END_TASK>
<USER_TASK:>
Description:
def set_maintainer(self, maintainer):
    # type: (Union[hdx.data.user.User,Dict,str]) -> None
    """Set the dataset's maintainer.

    Args:
        maintainer (Union[User,Dict,str]): Either a user id or User metadata from a User object or dictionary.

    Returns:
        None
    """
if isinstance(maintainer, hdx.data.user.User) or isinstance(maintainer, dict):
    if 'id' not in maintainer:
        maintainer = hdx.data.user.User.read_from_hdx(maintainer['name'], configuration=self.configuration)
    maintainer = maintainer['id']
elif not isinstance(maintainer, str):
    raise HDXError('Type %s cannot be added as a maintainer!' % type(maintainer).__name__)
if is_valid_uuid(maintainer) is False:
    raise HDXError('%s is not a valid user id for a maintainer!' % maintainer)
self.data['maintainer'] = maintainer
<SYSTEM_TASK:>
Set the dataset's organization.
<END_TASK>
<USER_TASK:>
Description:
def set_organization(self, organization):
    # type: (Union[hdx.data.organization.Organization,Dict,str]) -> None
    """Set the dataset's organization.

    Args:
        organization (Union[Organization,Dict,str]): Either an Organization id or Organization metadata from an Organization object or dictionary.

    Returns:
        None
    """
if isinstance(organization, hdx.data.organization.Organization) or isinstance(organization, dict):
    if 'id' not in organization:
        organization = hdx.data.organization.Organization.read_from_hdx(organization['name'],
                                                                        configuration=self.configuration)
    organization = organization['id']
elif not isinstance(organization, str):
    raise HDXError('Type %s cannot be added as an organization!' % type(organization).__name__)
if is_valid_uuid(organization) is False and organization != 'hdx':
    raise HDXError('%s is not a valid organization id!' % organization)
self.data['owner_org'] = organization
<SYSTEM_TASK:>
Get any showcases the dataset is in
<END_TASK>
<USER_TASK:>
Description:
def get_showcases(self):
    # type: () -> List[hdx.data.showcase.Showcase]
    """Get any showcases the dataset is in

    Returns:
        List[Showcase]: list of showcases
    """
assoc_result, showcases_dicts = self._read_from_hdx('showcase', self.data['id'], fieldname='package_id',
                                                    action=hdx.data.showcase.Showcase.actions()['list_showcases'])
showcases = list()
if assoc_result:
    for showcase_dict in showcases_dicts:
        showcase = hdx.data.showcase.Showcase(showcase_dict, configuration=self.configuration)
        showcases.append(showcase)
return showcases
<SYSTEM_TASK:>
Get dataset showcase dict
<END_TASK>
<USER_TASK:>
Description:
def _get_dataset_showcase_dict(self, showcase):
    # type: (Union[hdx.data.showcase.Showcase, Dict,str]) -> Dict
    """Get dataset showcase dict

    Args:
        showcase (Union[Showcase,Dict,str]): Either a showcase id or Showcase metadata from a Showcase object or dictionary

    Returns:
        dict: dataset showcase dict
    """
if isinstance(showcase, hdx.data.showcase.Showcase) or isinstance(showcase, dict):
    if 'id' not in showcase:
        showcase = hdx.data.showcase.Showcase.read_from_hdx(showcase['name'])
    showcase = showcase['id']
elif not isinstance(showcase, str):
    raise HDXError('Type %s cannot be added as a showcase!' % type(showcase).__name__)
if is_valid_uuid(showcase) is False:
    raise HDXError('%s is not a valid showcase id!' % showcase)
return {'package_id': self.data['id'], 'showcase_id': showcase}
<SYSTEM_TASK:>
Add dataset to showcase
<END_TASK>
<USER_TASK:>
Description:
def add_showcase(self, showcase, showcases_to_check=None):
    # type: (Union[hdx.data.showcase.Showcase,Dict,str], List[hdx.data.showcase.Showcase]) -> bool
    """Add dataset to showcase

    Args:
        showcase (Union[Showcase,Dict,str]): Either a showcase id or showcase metadata from a Showcase object or dictionary
        showcases_to_check (List[Showcase]): List of showcases against which to check existence of showcase. Defaults to showcases containing dataset.

    Returns:
        bool: True if the showcase was added, False if already present
    """
dataset_showcase = self._get_dataset_showcase_dict(showcase)
if showcases_to_check is None:
    showcases_to_check = self.get_showcases()
for showcase in showcases_to_check:
    if dataset_showcase['showcase_id'] == showcase['id']:
        return False
showcase = hdx.data.showcase.Showcase({'id': dataset_showcase['showcase_id']}, configuration=self.configuration)
showcase._write_to_hdx('associate', dataset_showcase, 'package_id')
return True
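A usage sketch, assuming dataset is a Dataset object (the showcase id is hypothetical):

showcases = dataset.get_showcases()
dataset.add_showcase('05e392bf-04e0-4ca6-848c-4e87bba10746')  # hypothetical id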
<SYSTEM_TASK:>
Add dataset to multiple showcases
<END_TASK>
<USER_TASK:>
Description:
def add_showcases(self, showcases, showcases_to_check=None):
    # type: (List[Union[hdx.data.showcase.Showcase,Dict,str]], List[hdx.data.showcase.Showcase]) -> bool
    """Add dataset to multiple showcases

    Args:
        showcases (List[Union[Showcase,Dict,str]]): A list of either showcase ids or showcase metadata from Showcase objects or dictionaries
        showcases_to_check (List[Showcase]): List of showcases against which to check existence of showcase. Defaults to showcases containing dataset.

    Returns:
        bool: True if all showcases added or False if any already present
    """
if showcases_to_check is None:
    showcases_to_check = self.get_showcases()
allshowcasesadded = True
for showcase in showcases:
    if not self.add_showcase(showcase, showcases_to_check=showcases_to_check):
        allshowcasesadded = False
return allshowcasesadded
<SYSTEM_TASK:>
Remove dataset from showcase
<END_TASK>
<USER_TASK:>
Description:
def remove_showcase(self, showcase):
    # type: (Union[hdx.data.showcase.Showcase,Dict,str]) -> None
    """Remove dataset from showcase

    Args:
        showcase (Union[Showcase,Dict,str]): Either a showcase id string or showcase metadata from a Showcase object or dictionary

    Returns:
        None
    """
dataset_showcase = self._get_dataset_showcase_dict(showcase)
showcase = hdx.data.showcase.Showcase({'id': dataset_showcase['showcase_id']}, configuration=self.configuration)
showcase._write_to_hdx('disassociate', dataset_showcase, 'package_id')
<SYSTEM_TASK:>
Set the dataset to be of type requestable or not
<END_TASK>
<USER_TASK:>
Description:
def set_requestable(self, requestable=True):
    # type: (bool) -> None
    """Set the dataset to be of type requestable or not

    Args:
        requestable (bool): Set whether dataset is requestable. Defaults to True.

    Returns:
        None
    """
self.data['is_requestdata_type'] = requestable
if requestable:
    self.data['private'] = False
<SYSTEM_TASK:>
Return list of filetypes in your data
<END_TASK>
<USER_TASK:>
Description:
def get_filetypes(self):
    # type: () -> List[str]
    """Return list of filetypes in your data

    Returns:
        List[str]: List of filetypes
    """
if not self.is_requestable():
    return [resource.get_file_type() for resource in self.get_resources()]
return self._get_stringlist_from_commastring('file_types')
<SYSTEM_TASK:>
Clean dataset tags according to tags cleanup spreadsheet and return if any changes occurred
<END_TASK>
<USER_TASK:>
Description:
def clean_dataset_tags(self):
    # type: () -> Tuple[bool, bool]
    """Clean dataset tags according to tags cleanup spreadsheet and return if any changes occurred

    Returns:
        Tuple[bool, bool]: Returns (True if tags changed or False if not, True if error or False if not)
    """
tags_dict, wildcard_tags = Tags.tagscleanupdicts()

def delete_tag(tag):
    logger.info('%s - Deleting tag %s!' % (self.data['name'], tag))
    return self.remove_tag(tag), False

def update_tag(tag, final_tags, wording, remove_existing=True):
    text = '%s - %s: %s -> ' % (self.data['name'], wording, tag)
    if not final_tags:
        logger.error('%snothing!' % text)
        return False, True
    tags_lower_five = final_tags[:5].lower()
    if tags_lower_five == 'merge' or tags_lower_five == 'split' or (
            ';' not in final_tags and len(final_tags) > 50):
        logger.error('%s%s - Invalid final tag!' % (text, final_tags))
        return False, True
    if remove_existing:
        self.remove_tag(tag)
    tags = ', '.join(self.get_tags())
    if self.add_tags(final_tags.split(';')):
        logger.info('%s%s! Dataset tags: %s' % (text, final_tags, tags))
    else:
        logger.warning('%s%s - At least one of the tags already exists! Dataset tags: %s' %
                       (text, final_tags, tags))
    return True, False

def do_action(tag, tags_dict_key):
    whattodo = tags_dict[tags_dict_key]
    action = whattodo[u'action']
    final_tags = whattodo[u'final tags (semicolon separated)']
    if action == u'Delete':
        changed, error = delete_tag(tag)
    elif action == u'Merge':
        changed, error = update_tag(tag, final_tags, 'Merging')
    elif action == u'Fix spelling':
        changed, error = update_tag(tag, final_tags, 'Fixing spelling')
    elif action == u'Non English':
        changed, error = update_tag(tag, final_tags, 'Anglicising', remove_existing=False)
    else:
        changed = False
        error = False
    return changed, error

def process_tag(tag):
    changed = False
    error = False
    if tag in tags_dict.keys():
        changed, error = do_action(tag, tag)
    else:
        for wildcard_tag in wildcard_tags:
            if fnmatch.fnmatch(tag, wildcard_tag):
                changed, error = do_action(tag, wildcard_tag)
                break
    return changed, error

anychange = False
anyerror = False
for tag in self.get_tags():
    changed, error = process_tag(tag)
    if changed:
        anychange = True
    if error:
        anyerror = True
return anychange, anyerror