Columns: docstring (string, 52-499 chars), function (string, 67-35.2k chars), __index_level_0__ (int64, 52.6k-1.16M)
Retrieves a specific data type definition by name.

Args:
    name (str): name of the data type definition.

Returns:
    DataTypeDefinition: data type definition or None if not available.

def GetDefinitionByName(self, name):
    lookup_name = name.lower()
    if lookup_name not in self._definitions:
        lookup_name = self._aliases.get(name, None)
    return self._definitions.get(lookup_name, None)
814,332
Registers a data type definition. The data type definitions are identified based on their lower case name.

Args:
    data_type_definition (DataTypeDefinition): data type definition.

Raises:
    KeyError: if a data type definition is already set for the corresponding name.

def RegisterDefinition(self, data_type_definition):
    name_lower = data_type_definition.name.lower()
    if name_lower in self._definitions:
        raise KeyError('Definition already set for name: {0:s}.'.format(
            data_type_definition.name))
    if data_type_definition.name in self._aliases:
        raise KeyError('Alias already set for name: {0:s}.'.format(
            data_type_definition.name))
    for alias in data_type_definition.aliases:
        if alias in self._aliases:
            raise KeyError('Alias already set for name: {0:s}.'.format(alias))
    self._definitions[name_lower] = data_type_definition
    for alias in data_type_definition.aliases:
        self._aliases[alias] = name_lower
    if data_type_definition.TYPE_INDICATOR == definitions.TYPE_INDICATOR_FORMAT:
        self._format_definitions.append(name_lower)
814,333
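A minimal usage sketch tying the two registry methods above together. DataTypeRegistry is a hypothetical stand-in name for the class owning these methods, and the SimpleNamespace stands in for a real definition object; only the register/lookup behaviour shown is taken from the code above.

from types import SimpleNamespace

# Hypothetical sketch; DataTypeRegistry and the definition object are assumptions.
registry = DataTypeRegistry()
definition = SimpleNamespace(name='int32le', aliases=['int32_le'],
                             TYPE_INDICATOR='integer')
registry.RegisterDefinition(definition)                        # stored under 'int32le'
assert registry.GetDefinitionByName('INT32LE') is definition   # lookup is case-insensitive
assert registry.GetDefinitionByName('int32_le') is definition  # aliases resolve too
assert registry.GetDefinitionByName('missing') is None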
Set dictionary items but do not allow setting of resources.

Args:
    key (Any): Key in dictionary
    value (Any): Value to put in dictionary

Returns:
    None

def __setitem__(self, key, value):
    # type: (Any, Any) -> None
    if key == 'resources':
        self.add_update_resources(value, ignore_datasetid=True)
        return
    super(Dataset, self).__setitem__(key, value)
814,351
Get a Resource object from either a resource id or resource metadata from a Resource object or a dictionary.

Args:
    resource (Union[hdx.data.resource.Resource,Dict,str]): Either resource id or resource metadata from a Resource object or a dictionary

Returns:
    hdx.data.resource.Resource: Resource object

def _get_resource_from_obj(self, resource):
    # type: (Union[hdx.data.resource.Resource,Dict,str]) -> hdx.data.resource.Resource
    if isinstance(resource, str):
        if is_valid_uuid(resource) is False:
            raise HDXError('%s is not a valid resource id!' % resource)
        resource = hdx.data.resource.Resource.read_from_hdx(resource, configuration=self.configuration)
    elif isinstance(resource, dict):
        resource = hdx.data.resource.Resource(resource, configuration=self.configuration)
    if not isinstance(resource, hdx.data.resource.Resource):
        raise HDXError('Type %s cannot be added as a resource!' % type(resource).__name__)
    return resource
814,353
Add new or update existing resource in dataset with new metadata.

Args:
    resource (Union[hdx.data.resource.Resource,Dict,str]): Either resource id or resource metadata from a Resource object or a dictionary
    ignore_datasetid (bool): Whether to ignore dataset id in the resource

Returns:
    None

def add_update_resource(self, resource, ignore_datasetid=False):
    # type: (Union[hdx.data.resource.Resource,Dict,str], bool) -> None
    resource = self._get_resource_from_obj(resource)
    if 'package_id' in resource:
        if not ignore_datasetid:
            raise HDXError('Resource %s being added already has a dataset id!' % (resource['name']))
    resource.check_url_filetoupload()
    resource_updated = self._addupdate_hdxobject(self.resources, 'name', resource)
    if resource.get_file_to_upload():
        resource_updated.set_file_to_upload(resource.get_file_to_upload())
814,354
Add new or update existing resources with new metadata to the dataset.

Args:
    resources (List[Union[hdx.data.resource.Resource,Dict,str]]): A list of either resource ids or resources metadata from either Resource objects or dictionaries
    ignore_datasetid (bool): Whether to ignore dataset id in the resource. Defaults to False.

Returns:
    None

def add_update_resources(self, resources, ignore_datasetid=False):
    # type: (List[Union[hdx.data.resource.Resource,Dict,str]], bool) -> None
    if not isinstance(resources, list):
        raise HDXError('Resources should be a list!')
    for resource in resources:
        self.add_update_resource(resource, ignore_datasetid)
814,355
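A hedged sketch of how the two methods above are typically combined, assuming an HDX Configuration has already been created; the dataset fields, resource dictionaries and URLs are placeholders, not real data.

# Hypothetical usage sketch; all names and URLs are placeholders.
dataset = Dataset({'name': 'example-dataset', 'title': 'Example Dataset'})
dataset.add_update_resources([
    {'name': 'data.csv', 'format': 'csv', 'url': 'http://example.com/data.csv'},
    {'name': 'notes.xlsx', 'format': 'xlsx', 'url': 'http://example.com/notes.xlsx'},
])
# A single resource can also be added or updated by itself:
dataset.add_update_resource({'name': 'data.csv', 'description': 'Updated description'})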
Delete a resource from the dataset and also from HDX by default.

Args:
    resource (Union[hdx.data.resource.Resource,Dict,str]): Either resource id or resource metadata from a Resource object or a dictionary
    delete (bool): Whether to delete the resource from HDX (not just the dataset). Defaults to True.

Returns:
    bool: True if resource removed or False if not

def delete_resource(self, resource, delete=True):
    # type: (Union[hdx.data.resource.Resource,Dict,str], bool) -> bool
    if isinstance(resource, str):
        if is_valid_uuid(resource) is False:
            raise HDXError('%s is not a valid resource id!' % resource)
    return self._remove_hdxobject(self.resources, resource, delete=delete)
814,356
Reorder resources in dataset according to provided list. If only some resource ids are supplied then these are assumed to be first and the other resources will stay in their original order.

Args:
    resource_ids (List[str]): List of resource ids
    hxl_update (bool): Whether to call package_hxl_update. Defaults to True.

Returns:
    None

def reorder_resources(self, resource_ids, hxl_update=True):
    # type: (List[str], bool) -> None
    dataset_id = self.data.get('id')
    if not dataset_id:
        raise HDXError('Dataset has no id! It must be read, created or updated first.')
    data = {'id': dataset_id, 'order': resource_ids}
    self._write_to_hdx('reorder', data, 'package_id')
    if hxl_update:
        self.hxl_update()
814,357
Update dataset metadata with static metadata from YAML file.

Args:
    path (str): Path to YAML dataset metadata. Defaults to config/hdx_dataset_static.yml.

Returns:
    None

def update_from_yaml(self, path=join('config', 'hdx_dataset_static.yml')):
    # type: (str) -> None
    super(Dataset, self).update_from_yaml(path)
    self.separate_resources()
814,358
Update dataset metadata with static metadata from JSON file.

Args:
    path (str): Path to JSON dataset metadata. Defaults to config/hdx_dataset_static.json.

Returns:
    None

def update_from_json(self, path=join('config', 'hdx_dataset_static.json')):
    # type: (str) -> None
    super(Dataset, self).update_from_json(path)
    self.separate_resources()
814,359
Reads the dataset given by identifier from HDX and returns Dataset object.

Args:
    identifier (str): Identifier of dataset
    configuration (Optional[Configuration]): HDX configuration. Defaults to global configuration.

Returns:
    Optional[Dataset]: Dataset object if successful read, None if not

def read_from_hdx(identifier, configuration=None):
    # type: (str, Optional[Configuration]) -> Optional['Dataset']
    dataset = Dataset(configuration=configuration)
    result = dataset._dataset_load_from_hdx(identifier)
    if result:
        return dataset
    return None
814,360
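A minimal sketch of reading a dataset with the method above, assuming the global HDX Configuration has been set up beforehand; the identifier is a placeholder.

# Hypothetical usage sketch; 'some-dataset-name' is a placeholder identifier.
dataset = Dataset.read_from_hdx('some-dataset-name')
if dataset is None:
    print('Dataset not found')
else:
    print(dataset.get_resources())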
Loads the dataset given by either id or name from HDX.

Args:
    id_or_name (str): Either id or name of dataset

Returns:
    bool: True if loaded, False if not

def _dataset_load_from_hdx(self, id_or_name):
    # type: (str) -> bool
    if not self._load_from_hdx('dataset', id_or_name):
        return False
    self._dataset_create_resources()
    return True
814,362
Check that metadata for dataset and its resources is complete. The parameter ignore_fields should be set if required to any fields that should be ignored for the particular operation.

Args:
    ignore_fields (List[str]): Fields to ignore. Default is [].
    allow_no_resources (bool): Whether to allow no resources. Defaults to False.

Returns:
    None

def check_required_fields(self, ignore_fields=list(), allow_no_resources=False):
    # type: (List[str], bool) -> None
    if self.is_requestable():
        self._check_required_fields('dataset-requestable', ignore_fields)
    else:
        self._check_required_fields('dataset', ignore_fields)
        if len(self.resources) == 0 and not allow_no_resources:
            raise HDXError('There are no resources! Please add at least one resource!')
        for resource in self.resources:
            ignore_fields = ['package_id']
            resource.check_required_fields(ignore_fields=ignore_fields)
814,363
Helper method to add new resource from dataset including filestore.

Args:
    new_resource (hdx.data.Resource): New resource from dataset
    ignore_fields (List[str]): List of fields to ignore when checking resource
    filestore_resources (List[hdx.data.Resource]): List of resources that use filestore (to be appended to)

Returns:
    None

def _dataset_merge_filestore_newresource(self, new_resource, ignore_fields, filestore_resources):
    # type: (hdx.data.Resource, List[str], List[hdx.data.Resource]) -> None
    new_resource.check_required_fields(ignore_fields=ignore_fields)
    self.resources.append(new_resource)
    if new_resource.get_file_to_upload():
        filestore_resources.append(new_resource)
        new_resource['url'] = Dataset.temporary_url
814,365
Helper method to create files in filestore by updating resources.

Args:
    filestore_resources (List[hdx.data.Resource]): List of resources that use filestore
    create_default_views (bool): Whether to call package_create_default_resource_views.
    hxl_update (bool): Whether to call package_hxl_update.

Returns:
    None

def _add_filestore_resources(self, filestore_resources, create_default_views, hxl_update):
    # type: (List[hdx.data.Resource], bool, bool) -> None
    for resource in filestore_resources:
        for created_resource in self.data['resources']:
            if resource['name'] == created_resource['name']:
                merge_two_dictionaries(resource.data, created_resource)
                del resource['url']
                resource.update_in_hdx()
                merge_two_dictionaries(created_resource, resource.data)
                break
    self.init_resources()
    self.separate_resources()
    if create_default_views:
        self.create_default_views()
    if hxl_update:
        self.hxl_update()
814,366
Get all dataset names in HDX.

Args:
    configuration (Optional[Configuration]): HDX configuration. Defaults to global configuration.
    **kwargs: See below
    limit (int): Number of rows to return. Defaults to all dataset names.
    offset (int): Offset in the complete result for where the set of returned dataset names should begin

Returns:
    List[str]: list of all dataset names in HDX

def get_all_dataset_names(configuration=None, **kwargs):
    # type: (Optional[Configuration], Any) -> List[str]
    dataset = Dataset(configuration=configuration)
    dataset['id'] = 'all dataset names'  # only for error message if produced
    return dataset._write_to_hdx('list', kwargs, 'id')
814,371
Get all resources from a list of datasets (such as returned by search).

Args:
    datasets (List[Dataset]): list of datasets

Returns:
    List[hdx.data.resource.Resource]: list of resources within those datasets

def get_all_resources(datasets):
    # type: (List['Dataset']) -> List[hdx.data.resource.Resource]
    resources = []
    for dataset in datasets:
        for resource in dataset.get_resources():
            resources.append(resource)
    return resources
814,373
Get supplied dataset date as string in specified format. If no format is supplied, an ISO 8601 string is returned.

Args:
    dataset_date (Optional[datetime.datetime]): dataset date in datetime.datetime format
    date_format (Optional[str]): Date format. None is taken to be ISO 8601. Defaults to None.

Returns:
    Optional[str]: Dataset date string or None if no date is set

def _get_formatted_date(dataset_date, date_format=None):
    # type: (Optional[datetime], Optional[str]) -> Optional[str]
    if dataset_date:
        if date_format:
            return dataset_date.strftime(date_format)
        else:
            return dataset_date.date().isoformat()
    else:
        return None
814,376
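A small self-contained check of the two formatting paths above, using only the standard library.

from datetime import datetime

# Mirrors _get_formatted_date: an explicit format uses strftime,
# no format falls back to an ISO 8601 date string.
date = datetime(2020, 3, 14, 15, 9)
print(date.strftime('%d/%m/%Y'))   # 14/03/2020
print(date.date().isoformat())     # 2020-03-14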
Get dataset date as string in specified format. For range returns start date. If no format is supplied, an ISO 8601 string is returned.

Args:
    date_format (Optional[str]): Date format. None is taken to be ISO 8601. Defaults to None.

Returns:
    Optional[str]: Dataset date string or None if no date is set

def get_dataset_date(self, date_format=None):
    # type: (Optional[str]) -> Optional[str]
    dataset_date = self.get_dataset_date_as_datetime()
    return self._get_formatted_date(dataset_date, date_format)
814,377
Get dataset end date as string in specified format. For range returns end date. If no format is supplied, an ISO 8601 string is returned.

Args:
    date_format (Optional[str]): Date format. None is taken to be ISO 8601. Defaults to None.

Returns:
    Optional[str]: Dataset date string or None if no date is set

def get_dataset_end_date(self, date_format=None):
    # type: (Optional[str]) -> Optional[str]
    dataset_date = self.get_dataset_end_date_as_datetime()
    return self._get_formatted_date(dataset_date, date_format)
814,378
Set dataset date from datetime.datetime object.

Args:
    dataset_date (datetime.datetime): Dataset date
    dataset_end_date (Optional[datetime.datetime]): Dataset end date

Returns:
    None

def set_dataset_date_from_datetime(self, dataset_date, dataset_end_date=None):
    # type: (datetime, Optional[datetime]) -> None
    start_date = dataset_date.strftime('%m/%d/%Y')
    if dataset_end_date is None:
        self.data['dataset_date'] = start_date
    else:
        end_date = dataset_end_date.strftime('%m/%d/%Y')
        self.data['dataset_date'] = '%s-%s' % (start_date, end_date)
814,379
Parse dataset date from string using specified format. If no format is supplied, the function will guess. For unambiguous formats, this should be fine.

Args:
    dataset_date (str): Dataset date string
    date_format (Optional[str]): Date format. If None is given, will attempt to guess. Defaults to None.

Returns:
    datetime.datetime

def _parse_date(dataset_date, date_format):
    # type: (str, Optional[str]) -> datetime
    if date_format is None:
        try:
            return parser.parse(dataset_date)
        except (ValueError, OverflowError) as e:
            raisefrom(HDXError, 'Invalid dataset date!', e)
    else:
        try:
            return datetime.strptime(dataset_date, date_format)
        except ValueError as e:
            raisefrom(HDXError, 'Invalid dataset date!', e)
814,380
Set dataset date from string using specified format. If no format is supplied, the function will guess. For unambiguous formats, this should be fine.

Args:
    dataset_date (str): Dataset date string
    dataset_end_date (Optional[str]): Dataset end date string
    date_format (Optional[str]): Date format. If None is given, will attempt to guess. Defaults to None.

Returns:
    None

def set_dataset_date(self, dataset_date, dataset_end_date=None, date_format=None):
    # type: (str, Optional[str], Optional[str]) -> None
    parsed_date = self._parse_date(dataset_date, date_format)
    if dataset_end_date is None:
        self.set_dataset_date_from_datetime(parsed_date)
    else:
        parsed_end_date = self._parse_date(dataset_end_date, date_format)
        self.set_dataset_date_from_datetime(parsed_date, parsed_end_date)
814,381
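A hedged sketch tying the date methods above together; the dataset object and date strings are placeholders.

# Hypothetical usage sketch.
dataset.set_dataset_date('2020-03-01', dataset_end_date='2020-03-31',
                         date_format='%Y-%m-%d')
# Internally this stores '03/01/2020-03/31/2020' in dataset.data['dataset_date'].
print(dataset.get_dataset_date())      # 2020-03-01 (ISO 8601 start date)
print(dataset.get_dataset_end_date())  # 2020-03-31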
Set dataset date as a range from year or start and end year.

Args:
    dataset_year (Union[str, int]): Dataset year given as string or int
    dataset_end_year (Optional[Union[str, int]]): Dataset end year given as string or int

Returns:
    None

def set_dataset_year_range(self, dataset_year, dataset_end_year=None):
    # type: (Union[str, int], Optional[Union[str, int]]) -> None
    if isinstance(dataset_year, int):
        dataset_date = '01/01/%d' % dataset_year
    elif isinstance(dataset_year, str):
        dataset_date = '01/01/%s' % dataset_year
    else:
        raise hdx.data.hdxobject.HDXError('dataset_year has type %s which is not supported!' % type(dataset_year).__name__)
    if dataset_end_year is None:
        dataset_end_year = dataset_year
    if isinstance(dataset_end_year, int):
        dataset_end_date = '31/12/%d' % dataset_end_year
    elif isinstance(dataset_end_year, str):
        dataset_end_date = '31/12/%s' % dataset_end_year
    else:
        raise hdx.data.hdxobject.HDXError('dataset_end_year has type %s which is not supported!' % type(dataset_end_year).__name__)
    self.set_dataset_date(dataset_date, dataset_end_date)
814,382
Set expected update frequency.

Args:
    update_frequency (str): Update frequency

Returns:
    None

def set_expected_update_frequency(self, update_frequency):
    # type: (str) -> None
    try:
        int(update_frequency)
    except ValueError:
        update_frequency = Dataset.transform_update_frequency(update_frequency)
    if not update_frequency:
        raise HDXError('Invalid update frequency supplied!')
    self.data['data_update_frequency'] = update_frequency
814,384
Remove a tag.

Args:
    tag (str): Tag to remove

Returns:
    bool: True if tag removed or False if not

def remove_tag(self, tag):
    # type: (str) -> bool
    return self._remove_hdxobject(self.data.get('tags'), tag, matchon='name')
814,385
Return the dataset's location.

Args:
    locations (Optional[List[str]]): Valid locations list. Defaults to list downloaded from HDX.

Returns:
    List[str]: list of locations or [] if there are none

def get_location(self, locations=None):
    # type: (Optional[List[str]]) -> List[str]
    countries = self.data.get('groups', None)
    if not countries:
        return list()
    return [Locations.get_location_from_HDX_code(x['name'], locations=locations,
                                                 configuration=self.configuration)
            for x in countries]
814,386
Remove a location.

Args:
    location (str): Location to remove

Returns:
    bool: True if location removed or False if not

def remove_location(self, location):
    # type: (str) -> bool
    res = self._remove_hdxobject(self.data.get('groups'), location, matchon='name')
    if not res:
        res = self._remove_hdxobject(self.data.get('groups'), location.upper(), matchon='name')
    if not res:
        res = self._remove_hdxobject(self.data.get('groups'), location.lower(), matchon='name')
    return res
814,391
Set the dataset's maintainer.

Args:
    maintainer (Union[User,Dict,str]): Either a user id or User metadata from a User object or dictionary.

Returns:
    None

def set_maintainer(self, maintainer):
    # type: (Union[hdx.data.user.User,Dict,str]) -> None
    if isinstance(maintainer, hdx.data.user.User) or isinstance(maintainer, dict):
        if 'id' not in maintainer:
            maintainer = hdx.data.user.User.read_from_hdx(maintainer['name'], configuration=self.configuration)
        maintainer = maintainer['id']
    elif not isinstance(maintainer, str):
        raise HDXError('Type %s cannot be added as a maintainer!' % type(maintainer).__name__)
    if is_valid_uuid(maintainer) is False:
        raise HDXError('%s is not a valid user id for a maintainer!' % maintainer)
    self.data['maintainer'] = maintainer
814,393
Set the dataset's organization.

Args:
    organization (Union[Organization,Dict,str]): Either an Organization id or Organization metadata from an Organization object or dictionary.

Returns:
    None

def set_organization(self, organization):
    # type: (Union[hdx.data.organization.Organization,Dict,str]) -> None
    if isinstance(organization, hdx.data.organization.Organization) or isinstance(organization, dict):
        if 'id' not in organization:
            organization = hdx.data.organization.Organization.read_from_hdx(organization['name'], configuration=self.configuration)
        organization = organization['id']
    elif not isinstance(organization, str):
        raise HDXError('Type %s cannot be added as an organization!' % type(organization).__name__)
    if is_valid_uuid(organization) is False and organization != 'hdx':
        raise HDXError('%s is not a valid organization id!' % organization)
    self.data['owner_org'] = organization
814,395
Get dataset showcase dict.

Args:
    showcase (Union[Showcase,Dict,str]): Either a showcase id or Showcase metadata from a Showcase object or dictionary

Returns:
    dict: dataset showcase dict

def _get_dataset_showcase_dict(self, showcase):
    # type: (Union[hdx.data.showcase.Showcase, Dict, str]) -> Dict
    if isinstance(showcase, hdx.data.showcase.Showcase) or isinstance(showcase, dict):
        if 'id' not in showcase:
            showcase = hdx.data.showcase.Showcase.read_from_hdx(showcase['name'])
        showcase = showcase['id']
    elif not isinstance(showcase, str):
        raise HDXError('Type %s cannot be added as a showcase!' % type(showcase).__name__)
    if is_valid_uuid(showcase) is False:
        raise HDXError('%s is not a valid showcase id!' % showcase)
    return {'package_id': self.data['id'], 'showcase_id': showcase}
814,397
Add dataset to showcase.

Args:
    showcase (Union[Showcase,Dict,str]): Either a showcase id or showcase metadata from a Showcase object or dictionary
    showcases_to_check (List[Showcase]): list of showcases against which to check existence of showcase. Defaults to showcases containing dataset.

Returns:
    bool: True if the showcase was added, False if already present

def add_showcase(self, showcase, showcases_to_check=None):
    # type: (Union[hdx.data.showcase.Showcase,Dict,str], List[hdx.data.showcase.Showcase]) -> bool
    dataset_showcase = self._get_dataset_showcase_dict(showcase)
    if showcases_to_check is None:
        showcases_to_check = self.get_showcases()
    for showcase in showcases_to_check:
        if dataset_showcase['showcase_id'] == showcase['id']:
            return False
    showcase = hdx.data.showcase.Showcase({'id': dataset_showcase['showcase_id']}, configuration=self.configuration)
    showcase._write_to_hdx('associate', dataset_showcase, 'package_id')
    return True
814,398
Add dataset to multiple showcases.

Args:
    showcases (List[Union[Showcase,Dict,str]]): A list of either showcase ids or showcase metadata from Showcase objects or dictionaries
    showcases_to_check (List[Showcase]): list of showcases against which to check existence of showcase. Defaults to showcases containing dataset.

Returns:
    bool: True if all showcases added or False if any already present

def add_showcases(self, showcases, showcases_to_check=None):
    # type: (List[Union[hdx.data.showcase.Showcase,Dict,str]], List[hdx.data.showcase.Showcase]) -> bool
    if showcases_to_check is None:
        showcases_to_check = self.get_showcases()
    allshowcasesadded = True
    for showcase in showcases:
        if not self.add_showcase(showcase, showcases_to_check=showcases_to_check):
            allshowcasesadded = False
    return allshowcasesadded
814,399
Remove dataset from showcase.

Args:
    showcase (Union[Showcase,Dict,str]): Either a showcase id string or showcase metadata from a Showcase object or dictionary

Returns:
    None

def remove_showcase(self, showcase):
    # type: (Union[hdx.data.showcase.Showcase,Dict,str]) -> None
    dataset_showcase = self._get_dataset_showcase_dict(showcase)
    showcase = hdx.data.showcase.Showcase({'id': dataset_showcase['showcase_id']}, configuration=self.configuration)
    showcase._write_to_hdx('disassociate', dataset_showcase, 'package_id')
814,400
Set the dataset to be of type requestable or not.

Args:
    requestable (bool): Set whether dataset is requestable. Defaults to True.

Returns:
    None

def set_requestable(self, requestable=True):
    # type: (bool) -> None
    self.data['is_requestdata_type'] = requestable
    if requestable:
        self.data['private'] = False
814,401
Set the resource that will be used for displaying QuickCharts in dataset preview.

Args:
    resource (Union[hdx.data.resource.Resource,Dict,str,int]): Either resource id or name, resource metadata from a Resource object or a dictionary or position

Returns:
    bool: Returns True if resource for QuickCharts in dataset preview set or False if not

def set_quickchart_resource(self, resource):
    # type: (Union[hdx.data.resource.Resource,Dict,str,int]) -> bool
    if isinstance(resource, int) and not isinstance(resource, bool):
        resource = self.get_resources()[resource]
    if isinstance(resource, hdx.data.resource.Resource) or isinstance(resource, dict):
        res = resource.get('id')
        if res is None:
            resource = resource['name']
        else:
            resource = res
    elif not isinstance(resource, str):
        raise hdx.data.hdxobject.HDXError('Resource id cannot be found in type %s!' % type(resource).__name__)
    if is_valid_uuid(resource) is True:
        search = 'id'
    else:
        search = 'name'
    changed = False
    for dataset_resource in self.resources:
        if dataset_resource[search] == resource:
            dataset_resource.enable_dataset_preview()
            self.preview_resource()
            changed = True
        else:
            dataset_resource.disable_dataset_preview()
    return changed
814,404
Create default resource views for all resources in dataset.

Args:
    create_datastore_views (bool): Whether to try to create resource views that point to the datastore

Returns:
    None

def create_default_views(self, create_datastore_views=False):
    # type: (bool) -> None
    package = deepcopy(self.data)
    if self.resources:
        package['resources'] = self._convert_hdxobjects(self.resources)
    data = {'package': package, 'create_datastore_views': create_datastore_views}
    self._write_to_hdx('create_default_views', data, 'package')
814,405
Calls the remote CKAN.

Args:
    *args: Arguments to pass to remote CKAN call_action method
    **kwargs: Keyword arguments to pass to remote CKAN call_action method

Returns:
    Dict: The response from the remote CKAN call_action method

def call_remoteckan(self, *args, **kwargs):
    # type: (Any, Any) -> Dict
    requests_kwargs = kwargs.get('requests_kwargs', dict())
    credentials = self._get_credentials()
    if credentials:
        requests_kwargs['auth'] = credentials
    kwargs['requests_kwargs'] = requests_kwargs
    apikey = kwargs.get('apikey', self.get_api_key())
    kwargs['apikey'] = apikey
    return self.remoteckan().call_action(*args, **kwargs)
814,408
Overwrite keyword arguments with environment variables.

Args:
    **kwargs: See below
    hdx_url (str): HDX url to use. Overrides hdx_site.
    hdx_site (str): HDX site to use eg. prod, test. Defaults to test.
    hdx_key (str): Your HDX key. Ignored if hdx_read_only = True.

Returns:
    kwargs: Changed keyword arguments

def _environment_variables(**kwargs):
    # type: (Any) -> Any
    hdx_key = os.getenv('HDX_KEY')
    if hdx_key is not None:
        kwargs['hdx_key'] = hdx_key
    hdx_url = os.getenv('HDX_URL')
    if hdx_url is not None:
        kwargs['hdx_url'] = hdx_url
    else:
        hdx_site = os.getenv('HDX_SITE')
        if hdx_site is not None:
            kwargs['hdx_site'] = hdx_site
    return kwargs
814,409
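A small self-contained check of the override precedence above (HDX_URL wins and HDX_SITE is then never consulted), using os.environ directly; the URL is a placeholder.

import os

# HDX_URL takes precedence: when it is set, HDX_SITE is ignored.
os.environ['HDX_URL'] = 'https://data.example.org'
os.environ['HDX_SITE'] = 'test'

kwargs = {'hdx_site': 'prod'}
hdx_url = os.getenv('HDX_URL')
if hdx_url is not None:
    kwargs['hdx_url'] = hdx_url
elif os.getenv('HDX_SITE') is not None:
    kwargs['hdx_site'] = os.getenv('HDX_SITE')
print(kwargs)  # {'hdx_site': 'prod', 'hdx_url': 'https://data.example.org'}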
Set up remote CKAN from provided CKAN or by creating from configuration.

Args:
    remoteckan (Optional[ckanapi.RemoteCKAN]): CKAN instance. Defaults to setting one up from configuration.

Returns:
    None

def setup_remoteckan(self, remoteckan=None, **kwargs):
    # type: (Optional[ckanapi.RemoteCKAN], Any) -> None
    if remoteckan is None:
        self._remoteckan = self.create_remoteckan(self.get_hdx_site_url(),
                                                  full_agent=self.get_user_agent(),
                                                  **kwargs)
    else:
        self._remoteckan = remoteckan
814,411
Reads the resource view given by identifier from HDX and returns ResourceView object.

Args:
    identifier (str): Identifier of resource view
    configuration (Optional[Configuration]): HDX configuration. Defaults to global configuration.

Returns:
    Optional[ResourceView]: ResourceView object if successful read, None if not

def read_from_hdx(identifier, configuration=None):
    # type: (str, Optional[Configuration]) -> Optional['ResourceView']
    resourceview = ResourceView(configuration=configuration)
    result = resourceview._load_from_hdx('resource view', identifier)
    if result:
        return resourceview
    return None
814,428
Reads all resource views for a resource given by identifier from HDX and returns a list of ResourceView objects.

Args:
    identifier (str): Identifier of resource
    configuration (Optional[Configuration]): HDX configuration. Defaults to global configuration.

Returns:
    List[ResourceView]: List of ResourceView objects

def get_all_for_resource(identifier, configuration=None):
    # type: (str, Optional[Configuration]) -> List['ResourceView']
    resourceview = ResourceView(configuration=configuration)
    success, result = resourceview._read_from_hdx('resource view', identifier, 'id',
                                                  ResourceView.actions()['list'])
    resourceviews = list()
    if success:
        for resourceviewdict in result:
            resourceview = ResourceView(resourceviewdict, configuration=configuration)
            resourceviews.append(resourceview)
    return resourceviews
814,429
Copies all fields except id, resource_id and package_id from another resource view.

Args:
    resource_view (Union[ResourceView,Dict,str]): Either a resource view id or resource view metadata either from a ResourceView object or a dictionary

Returns:
    None

def copy(self, resource_view):
    # type: (Union[ResourceView,Dict,str]) -> None
    if isinstance(resource_view, str):
        if is_valid_uuid(resource_view) is False:
            raise HDXError('%s is not a valid resource view id!' % resource_view)
        resource_view = ResourceView.read_from_hdx(resource_view)
    if not isinstance(resource_view, dict) and not isinstance(resource_view, ResourceView):
        raise HDXError('%s is not a valid resource view!' % resource_view)
    for key in resource_view:
        if key not in ('id', 'resource_id', 'package_id'):
            self.data[key] = resource_view[key]
814,432
Reads the user given by identifier from HDX and returns User object.

Args:
    identifier (str): Identifier of user
    configuration (Optional[Configuration]): HDX configuration. Defaults to global configuration.

Returns:
    Optional[User]: User object if successful read, None if not

def read_from_hdx(identifier, configuration=None):
    # type: (str, Optional[Configuration]) -> Optional['User']
    user = User(configuration=configuration)
    result = user._load_from_hdx('user', identifier)
    if result:
        return user
    return None
814,436
Emails a user.

Args:
    subject (str): Email subject
    text_body (str): Plain text email body
    html_body (str): HTML email body
    sender (Optional[str]): Email sender. Defaults to SMTP username.
    **kwargs: See below
    mail_options (List): Mail options (see smtplib documentation)
    rcpt_options (List): Recipient options (see smtplib documentation)

Returns:
    None

def email(self, subject, text_body, html_body=None, sender=None, **kwargs):
    # type: (str, str, Optional[str], Optional[str], Any) -> None
    self.configuration.emailer().send([self.data['email']], subject, text_body,
                                      html_body=html_body, sender=sender, **kwargs)
814,439
Get all users in HDX.

Args:
    configuration (Optional[Configuration]): HDX configuration. Defaults to global configuration.
    **kwargs: See below
    q (str): Restrict to names containing a string. Defaults to all users.
    order_by (str): Field by which to sort - any user field or edits (number_of_edits). Defaults to 'name'.

Returns:
    List[User]: List of all users in HDX

def get_all_users(configuration=None, **kwargs):
    # type: (Optional[Configuration], Any) -> List['User']
    user = User(configuration=configuration)
    user['id'] = 'all users'  # only for error message if produced
    result = user._write_to_hdx('list', kwargs, 'id')
    users = list()
    if result:
        for userdict in result:
            user = User(userdict, configuration=configuration)
            users.append(user)
    else:
        logger.debug(result)
    return users
814,440
Get organizations in HDX that this user is a member of.

Args:
    permission (str): Permission to check for. Defaults to 'read'.

Returns:
    List[Organization]: List of organizations in HDX that this user is a member of

def get_organizations(self, permission='read'):
    # type: (str) -> List['Organization']
    success, result = self._read_from_hdx('user', self.data['name'], 'id',
                                          self.actions()['listorgs'],
                                          permission=permission)
    organizations = list()
    if success:
        for organizationdict in result:
            organization = hdx.data.organization.Organization.read_from_hdx(organizationdict['id'])
            organizations.append(organization)
    return organizations
814,442
Facade to simplify project setup that calls project main function.

Args:
    projectmainfn ((None) -> None): main function of project
    **kwargs: configuration parameters to pass to HDX Configuration class

Returns:
    None

def facade(projectmainfn, **kwargs):
    # (Callable[[None], None], Any) -> None

    #
    # Setting up configuration
    #
    site_url = Configuration._create(**kwargs)

    logger.info('--------------------------------------------------')
    logger.info('> Using HDX Python API Library %s' % Configuration.apiversion)
    logger.info('> HDX Site: %s' % site_url)

    UserAgent.user_agent = Configuration.read().user_agent

    projectmainfn()
814,446
Initializes a Python struct-based byte stream operation.

Args:
    format_string (str): format string as used by Python struct.

Raises:
    FormatError: if the struct operation cannot be determined from the data type definition.

def __init__(self, format_string):
    try:
        struct_object = struct.Struct(format_string)
    except (TypeError, struct.error) as exception:
        raise errors.FormatError((
            'Unable to create struct object from data type definition '
            'with error: {0!s}').format(exception))

    super(StructOperation, self).__init__()
    self._struct = struct_object
    self._struct_format_string = format_string
814,456
Reads values from a byte stream.

Args:
    byte_stream (bytes): byte stream.

Returns:
    tuple[object, ...]: values copied from the byte stream.

Raises:
    IOError: if byte stream cannot be read.
    OSError: if byte stream cannot be read.

def ReadFrom(self, byte_stream):
    try:
        return self._struct.unpack_from(byte_stream)
    except (TypeError, struct.error) as exception:
        raise IOError('Unable to read byte stream with error: {0!s}'.format(
            exception))
814,457
Writes values to a byte stream.

Args:
    values (tuple[object, ...]): values to copy to the byte stream.

Returns:
    bytes: byte stream.

Raises:
    IOError: if byte stream cannot be written.
    OSError: if byte stream cannot be written.

def WriteTo(self, values):
    try:
        return self._struct.pack(*values)
    except (TypeError, struct.error) as exception:
        raise IOError('Unable to write stream with error: {0!s}'.format(
            exception))
814,458
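A self-contained round trip with the standard struct module, mirroring what the ReadFrom/WriteTo pair above does internally; the '<I' format string is just an example.

import struct

# Pack and unpack a little-endian 32-bit unsigned integer.
operation = struct.Struct('<I')
byte_stream = operation.pack(305419896)      # b'xV4\x12'
values = operation.unpack_from(byte_stream)  # (305419896,)
print(hex(values[0]))                        # 0x12345678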
Reads the showcase given by identifier from HDX and returns Showcase object.

Args:
    identifier (str): Identifier of showcase
    configuration (Optional[Configuration]): HDX configuration. Defaults to global configuration.

Returns:
    Optional[Showcase]: Showcase object if successful read, None if not

def read_from_hdx(identifier, configuration=None):
    # type: (str, Optional[Configuration]) -> Optional['Showcase']
    showcase = Showcase(configuration=configuration)
    result = showcase._load_from_hdx('showcase', identifier)
    if result:
        return showcase
    return None
814,480
Get showcase dataset dict.

Args:
    dataset (Union[Dataset,Dict,str]): Either a dataset id or Dataset metadata from a Dataset object or dictionary

Returns:
    Dict: showcase dataset dict

def _get_showcase_dataset_dict(self, dataset):
    # type: (Union[hdx.data.dataset.Dataset,Dict,str]) -> Dict
    if isinstance(dataset, hdx.data.dataset.Dataset) or isinstance(dataset, dict):
        if 'id' not in dataset:
            dataset = hdx.data.dataset.Dataset.read_from_hdx(dataset['name'])
        dataset = dataset['id']
    elif not isinstance(dataset, str):
        raise hdx.data.hdxobject.HDXError('Type %s cannot be added as a dataset!' % type(dataset).__name__)
    if is_valid_uuid(dataset) is False:
        raise hdx.data.hdxobject.HDXError('%s is not a valid dataset id!' % dataset)
    return {'showcase_id': self.data['id'], 'package_id': dataset}
814,482
Add a dataset.

Args:
    dataset (Union[Dataset,Dict,str]): Either a dataset id or dataset metadata either from a Dataset object or a dictionary
    datasets_to_check (List[Dataset]): List of datasets against which to check existence of dataset. Defaults to datasets in showcase.

Returns:
    bool: True if the dataset was added, False if already present

def add_dataset(self, dataset, datasets_to_check=None):
    # type: (Union[hdx.data.dataset.Dataset,Dict,str], List[hdx.data.dataset.Dataset]) -> bool
    showcase_dataset = self._get_showcase_dataset_dict(dataset)
    if datasets_to_check is None:
        datasets_to_check = self.get_datasets()
    for dataset in datasets_to_check:
        if showcase_dataset['package_id'] == dataset['id']:
            return False
    self._write_to_hdx('associate', showcase_dataset, 'package_id')
    return True
814,483
Add multiple datasets.

Args:
    datasets (List[Union[Dataset,Dict,str]]): A list of either dataset ids or dataset metadata from Dataset objects or dictionaries
    datasets_to_check (List[Dataset]): List of datasets against which to check existence of dataset. Defaults to datasets in showcase.

Returns:
    bool: True if all datasets added or False if any already present

def add_datasets(self, datasets, datasets_to_check=None):
    # type: (List[Union[hdx.data.dataset.Dataset,Dict,str]], List[hdx.data.dataset.Dataset]) -> bool
    if datasets_to_check is None:
        datasets_to_check = self.get_datasets()
    alldatasetsadded = True
    for dataset in datasets:
        if not self.add_dataset(dataset, datasets_to_check=datasets_to_check):
            alldatasetsadded = False
    return alldatasetsadded
814,484
Append values at the end of the list. Allows chaining.

Args:
    values: values to be appended at the end.

Example:

    >>> from ww import l
    >>> lst = l([])
    >>> lst.append(1)
    [1]
    >>> lst
    [1]
    >>> lst.append(2, 3).append(4, 5)
    [1, 2, 3, 4, 5]
    >>> lst
    [1, 2, 3, 4, 5]

def append(self, *values):
    for value in values:
        list.append(self, value)
    return self
814,488
Add all values of all iterables at the end of the list.

Args:
    iterables: iterables whose contents to add at the end.

Example:

    >>> from ww import l
    >>> lst = l([])
    >>> lst.extend([1, 2])
    [1, 2]
    >>> lst
    [1, 2]
    >>> lst.extend([3, 4]).extend([5, 6])
    [1, 2, 3, 4, 5, 6]
    >>> lst
    [1, 2, 3, 4, 5, 6]

def extend(self, *iterables):
    for value in iterables:
        list.extend(self, value)
    return self
814,489
Process value for writing into a cell.

Args:
    value: any type of variable

Returns:
    json serialized value if value is list or dict, else value

def normalize_cell_value(value):
    if isinstance(value, dict) or isinstance(value, list):
        return json.dumps(value)
    return value
814,490
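A quick self-contained check of the behaviour above (the function is restated so the snippet runs on its own).

import json

def normalize_cell_value(value):
    # Containers become JSON strings; scalars pass through unchanged.
    if isinstance(value, (dict, list)):
        return json.dumps(value)
    return value

print(normalize_cell_value({'a': 1}))  # {"a": 1}
print(normalize_cell_value([1, 2]))    # [1, 2]
print(normalize_cell_value('plain'))   # plain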
Creates an Excel file containing data returned by the Analytics API.

Args:
    data: Analytics API data as a list of dicts
    output_file_name: File name for output Excel file (use .xlsx extension).
    result_info_key: the key in api_data dicts that contains the data results
    identifier_keys: the list of keys used as requested identifiers (address, zipcode, block_id, etc)

def export_analytics_data_to_excel(data, output_file_name, result_info_key, identifier_keys):
    workbook = create_excel_workbook(data, result_info_key, identifier_keys)
    workbook.save(output_file_name)
    print('Saved Excel file to {}'.format(output_file_name))
814,590
Creates CSV files containing data returned by the Analytics API. Creates one file per requested endpoint and saves it into the specified output_folder.

Args:
    data: Analytics API data as a list of dicts
    output_folder: Path to a folder to save the CSV files into
    result_info_key: the key in api_data dicts that contains the data results
    identifier_keys: the list of keys used as requested identifiers (address, zipcode, block_id, etc)

def export_analytics_data_to_csv(data, output_folder, result_info_key, identifier_keys):
    workbook = create_excel_workbook(data, result_info_key, identifier_keys)

    suffix = '.csv'
    if not os.path.exists(output_folder):
        os.makedirs(output_folder)

    for worksheet in workbook.worksheets:
        file_name = utilities.convert_title_to_snake_case(worksheet.title)
        file_path = os.path.join(output_folder, file_name + suffix)

        mode = 'w'
        if sys.version_info[0] < 3:
            mode = 'wb'

        with io.open(file_path, mode) as output_file:
            csv_writer = csv.writer(output_file)
            for row in worksheet.rows:
                csv_writer.writerow([cell.value for cell in row])

    print('Saved CSV files to {}'.format(output_folder))
814,591
Adjust column width in worksheet.

Args:
    worksheet: worksheet to be adjusted

def adjust_column_width(worksheet):
    dims = {}
    padding = 1
    for row in worksheet.rows:
        for cell in row:
            if not cell.value:
                continue
            dims[cell.column] = max(
                dims.get(cell.column, 0),
                len(str(cell.value))
            )
    for col, value in list(dims.items()):
        worksheet.column_dimensions[col].width = value + padding
814,600
Factory for creating the correct type of Response based on the data.

Args:
    endpoint_name (str) - The endpoint of the request, such as "property/value"
    json_body - The response body in json format.
    original_response (response object) - server response returned from an http request.

def create(cls, endpoint_name, json_body, original_response):
    if endpoint_name == "property/value_report":
        return ValueReportResponse(endpoint_name, json_body, original_response)
    if endpoint_name == "property/rental_report":
        return RentalReportResponse(endpoint_name, json_body, original_response)
    prefix = endpoint_name.split("/")[0]
    if prefix == "block":
        return BlockResponse(endpoint_name, json_body, original_response)
    if prefix == "zip":
        return ZipCodeResponse(endpoint_name, json_body, original_response)
    if prefix == "msa":
        return MsaResponse(endpoint_name, json_body, original_response)
    return PropertyResponse(endpoint_name, json_body, original_response)
814,605
Call the value_report component. Value Report only supports a single address.

Args:
    - address
    - zipcode

Kwargs:
    - report_type - "full" or "summary". Default is "full".
    - format_type - "json", "pdf", "xlsx" or "all". Default is "json".

def value_report(self, address, zipcode, report_type="full", format_type="json"):
    query_params = {
        "report_type": report_type,
        "format": format_type,
        "address": address,
        "zipcode": zipcode
    }
    return self._api_client.fetch_synchronous("property/value_report", query_params)
814,724
Call the rental_report component. Rental Report only supports a single address.

Args:
    - address
    - zipcode

Kwargs:
    - format_type - "json", "xlsx" or "all". Default is "json".

def rental_report(self, address, zipcode, format_type="json"):
    # only json is supported by rental report.
    query_params = {
        "format": format_type,
        "address": address,
        "zipcode": zipcode
    }
    return self._api_client.fetch_synchronous("property/rental_report", query_params)
814,725
Call the zip component_mget endpoint.

Args:
    - zip_data - As described in the class docstring.
    - components - A list of strings for each component to include in the request. Example: ["zip/details", "zip/volatility"]

def component_mget(self, zip_data, components):
    if not isinstance(components, list):
        print("Components param must be a list")
        return
    query_params = {"components": ",".join(components)}
    return self.fetch_identifier_component(
        "zip/component_mget", zip_data, query_params)
814,728
Deserialize property json data into a Property object.

Args:
    json_data (dict): The json data for this property

Returns:
    Property object

def create_from_json(cls, json_data):
    prop = Property()
    address_info = json_data["address_info"]
    prop.address = address_info["address"]
    prop.block_id = address_info["block_id"]
    prop.zipcode = address_info["zipcode"]
    prop.zipcode_plus4 = address_info["zipcode_plus4"]
    prop.address_full = address_info["address_full"]
    prop.city = address_info["city"]
    prop.county_fips = address_info["county_fips"]
    prop.geo_precision = address_info["geo_precision"]
    prop.lat = address_info["lat"]
    prop.lng = address_info["lng"]
    prop.slug = address_info["slug"]
    prop.state = address_info["state"]
    prop.unit = address_info["unit"]
    prop.meta = None
    if "meta" in json_data:
        prop.meta = json_data["meta"]
    prop.component_results = _create_component_results(json_data, "address_info")
    return prop
814,744
Deserialize block json data into a Block object.

Args:
    json_data (dict): The json data for this block

Returns:
    Block object

def create_from_json(cls, json_data):
    block = Block()
    block_info = json_data["block_info"]
    block.block_id = block_info["block_id"]
    block.num_bins = block_info["num_bins"] if "num_bins" in block_info else None
    block.property_type = block_info["property_type"] if "property_type" in block_info else None
    block.meta = json_data["meta"] if "meta" in json_data else None
    block.component_results = _create_component_results(json_data, "block_info")
    return block
814,746
Deserialize zipcode json data into a ZipCode object.

Args:
    json_data (dict): The json data for this zipcode

Returns:
    ZipCode object

def create_from_json(cls, json_data):
    zipcode = ZipCode()
    zipcode.zipcode = json_data["zipcode_info"]["zipcode"]
    zipcode.meta = json_data["meta"] if "meta" in json_data else None
    zipcode.component_results = _create_component_results(json_data, "zipcode_info")
    return zipcode
814,748
Deserialize msa json data into a Msa object.

Args:
    json_data (dict): The json data for this msa

Returns:
    Msa object

def create_from_json(cls, json_data):
    msa = Msa()
    msa.msa = json_data["msa_info"]["msa"]
    msa.meta = json_data["meta"] if "meta" in json_data else None
    msa.component_results = _create_component_results(json_data, "msa_info")
    return msa
814,750
Start yielding items when a condition arises.

Args:
    iterable: the iterable to filter.
    condition: if the callable returns True once, start yielding items. If it's not a callable, it will be converted to one as `lambda item: item == condition`.

Example:

    >>> list(starts_when(range(10), lambda x: x > 5))
    [6, 7, 8, 9]
    >>> list(starts_when(range(10), 7))
    [7, 8, 9]

def starts_when(iterable, condition):
    # type: (Iterable, Union[Callable, Any]) -> Iterable
    if not callable(condition):
        cond_value = condition

        def condition(x):
            return x == cond_value
    return itertools.dropwhile(lambda x: not condition(x), iterable)
814,755
Stop yielding items when a condition arises.

Args:
    iterable: the iterable to filter.
    condition: if the callable returns True once, stop yielding items. If it's not a callable, it will be converted to one as `lambda item: item == condition`.

Example:

    >>> list(stops_when(range(10), lambda x: x > 5))
    [0, 1, 2, 3, 4, 5]
    >>> list(stops_when(range(10), 7))
    [0, 1, 2, 3, 4, 5, 6]

def stops_when(iterable, condition):
    # type: (Iterable, Union[Callable, Any]) -> Iterable
    if not callable(condition):
        cond_value = condition

        def condition(x):
            return x == cond_value
    return itertools.takewhile(lambda x: not condition(x), iterable)
814,756
Return key, self[key] as generator for key in keys. Raise KeyError if a key does not exist.

Args:
    keys: Iterable containing keys

Example:

    >>> from ww import d
    >>> list(d({1: 1, 2: 2, 3: 3}).isubset(1, 3))
    [(1, 1), (3, 3)]

def isubset(self, *keys):
    # type: (*Hashable) -> ww.g
    return ww.g((key, self[key]) for key in keys)
814,849
Create a new d from keys and a value.

Args:
    iterable: Iterable containing keys
    value: value to associate with each key. If callable, the stored value will be value(key).

Returns:
    new DictWrapper

Example:

    >>> from ww import d
    >>> sorted(d.fromkeys('123', value=4).items())
    [('1', 4), ('2', 4), ('3', 4)]
    >>> sorted(d.fromkeys(range(3), value=lambda e: e**2).items())
    [(0, 0), (1, 1), (2, 4)]

def fromkeys(cls, iterable, value=None):
    # TODO : type: (Iterable, Union[Any, Callable]) -> DictWrapper
    # https://github.com/python/mypy/issues/2254
    if not callable(value):
        return cls(dict.fromkeys(iterable, value))
    return cls((key, value(key)) for key in iterable)
814,851
Merge other into a copy of self and return the new dict.

Args:
    other: dict to merge into self

Returns:
    Merged dict

Example:

    >>> from ww import d
    >>> current_dict = d({1: 1, 2: 2, 3: 3})
    >>> to_merge_dict = {3: 4, 4: 5}
    >>> current_dict + to_merge_dict
    {1: 1, 2: 2, 3: 4, 4: 5}

def __add__(self, other):
    # type: (dict) -> DictWrapper
    copy = self.__class__(self.copy())
    return copy.merge(other)
814,852
Merge self into a copy of other and return the new dict.

Args:
    other: dict to merge self into

Returns:
    Merged dict

Example:

    >>> from ww import d
    >>> current_dict = {1: 1, 2: 2, 3: 3}
    >>> to_merge_dict = d({3: 4, 4: 5})
    >>> current_dict + to_merge_dict
    {1: 1, 2: 2, 3: 4, 4: 5}

def __radd__(self, other):
    # type: (dict) -> DictWrapper
    copy = self.__class__(other.copy())
    return copy.merge(self)
814,853
Generates an Excel workbook object given api_data returned by the Analytics API.

Args:
    api_data: Analytics API data as a list of dicts (one per identifier)
    result_info_key: the key in api_data dicts that contains the data results
    identifier_keys: the list of keys used as requested identifiers (address, zipcode, block_id, etc)

Returns:
    openpyxl workbook object

def get_excel_workbook(api_data, result_info_key, identifier_keys):
    cleaned_data = []
    for item_data in api_data:
        result_info = item_data.pop(result_info_key, {})

        cleaned_item_data = {}
        if 'meta' in item_data:
            meta = item_data.pop('meta')
            cleaned_item_data['meta'] = meta

        for key in item_data:
            cleaned_item_data[key] = item_data[key]['result']

        cleaned_item_data[result_info_key] = result_info
        cleaned_data.append(cleaned_item_data)

    data_list = copy.deepcopy(cleaned_data)

    workbook = openpyxl.Workbook()
    write_worksheets(workbook, data_list, result_info_key, identifier_keys)
    return workbook
814,855
Writes rest of the worksheets to workbook.

Args:
    workbook: workbook to write into
    data_list: Analytics API data as a list of dicts
    result_info_key: the key in api_data dicts that contains the data results
    identifier_keys: the list of keys used as requested identifiers (address, zipcode, block_id, etc)

def write_worksheets(workbook, data_list, result_info_key, identifier_keys):
    # we can use the first item to figure out the worksheet keys
    worksheet_keys = get_worksheet_keys(data_list[0], result_info_key)

    for key in worksheet_keys:
        title = key.split('/')[1]
        title = utilities.convert_snake_to_title_case(title)
        title = KEY_TO_WORKSHEET_MAP.get(title, title)
        if key == 'property/nod':
            # the property/nod endpoint needs to be split into two worksheets
            create_property_nod_worksheets(workbook, data_list, result_info_key, identifier_keys)
        else:
            # all other endpoints are written to a single worksheet
            # Maximum 31 characters allowed in sheet title
            worksheet = workbook.create_sheet(title=title[:31])
            processed_data = process_data(key, data_list, result_info_key, identifier_keys)
            write_data(worksheet, processed_data)

    # remove the first, unused empty sheet
    workbook.remove_sheet(workbook.active)
814,856
Gets sorted keys from the dict, ignoring result_info_key and 'meta' key.

Args:
    data_dict: dict to pull keys from
    result_info_key: the key in api_data dicts that contains the data results

Returns:
    list of keys in the dict other than the result_info_key

def get_worksheet_keys(data_dict, result_info_key):
    keys = set(data_dict.keys())
    keys.remove(result_info_key)
    if 'meta' in keys:
        keys.remove('meta')
    return sorted(keys)
814,858
Gets all possible keys from a list of dicts, sorting by leading_columns first.

Args:
    data_list: list of dicts to pull keys from
    leading_columns: list of keys to put first in the result

Returns:
    list of keys to be included as columns in excel worksheet

def get_keys(data_list, leading_columns=LEADING_COLUMNS):
    all_keys = set().union(*(list(d.keys()) for d in data_list))
    leading_keys = []
    for key in leading_columns:
        if key not in all_keys:
            continue
        leading_keys.append(key)
        all_keys.remove(key)
    return leading_keys + sorted(all_keys)
814,859
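A self-contained check of the key-collection logic above, with a placeholder leading-column list standing in for the real LEADING_COLUMNS constant.

# Placeholder; the real LEADING_COLUMNS is defined elsewhere in the module.
LEADING_COLUMNS = ['address', 'zipcode']

def get_keys(data_list, leading_columns=LEADING_COLUMNS):
    all_keys = set().union(*(list(d.keys()) for d in data_list))
    leading_keys = []
    for key in leading_columns:
        if key not in all_keys:
            continue
        leading_keys.append(key)
        all_keys.remove(key)
    return leading_keys + sorted(all_keys)

rows = [{'zipcode': '01010', 'value': 1}, {'zipcode': '02020', 'beds': 3}]
print(get_keys(rows))  # ['zipcode', 'beds', 'value']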
Writes data into worksheet.

Args:
    worksheet: worksheet to write into
    data: data to be written

def write_data(worksheet, data):
    if not data:
        return

    if isinstance(data, list):
        rows = data
    else:
        rows = [data]

    if isinstance(rows[0], dict):
        keys = get_keys(rows)
        worksheet.append([utilities.convert_snake_to_title_case(key) for key in keys])
        for row in rows:
            values = [get_value_from_row(row, key) for key in keys]
            worksheet.append(values)
    elif isinstance(rows[0], list):
        for row in rows:
            values = [utilities.normalize_cell_value(value) for value in row]
            worksheet.append(values)
    else:
        for row in rows:
            worksheet.append([utilities.normalize_cell_value(row)])
814,860
Helper method to flatten a nested dict of dicts (one level).

Example: {'a': {'b': 'bbb'}} becomes {'a_-_b': 'bbb'}. The separator '_-_' gets formatted later for the column headers.

Args:
    data: the dict to flatten
    top_level_keys: a list of the top level keys to flatten ('a' in the example above)

def flatten_top_level_keys(data, top_level_keys):
    flattened_data = {}
    for top_level_key in top_level_keys:
        if data[top_level_key] is None:
            flattened_data[top_level_key] = None
        else:
            for key in data[top_level_key]:
                flattened_data['{}_-_{}'.format(top_level_key, key)] = data[top_level_key][key]
    return flattened_data
814,863
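A quick self-contained check of the flattening behaviour, restating the function so the snippet runs on its own.

def flatten_top_level_keys(data, top_level_keys):
    # One-level flatten: nested keys become 'outer_-_inner'; None passes through.
    flattened_data = {}
    for top_level_key in top_level_keys:
        if data[top_level_key] is None:
            flattened_data[top_level_key] = None
        else:
            for key in data[top_level_key]:
                flattened_data['{}_-_{}'.format(top_level_key, key)] = data[top_level_key][key]
    return flattened_data

data = {'a': {'b': 'bbb', 'c': 'ccc'}, 'd': None}
print(flatten_top_level_keys(data, ['a', 'd']))
# {'a_-_b': 'bbb', 'a_-_c': 'ccc', 'd': None}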
Create an authentication handler for HouseCanary API V1 requests.

Args:
    auth_key (string) - The HouseCanary API auth key
    auth_secret (string) - The HouseCanary API secret

def __init__(self, auth_key, auth_secret):
    self._auth_key = auth_key
    self._auth_secret = auth_secret
815,084
Returns a blocking generator yielding Slack event objects.

params:
 - etypes(str): If defined, Slack event type(s) not matching the filter will be ignored. See https://api.slack.com/events for a listing of valid event types.
 - idle_timeout(int): optional maximum amount of time (in seconds) to wait between events before returning

def events(self, *etypes, idle_timeout=None):
    while self._state != STATE_STOPPED:
        try:
            yield self.get_event(*etypes, timeout=idle_timeout)
        except Queue.Empty:
            log.info('idle timeout reached for events()')
            return
815,365
Send a message to a channel or group via Slack RTM socket, returning the resulting message object.

params:
 - text(str): Message text to send
 - channel(Channel): Target channel
 - confirm(bool): If True, wait for a reply-to confirmation before returning.

def send_msg(self, text, channel, confirm=True):
    self._send_id += 1
    msg = SlackMsg(self._send_id, channel.id, text)
    self.ws.send(msg.json)
    self._stats['messages_sent'] += 1

    if confirm:
        # Wait for confirmation our message was received
        for e in self.events():
            if e.get('reply_to') == self._send_id:
                msg.sent = True
                msg.ts = e.ts
                return msg
    else:
        return msg
815,366
Convert a 2D feature to a 3D feature by sampling a raster.

Parameters:
    raster (rasterio): raster to provide the z coordinate
    feature (dict): fiona feature record to convert

Returns:
    result (Point or LineString): shapely Point or LineString of xyz coordinate triples

def drape(raster, feature):
    coords = feature['geometry']['coordinates']
    geom_type = feature['geometry']['type']
    if geom_type == 'Point':
        xyz = sample(raster, [coords])
        result = Point(xyz[0])
    elif geom_type == 'LineString':
        xyz = sample(raster, coords)
        points = [Point(x, y, z) for x, y, z in xyz]
        result = LineString(points)
    else:
        logging.error('drape not implemented for {}'.format(geom_type))
        result = None  # avoid returning an unbound name for unsupported types
    return result
815,849
Sample a raster at given coordinates. Given a list of coordinates, return a list of x,y,z triples with z coordinates sampled from an input raster.

Parameters:
    raster (rasterio): raster dataset to sample
    coords: array of tuples containing coordinate pairs (x,y) or triples (x,y,z)

Returns:
    result: array of tuples containing coordinate triples (x,y,z)

def sample(raster, coords):
    if len(coords[0]) == 3:
        logging.info('Input is a 3D geometry, z coordinate will be updated.')
        z = raster.sample([(x, y) for x, y, z in coords], indexes=raster.indexes)
    else:
        z = raster.sample(coords, indexes=raster.indexes)
    result = [(vert[0], vert[1], vert_z) for vert, vert_z in zip(coords, z)]
    return result
815,850
Annotate locations in a string that contain periods as being true periods or periods that are a part of shorthand (and thus should not be treated as punctuation marks).

Arguments:
----------
    text : str
    split_locations : list<int>, same length as text.

def protect_shorthand(text, split_locations):
    word_matches = list(re.finditer(word_with_period, text))
    total_words = len(word_matches)

    for i, match in enumerate(word_matches):
        match_start = match.start()
        match_end = match.end()
        for char_pos in range(match_start, match_end):
            if split_locations[char_pos] == SHOULD_SPLIT and match_end - char_pos > 1:
                match_start = char_pos
        word = text[match_start:match_end]

        if not word.endswith('.'):
            # ensure that words contained within other words:
            # e.g. 'chocolate.Mountains of' -> 'chocolate. Mountains of'
            if (not word[0].isdigit() and
                    split_locations[match_start] == UNDECIDED):
                split_locations[match_start] = SHOULD_SPLIT
            continue

        period_pos = match_end - 1
        # this is not the last word, abbreviation
        # is not the final period of the sentence,
        # moreover:
        word_is_in_abbr = word[:-1].lower() in ABBR
        is_abbr_like = (
            word_is_in_abbr or
            one_letter_long_or_repeating.match(word[:-1]) is not None
        )
        is_digit = False if is_abbr_like else word[:-1].isdigit()
        is_last_word = i == (total_words - 1)
        is_ending = is_last_word and (match_end == len(text) or text[match_end:].isspace())
        is_not_ending = not is_ending
        abbreviation_and_not_end = (
            len(word) > 1 and
            is_abbr_like and
            is_not_ending
        )
        if abbreviation_and_not_end and (
                (not is_last_word and word_matches[i + 1].group(0)[0].islower()) or
                (not is_last_word and word_matches[i + 1].group(0) in PUNCT_SYMBOLS) or
                word[0].isupper() or
                word_is_in_abbr or
                len(word) == 2):
            # next word is lowercase (e.g. not a new sentence?), or next word
            # is punctuation or next word is totally uppercase (e.g. 'Mister.
            # ABAGNALE called to the stand')
            if split_locations[period_pos] == SHOULD_SPLIT and period_pos + 1 < len(split_locations):
                split_locations[period_pos + 1] = SHOULD_SPLIT
            split_locations[period_pos] = SHOULD_NOT_SPLIT
        elif (is_digit and
              len(word[:-1]) <= 2 and
              not is_last_word and
              word_matches[i + 1].group(0).lower() in MONTHS):
            # a date or weird number with a period:
            if split_locations[period_pos] == SHOULD_SPLIT and period_pos + 1 < len(split_locations):
                split_locations[period_pos + 1] = SHOULD_SPLIT
            split_locations[period_pos] = SHOULD_NOT_SPLIT
        elif split_locations[period_pos] == UNDECIDED:
            # split this period into its own segment:
            split_locations[period_pos] = SHOULD_SPLIT
816,709
Use an integer list to split the string contained in `text`.

Arguments:
----------
    text : str, same length as locations.
    locations : list<int>, contains values 'SHOULD_SPLIT', 'UNDECIDED', and 'SHOULD_NOT_SPLIT'. Will create strings between each 'SHOULD_SPLIT' locations.

Returns:
--------
    Generator<str> : the substrings of text corresponding to the slices given in locations.

def split_with_locations(text, locations):
    start = 0
    for pos, decision in enumerate(locations):
        if decision == SHOULD_SPLIT:
            if start != pos:
                yield text[start:pos]
            start = pos
    if start != len(text):
        yield text[start:]
816,710
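A self-contained run of the splitter above, with placeholder values assumed for the marker constants.

# Placeholder values for the tokenizer's marker constants.
SHOULD_SPLIT, UNDECIDED = 1, 0

def split_with_locations(text, locations):
    start = 0
    for pos, decision in enumerate(locations):
        if decision == SHOULD_SPLIT:
            if start != pos:
                yield text[start:pos]
            start = pos
    if start != len(text):
        yield text[start:]

text = 'ab cd'
# Split before the space (index 2) and before 'c' (index 3):
locations = [UNDECIDED, UNDECIDED, SHOULD_SPLIT, SHOULD_SPLIT, UNDECIDED]
print(list(split_with_locations(text, locations)))  # ['ab', ' ', 'cd']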
Convert a single string into a list of substrings split along punctuation and word boundaries. Keep whitespace intact by always attaching it to the previous token.

Arguments:
----------
    text : str
    normalize_ascii : bool, perform some replacements on non-ascii characters to canonicalize the string (defaults to True).

Returns:
--------
    list<str>, list of substring tokens.

def tokenize(text, normalize_ascii=True):
    # 1. If there's no punctuation, return immediately
    if no_punctuation.match(text):
        return [text]
    # 2. let's standardize the input text to ascii (if desired)
    # Note: this will no longer respect input-to-output character positions
    if normalize_ascii:
        # normalize these greco-roman characters to ascii:
        text = text.replace(u"œ", "oe").replace(u"æ", "ae")
        # normalize dashes:
        text = repeated_dash_converter.sub("-", text)
    # 3. let's construct an integer array of the possible split locations:
    split_locations = [UNDECIDED] * len(text)

    regexes = (
        pure_whitespace,
        left_quote_shifter,
        left_quote_converter,
        left_single_quote_converter,
        remaining_quote_converter,
        # regex can't fix this -> regex ca n't fix this
        english_nots,
        # you'll dig this -> you 'll dig this
        english_contractions,
        # the rhino's horns -> the rhino 's horns
        english_specific_appendages,
        # qu'a tu fais au rhino -> qu ' a tu fais au rhino,
        french_appendages
    )

    # 4. Mark end locations for specific regular expressions:
    for regex in regexes:
        mark_regex(regex, text, split_locations)

    begin_end_regexes = (
        multi_single_quote_finder,
        right_single_quote_converter,
        # use dashes as the breakpoint:
        # the rhino--truck -> the rhino -- truck
        simple_dash_finder if normalize_ascii else advanced_dash_finder,
        numerical_expression,
        url_file_finder,
        shifted_ellipses,
        # the #rhino! -> the # rhino ! ;
        # the rino[sic] -> the rino [ sic ]
        shifted_standard_punctuation
    )

    # 5. Mark begin and end locations for other regular expressions:
    for regex in begin_end_regexes:
        mark_begin_end_regex(regex, text, split_locations)

    # 6. Remove splitting on exceptional uses of periods:
    # I'm with Mr. -> I 'm with Mr. , I'm with Mister. -> I 'm with Mister .
    protect_shorthand(text, split_locations)

    if normalize_ascii:
        text = dash_converter.sub("-", text)

    # 7. Return the split string using the integer list:
    return list(split_with_locations(text, split_locations))
816,713
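With the module's regexes in place, usage of `tokenize` above is a single call. The output shown is illustrative of the contract in the docstring (whitespace rides along with the preceding token), not a captured run; exact tokens depend on the regex set.

tokens = tokenize("I can't see Mr. Smith today--sorry!")
print(tokens)
# illustrative output:
# ['I ', 'ca', "n't ", 'see ', 'Mr. ', 'Smith ', 'today', '--', 'sorry', '!']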
Description:

    Set the input. Call with no arguments to get the current setting.

Arguments:

    opt: string
        Name provided in the input list or key from the yaml file
        ("HDMI 1" or "hdmi_1")
def input(self, opt): for key in self.command['input']: if (key == opt) or (self.command['input'][key]['name'] == opt): return self._send_command(['input', key, 'command']) return False
817,017
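A usage sketch for `input` above; the `TV` class name and the constructor arguments are assumptions, since only the method body is shown here.

tv = TV('192.168.1.2', 10002)   # hypothetical constructor

tv.input('HDMI 1')    # matches on the friendly name
tv.input('hdmi_1')    # or on the yaml key
tv.input('Betamax')   # no such input -> returns False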
Description:

    Change channel (digital, over-the-air).
    Pass channels "XX.YY" as TV.digital_channel_air(XX, YY).

Arguments:

    opt1: integer
        1-99: Major Channel
    opt2: integer (optional)
        1-99: Minor Channel
def digital_channel_air(self, opt1='?', opt2='?'): if opt1 == '?': parameter = '?' elif opt2 == '?': parameter = str(opt1).rjust(4, "0") else: parameter = '{:02d}{:02d}'.format(opt1, opt2) return self._send_command('digital_channel_air', parameter)
817,018
Description:

    Change channel (digital cable).
    Pass channels "XXX.YYY" as TV.digital_channel_cable(XXX, YYY).

Arguments:

    opt1: integer
        1-999: Major Channel
    opt2: integer (optional)
        0-999: Minor Channel
def digital_channel_cable(self, opt1='?', opt2=0):
    if opt1 == '?':
        parameter = '?'
    elif self.command['digital_channel_cable_minor'] == '':
        # this model takes a single 4-digit channel parameter
        parameter = str(opt1).rjust(4, "0")
    else:
        # send the minor channel first, then the major channel
        self._send_command('digital_channel_cable_minor',
                           str(opt2).rjust(3, "0"))
        parameter = str(opt1).rjust(3, "0")
    return self._send_command('digital_channel_cable_major', parameter)
817,019
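The two channel methods above differ mainly in how they zero-pad their parameters. A quick standalone check of the formatting (plain Python, no TV involved):

# air: major channel only -> one 4-digit field, left-padded with zeros
print(str(7).rjust(4, "0"))              # -> '0007'
# air: major and minor -> two 2-digit fields packed together
print('{:02d}{:02d}'.format(7, 2))       # -> '0702'
# cable: major and minor each sent as separate 3-digit fields
print(str(125).rjust(3, "0"), str(1).rjust(3, "0"))   # -> '125 001'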
Perform aggregation.

Arguments:
    :_aggregations_params: Dict of aggregation params. Root key is an
        aggregation name. Required.
    :_raise_on_empty: Boolean indicating whether to raise an exception
        when an IndexNotFoundException occurs. Optional, defaults to
        False.
def aggregate(self, **params): _aggregations_params = params.pop('_aggregations_params', None) _raise_on_empty = params.pop('_raise_on_empty', False) if not _aggregations_params: raise Exception('Missing _aggregations_params') # Set limit so ES won't complain. It is ignored in the end params['_limit'] = 0 search_params = self.build_search_params(params) search_params.pop('size', None) search_params.pop('from_', None) search_params.pop('sort', None) search_params['body']['aggregations'] = _aggregations_params log.debug('Performing aggregation: {}'.format(_aggregations_params)) try: response = self.api.search(**search_params) except IndexNotFoundException: if _raise_on_empty: raise JHTTPNotFound( 'Aggregation failed: Index does not exist') return {} try: return response['aggregations'] except KeyError: raise JHTTPNotFound('No aggregations returned from ES')
817,191
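A sketch of driving `aggregate` above with a standard Elasticsearch terms aggregation; the engine instance name and the `status` field are made up for the example.

agg_params = {
    'statuses': {                      # root key names the aggregation
        'terms': {'field': 'status'},  # hypothetical field
    },
}

result = es_engine.aggregate(
    _aggregations_params=agg_params,
    _raise_on_empty=True)
# result -> {'statuses': {'buckets': [{'key': 'open', 'doc_count': 12}, ...]}}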
Set proper headers.

Sets the following headers:
    Allow
    Access-Control-Allow-Methods
    Access-Control-Allow-Headers

Arguments:
    :methods: Sequence of HTTP method names that are valid for the
        requested URI
def _set_options_headers(self, methods): request = self.request response = request.response response.headers['Allow'] = ', '.join(sorted(methods)) if 'Access-Control-Request-Method' in request.headers: response.headers['Access-Control-Allow-Methods'] = \ ', '.join(sorted(methods)) if 'Access-Control-Request-Headers' in request.headers: response.headers['Access-Control-Allow-Headers'] = \ 'origin, x-requested-with, content-type' return response
817,304
Get names of HTTP methods that can be used at the requested URI.

Arguments:
    :actions_map: Map of actions. Must have the same structure as
        self._item_actions and self._collection_actions
def _get_handled_methods(self, actions_map): methods = ('OPTIONS',) defined_actions = [] for action_name in actions_map.keys(): view_method = getattr(self, action_name, None) method_exists = view_method is not None method_defined = view_method != self.not_allowed_action if method_exists and method_defined: defined_actions.append(action_name) for action in defined_actions: methods += actions_map[action] return methods
817,305
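`_get_handled_methods` above keeps only the actions the view actually implements, and `_set_options_headers` turns the result into headers. A standalone illustration of the outcome, with a made-up actions map:

# hypothetical actions map, mirroring the structure the view expects
collection_actions = {
    'index': ('GET', 'HEAD'),
    'create': ('POST',),
    'delete_many': ('DELETE',),
}

# suppose the view defines index() and create() but not delete_many();
# the resulting method tuple and Allow header would be:
methods = ('OPTIONS',) + collection_actions['index'] + collection_actions['create']
print(', '.join(sorted(methods)))   # -> 'GET, HEAD, OPTIONS, POST'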
Return a list of generated strings.

Args:
    cnt (int): length of list
    unique (bool): whether to make entries unique
    progress_callback (callable, optional): called as
        progress_callback(i, cnt) after each rendered string

Returns:
    list: the generated strings.

We keep track of total attempts because a template may specify
something impossible to attain, like [1-9]{} with cnt == 1000.
def render_list(self, cnt, unique=False, progress_callback=None, **kwargs):
    rendered_list = []
    i = 0
    total_attempts = 0
    while True:
        if i >= cnt:
            break
        if total_attempts > cnt * self.unique_attempts_factor:
            raise StringGenerator.UniquenessError(u"couldn't satisfy uniqueness")
        s = self.render(**kwargs)
        if unique:
            if s not in rendered_list:
                rendered_list.append(s)
                i += 1
        else:
            rendered_list.append(s)
            i += 1
        total_attempts += 1

        # Optionally trigger the progress indicator to inform others about our progress
        if progress_callback and callable(progress_callback):
            progress_callback(i, cnt)

    return rendered_list
818,704
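Assuming this `render_list` lives on the strgen-style `StringGenerator` class, usage might look like the following; note that a template such as `[1-9]` asked for 1000 unique strings would exhaust the attempt budget and raise `UniquenessError`.

gen = StringGenerator('[a-z]{8}')      # 8 random lowercase letters

def report(done, total):
    print('{}/{} rendered'.format(done, total))

ids = gen.render_list(5, unique=True, progress_callback=report)
print(ids)   # e.g. ['qhtzlmwe', ...] -- random, so output varies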
Get the path from a given url, including the querystring. Args: url (str) Returns: str
def get_path(url): url = urlsplit(url) path = url.path if url.query: path += "?{}".format(url.query) return path
818,848
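A quick check of `get_path` above, assuming `urlsplit` comes from Python 3's `urllib.parse` (the snippet does not show its import):

from urllib.parse import urlsplit

print(get_path('https://example.com/api/items?page=2&sort=name'))
# -> '/api/items?page=2&sort=name'
print(get_path('https://example.com/plain/path'))
# -> '/plain/path'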
Constructor Args: content (str): Markdown text css (str): Custom CSS style. If not set, use default CSS style. image_root (str): Root directory for inline images.
def __init__(self, content, css=None, image_root='.'): self._md = markdown.Markdown(extensions=[ 'markdown.extensions.tables', 'markdown.extensions.meta']) self._html = None self._inline_images = None self._convert(content, css, image_root)
818,920
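The constructor wires python-markdown up with the `tables` and `meta` extensions before converting. The `_convert` step is not shown here, but the underlying library call it builds on behaves like this:

import markdown

md = markdown.Markdown(extensions=[
    'markdown.extensions.tables',
    'markdown.extensions.meta'])

html = md.convert('Title: Demo\n\n| a | b |\n|---|---|\n| 1 | 2 |')
print(md.Meta)   # -> {'title': ['Demo']}  (parsed by the meta extension)
print(html)      # an HTML <table> rendered by the tables extension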
Initialize the object and create the week day map.

Args:
    workdays: List or tuple of week days considered 'work days'.
        Anything not in this list is considered a rest day.
        Defaults to [MO, TU, WE, TH, FR].
    holidays: List or tuple of holidays (or strings). Default is [].
def __init__(self, workdays=None, holidays=None):
    if workdays is None:
        self.workdays = [MO, TU, WE, TH, FR]
    else:
        self.workdays = sorted(list(set(workdays)))  # sorted and unique
    if holidays is None:
        holidays = []
    # create the week day map structure in a local variable to speed up
    # lookups; this structure is the core of this class: it is used in all
    # calculations and is what enables the custom work day list
    weekdaymap = []
    for wkday in range(0, 7):
        wmap = {}
        wmap['dayofweek'] = wkday
        if wkday in self.workdays:
            wmap['isworkday'] = True
            i = self.workdays.index(wkday)
            # assign transition to next work day
            if i == len(self.workdays) - 1:  # last work day of week
                wmap['nextworkday'] = self.workdays[0]
                wmap['offsetnext'] = wmap['nextworkday'] + 7 - wkday
            else:
                wmap['nextworkday'] = self.workdays[i+1]
                wmap['offsetnext'] = wmap['nextworkday'] - wkday
            # assign transition to previous work day
            if i == 0:  # first work day of week
                wmap['prevworkday'] = self.workdays[-1]
                wmap['offsetprev'] = wmap['prevworkday'] - wkday - 7
            else:
                wmap['prevworkday'] = self.workdays[i-1]
                wmap['offsetprev'] = wmap['prevworkday'] - wkday
        else:
            wmap['isworkday'] = False
            # assign transition to next work day
            after = [x for x in range(wkday+1, 7) if x in self.workdays]
            if after:
                # there is a work day after this non-work day
                wmap['nextworkday'] = after[0]
                wmap['offsetnext'] = wmap['nextworkday'] - wkday
            else:
                wmap['nextworkday'] = self.workdays[0]
                wmap['offsetnext'] = wmap['nextworkday'] + 7 - wkday
            # assign transition to previous work day
            before = [x for x in range(0, wkday) if x in self.workdays]
            if before:
                # there is a work day before this non-work day
                wmap['prevworkday'] = before[-1]
                wmap['offsetprev'] = wmap['prevworkday'] - wkday
            else:
                wmap['prevworkday'] = self.workdays[-1]
                wmap['offsetprev'] = wmap['prevworkday'] - wkday - 7
        weekdaymap.append(DayOfWeek(**wmap))
    self.weekdaymap = weekdaymap
    # add holidays but eliminate non-work days and repetitions
    holidays = set([parsefun(hol) for hol in holidays])
    self.holidays = sorted(
        [hol for hol in holidays if weekdaymap[hol.weekday()].isworkday])
818,993
Check if a given date is a work date, ignoring holidays. Args: date (date, datetime or str): Date to be checked. Returns: bool: True if the date is a work date, False otherwise.
def isworkday(self, date): date = parsefun(date) return self.weekdaymap[date.weekday()].isworkday
818,994
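Putting the constructor and `isworkday` above together; the class name `BusinessCalendar` is an assumption for the sketch, and the weekday constants are taken to be integers with Monday == 0, matching `date.weekday()`.

import datetime

MO, TU, WE, TH, FR, SA, SU = range(7)   # assumed integer weekday constants

# hypothetical class name wrapping the __init__/isworkday shown above
cal = BusinessCalendar(workdays=[MO, TU, WE, TH],
                       holidays=['2024-01-01'])

print(cal.isworkday(datetime.date(2024, 1, 4)))   # Thursday -> True
print(cal.isworkday(datetime.date(2024, 1, 5)))   # Friday -> False here
# note: isworkday ignores holidays by design; 2024-01-01 (a Monday)
# still reports True even though it is in cal.holidays
print(cal.isworkday(datetime.date(2024, 1, 1)))   # Monday -> True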