Load all known fabsetup addons which are installed as PyPI packages.

Args:
    _globals (dict): the globals() namespace of the fabric script.

Returns:
    None

def load_pip_addons(_globals):
    for package_name in known_pip_addons:
        _, username = package_username(package_name)
        try:
            load_addon(username, package_name.replace('-', '_'), _globals)
        except ImportError:
            pass

Load all fabsetup addons which are stored under ~/.fabsetup-addon-repos as git repositories.

Args:
    _globals (dict): the globals() namespace of the fabric script.

Returns:
    None

def load_repo_addons(_globals):
    repos_dir = os.path.expanduser('~/.fabsetup-addon-repos')
    if os.path.isdir(repos_dir):
        basedir, repos, _ = next(os.walk(repos_dir))
        # omit dot dirs like '.rope' or 'fabsetup-theno-termdown.disabled'
        for repo_dir in [os.path.join(basedir, repo)
                         for repo in repos
                         if '.' not in repo]:
            sys.path.append(repo_dir)
            package_name, username = package_username(repo_dir.split('/')[-1])
            load_addon(username, package_name, _globals)

Get the decrypted value of an SSM parameter.

Args:
    parameter_name: the name of the stored parameter of interest.

Returns:
    The value if allowed and present, else an empty string.

def get_ssm_parameter(parameter_name):
    try:
        response = boto3.client('ssm').get_parameters(
            Names=[parameter_name],
            WithDecryption=True
        )
        # A missing or empty 'Parameters' list raises and falls through
        # to the except clause below
        return response['Parameters'][0].get('Value', '')
    except Exception:
        pass
    return ''

Cloud stack utility init method.

Args:
    config_block: a dictionary created by the CLI driver. See that script
        for the things that are required and optional.

Returns:
    not a damn thing

Raises:
    SystemError: if everything isn't just right.

def __init__(self, config_block):
    if config_block:
        self._config = config_block
    else:
        logging.error('config block was garbage')
        raise SystemError

The main event of the utility. Create or update a CloudFormation stack, injecting properties where needed.

Args:
    None

Returns:
    True if the stack create/update is started successfully, else False
    if the start goes off in the weeds.

Exits:
    If the user asked for a dry run, exits with code 0; there is no point
    continuing after that.

def upsert(self):
    required_parameters = []
    self._stackParameters = []

    try:
        self._initialize_upsert()
    except Exception:
        return False

    try:
        available_parameters = self._parameters.keys()

        for parameter_name in self._template.get('Parameters', {}):
            required_parameters.append(str(parameter_name))

        logging.info(' required parameters: ' + str(required_parameters))
        logging.info('available parameters: ' + str(available_parameters))

        parameters = []
        for required_parameter in required_parameters:
            parameter = {}
            parameter['ParameterKey'] = str(required_parameter)
            required_parameter = str(required_parameter)
            if required_parameter in self._parameters:
                parameter['ParameterValue'] = self._parameters[required_parameter]
            else:
                parameter['ParameterValue'] = self._parameters[required_parameter.lower()]
            parameters.append(parameter)

        if not self._analyze_stuff():
            sys.exit(1)

        if self._config.get('dryrun', False):
            logging.info('Generating change set')
            set_id = self._generate_change_set(parameters)
            if set_id:
                self._describe_change_set(set_id)
            logging.info('This was a dryrun')
            sys.exit(0)

        self._tags.append({"Key": "CODE_VERSION_SD", "Value": self._config.get('codeVersion')})
        self._tags.append({"Key": "ANSWER", "Value": str(42)})

        if self._updateStack:
            stack = self._cloudFormation.update_stack(
                StackName=self._config.get('environment', {}).get('stack_name', None),
                TemplateURL=self._templateUrl,
                Parameters=parameters,
                Capabilities=['CAPABILITY_IAM', 'CAPABILITY_NAMED_IAM'],
                Tags=self._tags,
                ClientRequestToken=str(uuid.uuid4())
            )
            logging.info('existing stack ID: {}'.format(stack.get('StackId', 'unknown')))
        else:
            stack = self._cloudFormation.create_stack(
                StackName=self._config.get('environment', {}).get('stack_name', None),
                TemplateURL=self._templateUrl,
                Parameters=parameters,
                Capabilities=['CAPABILITY_IAM', 'CAPABILITY_NAMED_IAM'],
                Tags=self._tags,
                ClientRequestToken=str(uuid.uuid4())
            )
            logging.info('new stack ID: {}'.format(stack.get('StackId', 'unknown')))
    except Exception as x:
        logging.error(x, exc_info=self._verbose)
        return False

    return True

List the existing stacks in the indicated region.

Args:
    None

Returns:
    True

Todo:
    Figure out what could go wrong and take steps to handle problems.

def list(self):
    self._initialize_list()
    interested = True

    response = self._cloudFormation.list_stacks()
    print('Stack(s):')
    while interested:
        if 'StackSummaries' in response:
            for stack in response['StackSummaries']:
                stack_status = stack['StackStatus']
                if stack_status != 'DELETE_COMPLETE':
                    print('  [{}] - {}'.format(stack['StackStatus'], stack['StackName']))
        next_token = response.get('NextToken', None)
        if next_token:
            response = self._cloudFormation.list_stacks(NextToken=next_token)
        else:
            interested = False

    return True

Smash the given stack.

Args:
    None

Returns:
    True if the delete is started, else False.

Todo:
    Figure out what could go wrong and take steps to handle problems.

def smash(self):
    self._initialize_smash()
    try:
        stack_name = self._config.get('environment', {}).get('stack_name', None)
        response = self._cloudFormation.describe_stacks(StackName=stack_name)
        logging.debug('smash pre-flight returned: {}'.format(
            json.dumps(response, indent=4, default=json_util.default)
        ))
    except ClientError:
        logging.warning('your stack is in another castle [0].')
        return False
    except Exception as wtf:
        logging.error('failed to find initial status of smash candidate: {}'.format(wtf))
        return False

    response = self._cloudFormation.delete_stack(StackName=stack_name)
    logging.info('delete started for stack: {}'.format(stack_name))
    logging.debug('delete_stack returned: {}'.format(json.dumps(response, indent=4)))

    return self.poll_stack()

The utility requires boto3 clients for CloudFormation, S3, and SSM. Here is where we make them.

Args:
    None

Returns:
    Good or Bad; True or False

def _init_boto3_clients(self):
    try:
        profile = self._config.get('environment', {}).get('profile')
        region = self._config.get('environment', {}).get('region')
        if profile:
            self._b3Sess = boto3.session.Session(profile_name=profile)
        else:
            self._b3Sess = boto3.session.Session()

        self._s3 = self._b3Sess.client('s3')
        self._cloudFormation = self._b3Sess.client('cloudformation', region_name=region)
        self._ssm = self._b3Sess.client('ssm', region_name=region)

        return True
    except Exception as wtf:
        logging.error('Exception caught in _init_boto3_clients(): {}'.format(wtf))
        traceback.print_exc(file=sys.stdout)
        return False

Get parameters from Simple Systems Manager.

Args:
    p: a parameter name.

Returns:
    The value, decrypted if needed, if successful, or None if things go sideways.

def _get_ssm_parameter(self, p):
    try:
        response = self._ssm.get_parameter(Name=p, WithDecryption=True)
        return response.get('Parameter', {}).get('Value', None)
    except Exception as ruh_roh:
        logging.error(ruh_roh, exc_info=False)
    return None

Fill in the _parameters dict from the properties file.

Args:
    None

Returns:
    True

Todo:
    Figure out what could go wrong and at least acknowledge the fact that
    Murphy was an optimist.

def _fill_parameters(self):
    self._parameters = self._config.get('parameters', {})
    self._fill_defaults()

    for k in self._parameters.keys():
        try:
            if self._parameters[k].startswith(self.SSM) and self._parameters[k].endswith(']'):
                parts = self._parameters[k].split(':')
                tmp = parts[1].replace(']', '')
                val = self._get_ssm_parameter(tmp)
                if val:
                    self._parameters[k] = val
                else:
                    logging.error('SSM parameter {} not found'.format(tmp))
                    return False
            elif self._parameters[k] == self.ASK:
                val = None
                a1 = '__x___'
                a2 = '__y___'
                prompt1 = "Enter value for '{}': ".format(k)
                prompt2 = "Confirm value for '{}': ".format(k)
                while a1 != a2:
                    a1 = getpass.getpass(prompt=prompt1)
                    a2 = getpass.getpass(prompt=prompt2)
                    if a1 == a2:
                        val = a1
                    else:
                        print('values do not match, try again')
                self._parameters[k] = val
        except Exception:
            pass

    return True

Fill in the _tags dict from the tags file.

Args:
    None

Returns:
    True

Todo:
    Figure out what could go wrong and at least acknowledge the fact that
    Murphy was an optimist.

def _read_tags(self):
    tags = self._config.get('tags', {})
    logging.info('Tags:')
    for tag_name in tags.keys():
        tag = {}
        tag['Key'] = tag_name
        tag['Value'] = tags[tag_name]
        self._tags.append(tag)
        logging.info('{} = {}'.format(tag_name, tags[tag_name]))

    logging.debug(json.dumps(self._tags, indent=2, sort_keys=True))
    return True

Determine if we are creating a new stack or updating an existing one. The update member is set as you would expect at the end of this query.

Args:
    None

Returns:
    True

def _set_update(self):
    try:
        self._updateStack = False
        stack_name = self._config.get('environment', {}).get('stack_name', None)
        response = self._cloudFormation.describe_stacks(StackName=stack_name)
        stack = response['Stacks'][0]
        if stack['StackStatus'] == 'ROLLBACK_COMPLETE':
            logging.info('stack is in ROLLBACK_COMPLETE status and should be deleted')
            del_stack_resp = self._cloudFormation.delete_stack(StackName=stack_name)
            logging.info('delete started for stack: {}'.format(stack_name))
            logging.debug('delete_stack returned: {}'.format(json.dumps(del_stack_resp, indent=4)))
            stack_delete = self.poll_stack()
            if not stack_delete:
                return False

        if stack['StackStatus'] in ['CREATE_COMPLETE', 'UPDATE_COMPLETE',
                                    'UPDATE_ROLLBACK_COMPLETE']:
            self._updateStack = True
    except Exception:
        self._updateStack = False

    logging.info('update_stack: ' + str(self._updateStack))
    return True

CloudFormation likes to take the template from S3, so here we put the template into S3. We also store the parameters file that was used in this run.

Note: you can pass anything as the version string, but you should at least
consider a version control tag or git commit hash as the version.

Args:
    None

Returns:
    True if the stuff lands in S3, or False if the file doesn't really
    exist or the upload goes sideways.

def _archive_elements(self):
    try:
        stackfile_key, propertyfile_key = self._craft_s3_keys()

        template_file = self._config.get('environment', {}).get('template', None)
        bucket = self._config.get('environment', {}).get('bucket', None)
        if not os.path.isfile(template_file):
            logging.info("{} is not actually a file".format(template_file))
            return False

        logging.info('Copying parameters to s3://{}/{}'.format(bucket, propertyfile_key))
        temp_file_name = '/tmp/{}'.format((str(uuid.uuid4()))[:8])
        with open(temp_file_name, 'w') as dump_file:
            json.dump(self._parameters, dump_file, indent=4)
        self._s3.upload_file(temp_file_name, bucket, propertyfile_key)

        logging.info('Copying {} to s3://{}/{}'.format(template_file, bucket, stackfile_key))
        self._s3.upload_file(template_file, bucket, stackfile_key)

        self._templateUrl = 'https://s3.amazonaws.com/{}/{}'.format(bucket, stackfile_key)
        logging.info("template_url: " + self._templateUrl)
        return True
    except Exception as x:
        logging.error('Exception caught in _archive_elements(): {}'.format(x))
        traceback.print_exc(file=sys.stdout)
        return False

We are putting stuff into S3, and we were supplied the bucket. Here we craft the keys of the elements we are putting up there in the internet clouds.

Args:
    None

Returns:
    a tuple of template file key and property file key.

def _craft_s3_keys(self):
    now = time.gmtime()
    stub = "templates/{stack_name}/{version}".format(
        stack_name=self._config.get('environment', {}).get('stack_name', None),
        version=self._config.get('codeVersion')
    )

    stub = stub + "/" + str(now.tm_year)
    stub = stub + "/" + str('%02d' % now.tm_mon)
    stub = stub + "/" + str('%02d' % now.tm_mday)
    stub = stub + "/" + str('%02d' % now.tm_hour)
    stub = stub + ":" + str('%02d' % now.tm_min)
    stub = stub + ":" + str('%02d' % now.tm_sec)

    if self._yaml:
        template_key = stub + "/stack.yaml"
    else:
        template_key = stub + "/stack.json"

    property_key = stub + "/stack.properties"
    return template_key, property_key

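For illustration, a hypothetical stack named "web" with codeVersion "1.2.3", archived at 2018-06-05 14:31:07 UTC, would yield keys like the following (note that ':' is legal in S3 keys but can trip up some downstream tooling):

    # template_key  -> templates/web/1.2.3/2018/06/05/14:31:07/stack.json
    # property_key  -> templates/web/1.2.3/2018/06/05/14:31:07/stack.properties
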
Spin in a loop until the CloudFormation process either fails or succeeds.

Args:
    None

Returns:
    Good or bad; True or False

def poll_stack(self):
    logging.info('polling stack status, POLL_INTERVAL={}'.format(POLL_INTERVAL))
    time.sleep(POLL_INTERVAL)
    completed_states = [
        'CREATE_COMPLETE',
        'UPDATE_COMPLETE',
        'DELETE_COMPLETE'
    ]
    stack_name = self._config.get('environment', {}).get('stack_name', None)
    while True:
        try:
            response = self._cloudFormation.describe_stacks(StackName=stack_name)
            stack = response['Stacks'][0]
            current_status = stack['StackStatus']
            logging.info('current status of {}: {}'.format(stack_name, current_status))
            if current_status.endswith('COMPLETE') or current_status.endswith('FAILED'):
                if current_status in completed_states:
                    return True
                else:
                    return False

            time.sleep(POLL_INTERVAL)
        except ClientError as wtf:
            if str(wtf).find('does not exist') == -1:
                logging.error('Exception caught in poll_stack(): {}'.format(wtf))
                traceback.print_exc(file=sys.stdout)
                return False
            else:
                logging.info('{} is gone'.format(stack_name))
                return True
        except Exception as wtf:
            logging.error('Exception caught in poll_stack(): {}'.format(wtf))
            traceback.print_exc(file=sys.stdout)
            return False

Get IP geolocation.

Args:
    ip (str): IP address to geolocate.
    hit_api (bool): whether to hit the API if the info is not already cached.

Returns:
    tuple: (latitude, longitude), or None if the lookup failed or was rate-limited.

def ip_geoloc(ip, hit_api=True):
    from ..logs.models import IPInfoCheck
    try:
        obj = IPInfoCheck.objects.get(ip_address=ip).ip_info
    except IPInfoCheck.DoesNotExist:
        if hit_api:
            try:
                obj = IPInfoCheck.check_ip(ip)
            except RateExceededError:
                return None
        else:
            return None
    return obj.latitude, obj.longitude

Get a link to Google Maps pointing at this IP's geolocation.

Args:
    data (str/tuple): IP address or (latitude, longitude).

Returns:
    str: a link to Google Maps pointing at this IP's geolocation, or an
    empty string if the IP could not be geolocated.

def google_maps_geoloc_link(data):
    if isinstance(data, str):
        lat_lon = ip_geoloc(data)
        if lat_lon is None:
            return ''
        lat, lon = lat_lon
    else:
        lat, lon = data
    loc = '%s,%s' % (lat, lon)
    return 'https://www.google.com/maps/place/@%s,17z/' \
           'data=!3m1!4b1!4m5!3m4!1s0x0:0x0!8m2!3d%s!4d%s' % (loc, lat, lon)

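A quick usage sketch; the coordinates are made up, and passing a (lat, lon) tuple skips the IP lookup entirely:

    link = google_maps_geoloc_link((45.76, 4.84))
    # 'https://www.google.com/maps/place/@45.76,4.84,17z/data=...!3d45.76!4d4.84'
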
Get a link to OpenStreetMap pointing at this IP's geolocation.

Args:
    data (str/tuple): IP address or (latitude, longitude).

Returns:
    str: a link to OpenStreetMap pointing at this IP's geolocation, or an
    empty string if the IP could not be geolocated.

def open_street_map_geoloc_link(data):
    if isinstance(data, str):
        lat_lon = ip_geoloc(data)
        if lat_lon is None:
            return ''
        lat, lon = lat_lon
    else:
        lat, lon = data
    return 'https://www.openstreetmap.org/search' \
           '?query=%s%%2C%s#map=7/%s/%s' % (lat, lon, lat, lon)

Return the URL patterns for the logs views.

Args:
    admin_view (callable): admin_view method from an AdminSite instance.

Returns:
    list: the URL patterns for the logs views.

def logs_urlpatterns(admin_view=lambda x: x):
    return [
        url(r'^$',
            admin_view(LogsMenu.as_view()),
            name='logs'),
        url(r'^status_codes$',
            admin_view(LogsStatusCodes.as_view()),
            name='logs_status_codes'),
        url(r'^status_codes_by_date$',
            admin_view(LogsStatusCodesByDate.as_view()),
            name='logs_status_codes_by_date'),
        url(r'^most_visited_pages$',
            admin_view(LogsMostVisitedPages.as_view()),
            name='logs_most_visited_pages')
    ]

Get information about an IP.

Args:
    ip (str): an IP (xxx.xxx.xxx.xxx).

Returns:
    dict: see http://ipinfo.io/developers/getting-started

def _get(self, ip):
    # Geoloc updated up to once a week:
    # http://ipinfo.io/developers/data#geolocation-data
    retries = 10
    for retry in range(retries):
        try:
            response = requests.get('http://ipinfo.io/%s/json' % ip,
                                    verify=False, timeout=1)  # nosec
            if response.status_code == 429:
                raise RateExceededError
            return response.json()
        except (requests.ReadTimeout, requests.ConnectTimeout):
            pass
    return {}

Check if URL is part of the current project's URLs.

Args:
    url (str): URL to check.
    default: sentinel compared against the resolved view function, used to
        filter out some URLs attached to a default handler.

Returns:
    bool: whether the URL resolves to a project view or a collectable static file.

def url_is_project(url, default='not_a_func'):
    try:
        u = resolve(url)
        if u and u.func != default:
            return True
    except Resolver404:
        static_url = settings.STATIC_URL
        static_url_wd = static_url.lstrip('/')
        if url.startswith(static_url):
            url = url[len(static_url):]
        elif url.startswith(static_url_wd):
            url = url[len(static_url_wd):]
        else:
            return False
        if finders.find(url):
            return True
    return False

Function generator.

Args:
    white_list (dict): dict with PREFIXES and CONSTANTS keys (list values).

Returns:
    func: a function to check if a URL is in the given white list.

def url_is(white_list):
    def func(url):
        prefixes = white_list.get('PREFIXES', ())
        for prefix in prefixes:
            if url.startswith(prefix):
                return True
        constants = white_list.get('CONSTANTS', ())
        for exact_url in constants:
            if url == exact_url:
                return True
        return False
    return func

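A small usage sketch with a hypothetical white list; url_is returns a closure over the dict:

    is_whitelisted = url_is({'PREFIXES': ('/admin/',), 'CONSTANTS': ('/login',)})
    is_whitelisted('/admin/logs')  # True (prefix match)
    is_whitelisted('/login')       # True (exact match)
    is_whitelisted('/blog/')       # False
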
Search the ORCID public API. Specifically, return a dictionary with the personal details (name, etc.) of the person associated with the given ORCID.

Args:
    orcid (`str`): The ORCID to be searched

Returns:
    `dict`: Dictionary with the JSON response from the API

Raises:
    `~requests.HTTPError`: If the given ORCID cannot be found, an
    `~requests.HTTPError` is raised with status code 404

def search_orcid(orcid):
    url = 'https://pub.orcid.org/v2.1/{orcid}/person'.format(orcid=orcid)
    r = requests.get(url, headers=headers)
    if r.status_code != 200:
        r.raise_for_status()
    return r.json()

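A usage sketch, assuming network access; 0000-0002-1825-0097 is the ORCID of Josiah Carberry, a fictitious researcher maintained for testing, and the response layout matches what the ORCID validator further below expects:

    record = search_orcid('0000-0002-1825-0097')
    record['name']['family-name']['value']  # 'Carberry'
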
Yield one date per day, from the starting date up to but not including the ending date.

Args:
    start_date (date): starting date.
    end_date (date): ending date (excluded).

Yields:
    date: a date for each day within the range.

def daterange(start_date, end_date):
    for n in range(int((end_date - start_date).days)):
        yield start_date + timedelta(n)

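For example (note the end date is excluded):

    from datetime import date
    list(daterange(date(2018, 1, 1), date(2018, 1, 4)))
    # [date(2018, 1, 1), date(2018, 1, 2), date(2018, 1, 3)]
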
Convert a month name (MMM) to its number (01-12).

Args:
    month (str): 3-letter string describing the month.
    to_int (bool): cast number to int or not.

Returns:
    str/int: the month's number (between 01 and 12).

def month_name_to_number(month, to_int=False):
    number = {
        'Jan': '01', 'Feb': '02', 'Mar': '03',
        'Apr': '04', 'May': '05', 'Jun': '06',
        'Jul': '07', 'Aug': '08', 'Sep': '09',
        'Oct': '10', 'Nov': '11', 'Dec': '12',
    }.get(month)
    return int(number) if to_int else number

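For example:

    month_name_to_number('Sep')               # '09'
    month_name_to_number('Sep', to_int=True)  # 9
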
Read and parse ReSpecTh XML file metadata (file author, version, etc.)

Args:
    root (`~xml.etree.ElementTree.Element`): Root of ReSpecTh XML file

Returns:
    properties (`dict`): Dictionary with file metadata

def get_file_metadata(root):
    properties = {}

    file_author = getattr(root.find('fileAuthor'), 'text', False)
    # Test for missing element or empty string in the same statement
    if not file_author:
        raise MissingElementError('fileAuthor')
    else:
        properties['file-authors'] = [{'name': file_author}]

    # Default version is 0 for the ChemKED file
    properties['file-version'] = 0

    # Default ChemKED version
    properties['chemked-version'] = __version__

    return properties

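A minimal sketch with a hand-built element instead of a real file ('Jane Doe' is a placeholder); the chemked-version comes from the package's __version__:

    import xml.etree.ElementTree as ET

    root = ET.fromstring('<experiment><fileAuthor>Jane Doe</fileAuthor></experiment>')
    get_file_metadata(root)
    # {'file-authors': [{'name': 'Jane Doe'}], 'file-version': 0,
    #  'chemked-version': <package __version__>}
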
Read reference info from root of ReSpecTh XML file.

Args:
    root (`~xml.etree.ElementTree.Element`): Root of ReSpecTh XML file

Returns:
    properties (`dict`): Dictionary with reference information

def get_reference(root):
    reference = {}
    elem = root.find('bibliographyLink')
    if elem is None:
        raise MissingElementError('bibliographyLink')

    # Try to get reference info via DOI, fall back on preferredKey if necessary.
    ref_doi = elem.get('doi', None)
    ref_key = elem.get('preferredKey', None)

    if ref_doi is not None:
        try:
            ref = crossref_api.works(ids=ref_doi)['message']
        except (HTTPError, habanero.RequestError, ConnectionError):
            if ref_key is None:
                raise KeywordError('DOI not found and preferredKey attribute not set')
            else:
                warn('Missing doi attribute in bibliographyLink or lookup failed. '
                     'Setting "detail" key as a fallback; please update to the '
                     'appropriate fields.')
                reference['detail'] = ref_key
                if reference['detail'][-1] != '.':
                    reference['detail'] += '.'
        else:
            if ref_key is not None:
                warn('Using DOI to obtain reference information, rather than preferredKey.')
            reference['doi'] = elem.attrib['doi']
            # Now get elements of the reference data
            # Assume that the reference returned by the DOI lookup always has a container-title
            reference['journal'] = ref.get('container-title')[0]
            ref_year = ref.get('published-print') or ref.get('published-online')
            reference['year'] = int(ref_year['date-parts'][0][0])
            reference['volume'] = int(ref.get('volume'))
            reference['pages'] = ref.get('page')
            reference['authors'] = []
            for author in ref['author']:
                auth = {}
                auth['name'] = ' '.join([author['given'], author['family']])
                # Add ORCID if available. Note: lstrip removes a *character set*,
                # which only works here because ORCIDs start with a digit.
                orcid = author.get('ORCID')
                if orcid:
                    auth['ORCID'] = orcid.lstrip('http://orcid.org/')
                reference['authors'].append(auth)
    elif ref_key is not None:
        warn('Missing doi attribute in bibliographyLink. '
             'Setting "detail" key as a fallback; please update to the appropriate fields.')
        reference['detail'] = ref_key
        if reference['detail'][-1] != '.':
            reference['detail'] += '.'
    else:
        # Need one of DOI or preferredKey
        raise MissingAttributeError('preferredKey', 'bibliographyLink')

    return reference

Read experiment type and apparatus information from root of ReSpecTh XML file.

Args:
    root (`~xml.etree.ElementTree.Element`): Root of ReSpecTh XML file

Returns:
    properties (`dict`): Dictionary with experiment type and apparatus information.

def get_experiment_kind(root):
    properties = {}
    if root.find('experimentType').text == 'Ignition delay measurement':
        properties['experiment-type'] = 'ignition delay'
    else:
        raise NotImplementedError(root.find('experimentType').text + ' not (yet) supported')

    properties['apparatus'] = {'kind': '', 'institution': '', 'facility': ''}
    kind = getattr(root.find('apparatus/kind'), 'text', False)
    # Test for missing element or empty string
    if not kind:
        raise MissingElementError('apparatus/kind')
    elif kind in ['shock tube', 'rapid compression machine']:
        properties['apparatus']['kind'] = kind
    else:
        raise NotImplementedError(kind + ' experiment not (yet) supported')

    return properties

Read common properties from root of ReSpecTh XML file.

Args:
    root (`~xml.etree.ElementTree.Element`): Root of ReSpecTh XML file

Returns:
    properties (`dict`): Dictionary with common properties

def get_common_properties(root):
    properties = {}

    for elem in root.iterfind('commonProperties/property'):
        name = elem.attrib['name']

        if name == 'initial composition':
            properties['composition'] = {'species': [], 'kind': None}

            for child in elem.iter('component'):
                spec = {}
                spec['species-name'] = child.find('speciesLink').attrib['preferredKey']
                units = child.find('amount').attrib['units']

                # use InChI for unique species identifier (if present)
                try:
                    spec['InChI'] = child.find('speciesLink').attrib['InChI']
                except KeyError:
                    # TODO: add InChI validator/search
                    warn('Missing InChI for species ' + spec['species-name'])

                # If mole or mass fraction, just set value
                if units in ['mole fraction', 'mass fraction', 'mole percent']:
                    spec['amount'] = [float(child.find('amount').text)]
                elif units == 'percent':
                    # assume this means mole percent
                    warn('Assuming percent in composition means mole percent')
                    spec['amount'] = [float(child.find('amount').text)]
                    units = 'mole percent'
                elif units == 'ppm':
                    # assume molar ppm, convert to mole fraction
                    warn('Assuming molar ppm in composition and converting to mole fraction')
                    spec['amount'] = [float(child.find('amount').text) * 1.e-6]
                    units = 'mole fraction'
                elif units == 'ppb':
                    # assume molar ppb, convert to mole fraction
                    warn('Assuming molar ppb in composition and converting to mole fraction')
                    spec['amount'] = [float(child.find('amount').text) * 1.e-9]
                    units = 'mole fraction'
                else:
                    raise KeywordError('Composition units need to be one of: mole fraction, '
                                       'mass fraction, mole percent, percent, ppm, or ppb.')

                properties['composition']['species'].append(spec)

                # check consistency of composition type
                if properties['composition']['kind'] is None:
                    properties['composition']['kind'] = units
                elif properties['composition']['kind'] != units:
                    raise KeywordError('composition units ' + units +
                                       ' not consistent with ' +
                                       properties['composition']['kind'])

        elif name in datagroup_properties:
            field = name.replace(' ', '-')
            units = elem.attrib['units']
            if units == 'Torr':
                units = 'torr'
            quantity = 1.0 * unit_registry(units)
            try:
                quantity.to(property_units[field])
            except pint.DimensionalityError:
                raise KeywordError('units incompatible for property ' + name)

            properties[field] = [' '.join([elem.find('value').text, units])]

        else:
            raise KeywordError('Property ' + name + ' not supported as common property')

    return properties

Gets ignition type and target.

Args:
    root (`~xml.etree.ElementTree.Element`): Root of ReSpecTh XML file

Returns:
    properties (`dict`): Dictionary with ignition type/target information

def get_ignition_type(root):
    properties = {}
    elem = root.find('ignitionType')

    if elem is None:
        raise MissingElementError('ignitionType')
    elem = elem.attrib

    if 'target' in elem:
        ign_target = elem['target'].rstrip(';').upper()
    else:
        raise MissingAttributeError('target', 'ignitionType')

    if 'type' in elem:
        ign_type = elem['type']
        if ign_type == 'baseline max intercept from d/dt':
            ign_type = 'd/dt max extrapolated'
    else:
        raise MissingAttributeError('type', 'ignitionType')

    # ReSpecTh allows multiple ignition targets
    if len(ign_target.split(';')) > 1:
        raise NotImplementedError('Multiple ignition targets not supported.')

    # Acceptable ignition targets include pressure, temperature, and species
    # concentrations
    if ign_target == 'OHEX':
        ign_target = 'OH*'
    elif ign_target == 'CHEX':
        ign_target = 'CH*'
    elif ign_target == 'P':
        ign_target = 'pressure'
    elif ign_target == 'T':
        ign_target = 'temperature'

    if ign_target not in ['pressure', 'temperature', 'OH', 'OH*', 'CH*', 'CH']:
        raise KeywordError(ign_target + ' not valid ignition target')

    if ign_type not in ['max', 'd/dt max', '1/2 max', 'min', 'd/dt max extrapolated']:
        raise KeywordError(ign_type + ' not valid ignition type')

    properties['type'] = ign_type
    properties['target'] = ign_target

    return properties

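A minimal sketch showing the target/type normalization on a hand-built element:

    import xml.etree.ElementTree as ET

    root = ET.fromstring('<experiment><ignitionType target="P" type="max"/></experiment>')
    get_ignition_type(root)
    # {'type': 'max', 'target': 'pressure'}
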
Parse datapoints with ignition delay from file.

Args:
    root (`~xml.etree.ElementTree.Element`): Root of ReSpecTh XML file

Returns:
    properties (`dict`): Dictionary with ignition delay data

def get_datapoints(root):
    # Shock tube experiment will have one data group, while RCM may have one
    # or two (one for ignition delay, one for volume-history)
    dataGroups = root.findall('dataGroup')
    if not dataGroups:
        raise MissingElementError('dataGroup')

    # all situations will have main experimental data in first dataGroup
    dataGroup = dataGroups[0]
    property_id = {}
    unit_id = {}
    species_id = {}
    # get properties of dataGroup
    for prop in dataGroup.findall('property'):
        unit_id[prop.attrib['id']] = prop.attrib['units']
        temp_prop = prop.attrib['name']
        if temp_prop not in datagroup_properties + ['composition']:
            raise KeyError(temp_prop + ' not valid dataPoint property')
        property_id[prop.attrib['id']] = temp_prop

        if temp_prop == 'composition':
            spec = {'species-name': prop.find('speciesLink').attrib['preferredKey']}
            # use InChI for unique species identifier (if present)
            try:
                spec['InChI'] = prop.find('speciesLink').attrib['InChI']
            except KeyError:
                # TODO: add InChI validator/search
                warn('Missing InChI for species ' + spec['species-name'])
            species_id[prop.attrib['id']] = spec

    if not property_id:
        raise MissingElementError('property')

    # now get data points
    datapoints = []
    for dp in dataGroup.findall('dataPoint'):
        datapoint = {}
        if 'composition' in property_id.values():
            datapoint['composition'] = {'species': [], 'kind': None}
        for val in dp:
            # handle "regular" properties differently than composition
            if property_id.get(val.tag) in datagroup_properties:
                units = unit_id[val.tag]
                if units == 'Torr':
                    units = 'torr'
                datapoint[property_id[val.tag].replace(' ', '-')] = [val.text + ' ' + units]
            elif property_id.get(val.tag) == 'composition':
                spec = {}
                spec['species-name'] = species_id[val.tag]['species-name']
                spec['InChI'] = species_id[val.tag].get('InChI')
                units = unit_id[val.tag]

                # If mole or mass fraction, just set value
                if units in ['mole fraction', 'mass fraction', 'mole percent']:
                    spec['amount'] = [float(val.text)]
                elif units == 'percent':
                    # assume this means mole percent
                    warn('Assuming percent in composition means mole percent')
                    spec['amount'] = [float(val.text)]
                    units = 'mole percent'
                elif units == 'ppm':
                    # assume molar ppm, convert to mole fraction
                    warn('Assuming molar ppm in composition and converting to mole fraction')
                    spec['amount'] = [float(val.text) * 1.e-6]
                    units = 'mole fraction'
                elif units == 'ppb':
                    # assume molar ppb, convert to mole fraction
                    warn('Assuming molar ppb in composition and converting to mole fraction')
                    spec['amount'] = [float(val.text) * 1.e-9]
                    units = 'mole fraction'
                else:
                    raise KeywordError('composition units need to be one of: mole fraction, '
                                       'mass fraction, mole percent, percent, ppm, or ppb.')

                # check consistency of composition type
                if datapoint['composition']['kind'] is None:
                    datapoint['composition']['kind'] = units
                elif datapoint['composition']['kind'] != units:
                    raise KeywordError('composition units ' + units +
                                       ' not consistent with ' +
                                       datapoint['composition']['kind'])

                datapoint['composition']['species'].append(spec)
            else:
                raise KeywordError('value missing from properties: ' + val.tag)

        datapoints.append(datapoint)

    if len(datapoints) == 0:
        raise MissingElementError('dataPoint')

    # ReSpecTh files can have other dataGroups with pressure, volume, or temperature histories
    if len(dataGroups) > 1:
        datapoints[0]['time-histories'] = []
        for dataGroup in dataGroups[1:]:
            time_tag = None
            quant_tags = []
            quant_dicts = []
            quant_types = []
            for prop in dataGroup.findall('property'):
                if prop.attrib['name'] == 'time':
                    time_dict = {'units': prop.attrib['units'], 'column': 0}
                    time_tag = prop.attrib['id']
                elif prop.attrib['name'] in ['volume', 'temperature', 'pressure']:
                    quant_types.append(prop.attrib['name'])
                    quant_dicts.append({'units': prop.attrib['units'], 'column': 1})
                    quant_tags.append(prop.attrib['id'])
                else:
                    raise KeywordError('Only volume, temperature, pressure, and time are '
                                       'allowed in a time-history dataGroup.')

            if time_tag is None or len(quant_tags) == 0:
                raise KeywordError('Both time and quantity properties required for '
                                   'time-history.')

            time_histories = [
                {'time': time_dict, 'quantity': q, 'type': t, 'values': []}
                for (q, t) in zip(quant_dicts, quant_types)
            ]

            # collect volume-time history
            for dp in dataGroup.findall('dataPoint'):
                time = None
                quants = {}
                for val in dp:
                    if val.tag == time_tag:
                        time = float(val.text)
                    elif val.tag in quant_tags:
                        quant = float(val.text)
                        tag_idx = quant_tags.index(val.tag)
                        quant_type = quant_types[tag_idx]
                        quants[quant_type] = quant
                    else:
                        raise KeywordError('Value tag {} not found in dataGroup tags: '
                                           '{}'.format(val.tag, quant_tags))
                if time is None or len(quants) == 0:
                    raise KeywordError('Both time and quantity values required in each '
                                       'time-history dataPoint.')

                for t in time_histories:
                    t['values'].append([time, quants[t['type']]])

            datapoints[0]['time-histories'].extend(time_histories)

    return datapoints

Get or create an entry using obtained information from an IP.

Args:
    ip (str): IP address xxx.xxx.xxx.xxx.

Returns:
    tuple: (IPInfo instance, created flag), or (None, False) if the API
    returned no usable data.

def get_or_create_from_ip(ip):
    data = ip_api_handler.get(ip)
    if data and any(v for v in data.values()):
        # make sure the IP address itself is set
        if not data.get('ip_address'):
            data['ip_address'] = ip
        return IPInfo.objects.get_or_create(**data)
    return None, False

Update the IP info.

Args:
    since_days (int): if checked less than this number of days ago,
        don't check again (defaults to 10 days).
    save (bool): whether to save anyway or not.
    force (bool): whether to update ip_info to last checked one.

Returns:
    bool: check was run. IPInfo might not have been updated.

def update_ip_info(self, since_days=10, save=False, force=False):
    # If ip already checked
    try:
        last_check = IPInfoCheck.objects.get(
            ip_address=self.client_ip_address)

        # If checked less than since_days ago, don't check again
        since_last = datetime.date.today() - last_check.date
        if since_last <= datetime.timedelta(days=since_days):
            if not self.ip_info or (
                    self.ip_info != last_check.ip_info and force):
                self.ip_info = last_check.ip_info
                self.save()
                return True
            elif save:
                self.save()
            return False

        # Get or create ip_info object
        ip_info, created = IPInfo.get_or_create_from_ip(
            self.client_ip_address)

        # Update check time
        last_check.date = datetime.date.today()
        last_check.save()

        # Maybe data changed
        if created:
            last_check.ip_info = ip_info
            self.ip_info = ip_info
            self.save()
            return True
        elif save:
            self.save()

        return False
    except IPInfoCheck.DoesNotExist:
        # Else if ip never checked, check it and set ip_info
        self.ip_info = IPInfoCheck.check_ip(self.client_ip_address)
        self.save()
        return True

Validate the parsed YAML file for adherence to the ChemKED format.

Arguments:
    properties (`dict`): Dictionary created from the parsed YAML file

Raises:
    `ValueError`: If the YAML file cannot be validated, a `ValueError` is
    raised whose string contains the errors that are present.

def validate_yaml(self, properties):
    validator = OurValidator(schema)
    if not validator.validate(properties):
        for key, value in validator.errors.items():
            if any(['unallowed value' in v for v in value]):
                print(('{key} has an illegal value. Allowed values are {values} and are case '
                       'sensitive.').format(key=key, values=schema[key]['allowed']))

        raise ValueError(validator.errors)

Convert ChemKED record to ReSpecTh XML file.

This converter uses common information in a ChemKED file to generate a
ReSpecTh XML file. Note that some information may be lost, as ChemKED
stores some additional attributes.

Arguments:
    filename (`str`): Filename for output ReSpecTh XML file.

Example:
    >>> dataset = ChemKED(yaml_file)
    >>> dataset.convert_to_ReSpecTh(xml_file)

def convert_to_ReSpecTh(self, filename):
    root = etree.Element('experiment')

    file_author = etree.SubElement(root, 'fileAuthor')
    file_author.text = self.file_authors[0]['name']

    # right now ChemKED just uses an integer file version
    file_version = etree.SubElement(root, 'fileVersion')
    major_version = etree.SubElement(file_version, 'major')
    major_version.text = str(self.file_version)
    minor_version = etree.SubElement(file_version, 'minor')
    minor_version.text = '0'

    respecth_version = etree.SubElement(root, 'ReSpecThVersion')
    major_version = etree.SubElement(respecth_version, 'major')
    major_version.text = '1'
    minor_version = etree.SubElement(respecth_version, 'minor')
    minor_version.text = '0'

    # Only ignition delay currently supported
    exp = etree.SubElement(root, 'experimentType')
    if self.experiment_type == 'ignition delay':
        exp.text = 'Ignition delay measurement'
    else:
        raise NotImplementedError('Only ignition delay type supported for conversion.')

    reference = etree.SubElement(root, 'bibliographyLink')
    citation = ''
    for author in self.reference.authors:
        citation += author['name'] + ', '
    citation += (self.reference.journal + ' (' + str(self.reference.year) + ') ' +
                 str(self.reference.volume) + ':' + self.reference.pages + '. ' +
                 self.reference.detail)
    reference.set('preferredKey', citation)
    reference.set('doi', self.reference.doi)

    apparatus = etree.SubElement(root, 'apparatus')
    kind = etree.SubElement(apparatus, 'kind')
    kind.text = self.apparatus.kind

    common_properties = etree.SubElement(root, 'commonProperties')
    # ChemKED objects have no common properties once loaded. Check for properties
    # among datapoints that tend to be common
    common = []
    composition = self.datapoints[0].composition

    # Composition type *has* to be the same
    composition_type = self.datapoints[0].composition_type
    if not all(dp.composition_type == composition_type for dp in self.datapoints):
        raise NotImplementedError('Error: ReSpecTh does not support varying composition '
                                  'type among datapoints.')

    if all([composition == dp.composition for dp in self.datapoints]):
        # initial composition is common
        common.append('composition')
        prop = etree.SubElement(common_properties, 'property')
        prop.set('name', 'initial composition')
        for species_name, species in composition.items():
            component = etree.SubElement(prop, 'component')
            species_link = etree.SubElement(component, 'speciesLink')
            species_link.set('preferredKey', species_name)
            if species.InChI is not None:
                species_link.set('InChI', species.InChI)

            amount = etree.SubElement(component, 'amount')
            amount.set('units', composition_type)
            amount.text = str(species.amount.magnitude)

    # If multiple datapoints present, then find any common properties. If only
    # one datapoint, then composition should be the only "common" property.
    if len(self.datapoints) > 1:
        for prop_name in datagroup_properties:
            attribute = prop_name.replace(' ', '_')
            quantities = [getattr(dp, attribute, False) for dp in self.datapoints]

            # All quantities must have the property in question and all the
            # values must be equal
            if all(quantities) and quantities.count(quantities[0]) == len(quantities):
                common.append(prop_name)
                prop = etree.SubElement(common_properties, 'property')
                prop.set('description', '')
                prop.set('name', prop_name)
                prop.set('units', str(quantities[0].units))
                value = etree.SubElement(prop, 'value')
                value.text = str(quantities[0].magnitude)

    # Ignition delay can't be common, unless only a single datapoint.

    datagroup = etree.SubElement(root, 'dataGroup')
    datagroup.set('id', 'dg1')
    datagroup_link = etree.SubElement(datagroup, 'dataGroupLink')
    datagroup_link.set('dataGroupID', '')
    datagroup_link.set('dataPointID', '')

    property_idx = {}
    labels = {'temperature': 'T', 'pressure': 'P',
              'ignition delay': 'tau', 'pressure rise': 'dP/dt',
              }

    for prop_name in datagroup_properties:
        attribute = prop_name.replace(' ', '_')
        # This can't be hasattr because properties are set to the value None
        # if no value is specified in the file, so the attribute always exists
        prop_indices = [i for i, dp in enumerate(self.datapoints)
                        if getattr(dp, attribute) is not None]
        if prop_name in common or not prop_indices:
            continue

        prop = etree.SubElement(datagroup, 'property')
        prop.set('description', '')
        prop.set('name', prop_name)
        units = str(getattr(self.datapoints[prop_indices[0]], attribute).units)
        prop.set('units', units)
        idx = 'x{}'.format(len(property_idx) + 1)
        property_idx[idx] = {'name': prop_name, 'units': units}
        prop.set('id', idx)
        prop.set('label', labels[prop_name])

    # Need to handle datapoints with possibly different species in the initial composition
    if 'composition' not in common:
        for dp in self.datapoints:
            for species in dp.composition.values():
                # Only add new property for species not already considered
                has_spec = any([species.species_name in d.values()
                                for d in property_idx.values()])
                if not has_spec:
                    prop = etree.SubElement(datagroup, 'property')
                    prop.set('description', '')
                    idx = 'x{}'.format(len(property_idx) + 1)
                    property_idx[idx] = {'name': species.species_name}
                    prop.set('id', idx)
                    prop.set('label', '[' + species.species_name + ']')
                    prop.set('name', 'composition')
                    prop.set('units', self.datapoints[0].composition_type)

                    species_link = etree.SubElement(prop, 'speciesLink')
                    species_link.set('preferredKey', species.species_name)
                    if species.InChI is not None:
                        species_link.set('InChI', species.InChI)

    for dp in self.datapoints:
        datapoint = etree.SubElement(datagroup, 'dataPoint')
        for idx, val in property_idx.items():
            # handle regular properties a bit differently than composition
            if val['name'] in datagroup_properties:
                value = etree.SubElement(datapoint, idx)
                quantity = getattr(dp, val['name'].replace(' ', '_')).to(val['units'])
                value.text = str(quantity.magnitude)
            else:
                # composition
                for item in dp.composition.values():
                    if item.species_name == val['name']:
                        value = etree.SubElement(datapoint, idx)
                        value.text = str(item.amount.magnitude)

    # See https://stackoverflow.com/a/16097112 for the None.__ne__
    history_types = ['volume_history', 'temperature_history', 'pressure_history',
                     'piston_position_history', 'light_emission_history',
                     'OH_emission_history', 'absorption_history']
    time_histories = [getattr(dp, p) for dp in self.datapoints for p in history_types]
    time_histories = list(filter(None.__ne__, time_histories))

    if len(self.datapoints) > 1 and len(time_histories) > 1:
        raise NotImplementedError('Error: ReSpecTh files do not support multiple datapoints '
                                  'with a time history.')
    elif len(time_histories) > 0:
        for dg_idx, hist in enumerate(time_histories):
            if hist.type not in ['volume', 'temperature', 'pressure']:
                warn('The time-history type {} is not supported by ReSpecTh for '
                     'ignition delay experiments'.format(hist.type))
                continue

            datagroup = etree.SubElement(root, 'dataGroup')
            datagroup.set('id', 'dg{}'.format(dg_idx))
            datagroup_link = etree.SubElement(datagroup, 'dataGroupLink')
            datagroup_link.set('dataGroupID', '')
            datagroup_link.set('dataPointID', '')

            # Time history has two properties: time and quantity.
            prop = etree.SubElement(datagroup, 'property')
            prop.set('description', '')
            prop.set('name', 'time')
            prop.set('units', str(hist.time.units))
            time_idx = 'x{}'.format(len(property_idx) + 1)
            property_idx[time_idx] = {'name': 'time'}
            prop.set('id', time_idx)
            prop.set('label', 't')

            prop = etree.SubElement(datagroup, 'property')
            prop.set('description', '')
            prop.set('name', hist.type)
            prop.set('units', str(hist.quantity.units))
            quant_idx = 'x{}'.format(len(property_idx) + 1)
            property_idx[quant_idx] = {'name': hist.type}
            prop.set('id', quant_idx)
            prop.set('label', 'V')

            for time, quantity in zip(hist.time, hist.quantity):
                datapoint = etree.SubElement(datagroup, 'dataPoint')
                value = etree.SubElement(datapoint, time_idx)
                value.text = str(time.magnitude)
                value = etree.SubElement(datapoint, quant_idx)
                value.text = str(quantity.magnitude)

    ign_types = [getattr(dp, 'ignition_type', False) for dp in self.datapoints]
    # All datapoints must have the same ignition target and type
    if all(ign_types) and ign_types.count(ign_types[0]) == len(ign_types):
        # In ReSpecTh files all datapoints must share ignition type
        ignition = etree.SubElement(root, 'ignitionType')
        if ign_types[0]['target'] in ['pressure', 'temperature']:
            ignition.set('target', ign_types[0]['target'][0].upper())
        else:
            # options left are species
            ignition.set('target', self.datapoints[0].ignition_type['target'])
        if ign_types[0]['type'] == 'd/dt max extrapolated':
            ignition.set('type', 'baseline max intercept from d/dt')
        else:
            ignition.set('type', self.datapoints[0].ignition_type['type'])
    else:
        raise NotImplementedError('Different ignition targets or types for multiple '
                                  'datapoints are not supported in ReSpecTh.')

    et = etree.ElementTree(root)
    et.write(filename, encoding='utf-8', xml_declaration=True)

    # now do a "pretty" rewrite
    xml = minidom.parse(filename)
    xml_string = xml.toprettyxml(indent=' ')
    with open(filename, 'w') as f:
        f.write(xml_string)

    print('Converted to ' + filename)

Return an iterator over the values of l with the duplicates removed, preserving order.

Args:
    l (list): the list to filter.

Returns:
    generator: the same values without duplicates, in their original order.

def distinct(l):
    seen = set()
    seen_add = seen.add
    return (_ for _ in l if not (_ in seen or seen_add(_)))

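A quick example; since the function returns a generator, wrap it in list() to materialize:

    list(distinct([1, 2, 1, 3, 2]))  # [1, 2, 3]
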
Initialize the class.

Arguments:
    device: string containing the serial device allocated to SCSGate.
    logger: instance of logging.

def __init__(self, device, logger):
    self._serial = pyserial.Serial(device, 115200)

    logger.info("Clearing buffers")
    self._serial.write(b"@b")
    ret = self._serial.read(1)
    if ret != b"k":
        raise RuntimeError("Error while clearing buffers")

    # ensure pending operations are terminated (eg: @r, @l)
    self._serial.write(b"@c")
    ret = self._serial.read()
    if ret != b"k":
        raise RuntimeError("Error while cancelling pending operations")

    logger.info("Enabling ASCII mode")
    self._serial.write(b"@MA")
    ret = self._serial.read(1)
    if ret != b"k":
        raise RuntimeError("Error while enabling ASCII mode")

    logger.info("Filter Ack messages")
    self._serial.write(b"@F2")
    ret = self._serial.read(1)
    if ret != b"k":
        raise RuntimeError("Error while setting filter")

Load values into the class's ConfigProperty attributes (validating types if possible).

Args:
    loaders: iterable of AbstractLoader instances. ConfigProperty values
        are loaded from these sources, and the order indicates preference.

def __init__(self, loaders):
    if not loaders:
        # Require loaders only if the class has ConfigProperty attributes
        if any(self._iter_config_props()):
            raise AssertionError('Class has ConfigProperty attributes: must provide loader(s)')

    self._update_property_keys()
    self.varz = {}
    self._loaders = loaders
    self._load()

Return an absolute path to a target file that is located in the same directory as as_file.

Args:
    as_file: file name (including __file__); the directory path of this
        file is used.
    target_file: name of the target file.

def in_same_dir(as_file, target_file):
    return os.path.abspath(os.path.join(os.path.dirname(as_file), target_file))

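A typical call from inside a module, using __file__ ('settings.ini' is a hypothetical sibling file):

    config_path = in_same_dir(__file__, 'settings.ini')
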
Checks that the temperature ranges given for thermo data are valid.

Args:
    isvalid_t_range (`bool`): flag from schema indicating T range is to be checked.
    field (`str`): T_range
    values (`list`): List of temperature values indicating low, middle, and high ranges.

The rule's arguments are validated against this schema:
    {'isvalid_t_range': {'type': 'bool'}, 'field': {'type': 'str'},
     'value': {'type': 'list'}}

def _validate_isvalid_t_range(self, isvalid_t_range, field, values):
    if all([isinstance(v, (float, int)) for v in values]):
        # If no units given, assume Kelvin
        T_low = Q_(values[0], 'K')
        T_mid = Q_(values[1], 'K')
        T_hi = Q_(values[2], 'K')
    elif all([isinstance(v, str) for v in values]):
        T_low = Q_(values[0])
        T_mid = Q_(values[1])
        T_hi = Q_(values[2])
    else:
        self._error(field, 'The temperatures in the range must all be either with units or '
                           'without units, they cannot be mixed')
        return False

    if min([T_low, T_mid, T_hi]) != T_low:
        self._error(field, 'The first element of the T_range must be the lower limit')
    if max([T_low, T_mid, T_hi]) != T_hi:
        self._error(field, 'The last element of the T_range must be the upper limit')

Checks for appropriate units using Pint unit registry.

Args:
    isvalid_unit (`bool`): flag from schema indicating units to be checked.
    field (`str`): property associated with units in question.
    value (`dict`): dictionary of values from file associated with this property.

The rule's arguments are validated against this schema:
    {'isvalid_unit': {'type': 'bool'}, 'field': {'type': 'str'},
     'value': {'type': 'dict'}}

def _validate_isvalid_unit(self, isvalid_unit, field, value):
    quantity = 1.0 * units(value['units'])
    try:
        quantity.to(property_units[field])
    except pint.DimensionalityError:
        self._error(field, 'incompatible units; should be consistent '
                           'with ' + property_units[field])

Checks valid reference metadata using DOI (if present).

Args:
    isvalid_reference (`bool`): flag from schema indicating reference to be checked.
    field (`str`): 'reference'
    value (`dict`): dictionary of reference metadata.

The rule's arguments are validated against this schema:
    {'isvalid_reference': {'type': 'bool'}, 'field': {'type': 'str'},
     'value': {'type': 'dict'}}

def _validate_isvalid_reference(self, isvalid_reference, field, value):
    if 'doi' in value:
        try:
            ref = crossref_api.works(ids=value['doi'])['message']
        except (HTTPError, habanero.RequestError):
            self._error(field, 'DOI not found')
            return
        except ConnectionError:
            warn('network not available, DOI not validated.')
            return

        # Assume that the reference returned by the DOI lookup always has a container-title
        ref_container = ref.get('container-title')[0]
        # TODO: Add other container types: value.get('journal') or value.get('report') or ...
        # note that there's a type field in the ref that is journal-article,
        # proceedings-article
        container = value.get('journal')
        if container is None or container != ref_container:
            self._error(field, 'journal should be {}'.format(ref_container))

        # Assume that the reference returned by DOI lookup always has a year
        ref_year = ref.get('published-print') or ref.get('published-online')
        ref_year = ref_year['date-parts'][0][0]
        year = value.get('year')
        if year is None or year != ref_year:
            self._error(field, 'year should be {}'.format(ref_year))

        # Volume number might not be in the reference
        ref_volume = ref.get('volume')
        volume = value.get('volume')
        if ref_volume is None:
            if volume is not None:
                self._error(field, 'Volume was specified in the YAML but is not present '
                                   'in the DOI reference.')
        else:
            if volume is None or int(volume) != int(ref_volume):
                self._error(field, 'volume should be {}'.format(ref_volume))

        # Pages might not be in the reference
        ref_pages = ref.get('page')
        pages = value.get('pages')
        if ref_pages is None:
            if pages is not None:
                self._error(field, 'Pages were specified in the YAML but are not present '
                                   'in the DOI reference.')
        else:
            if pages is None or pages != ref_pages:
                self._error(field, 'pages should be {}'.format(ref_pages))

        # check that all authors present
        authors = value['authors'][:]
        author_names = [a['name'] for a in authors]
        for author in ref['author']:
            # find using family name
            author_match = next(
                (a for a in authors
                 if compare_name(author['given'], author['family'], a['name'])),
                None
            )
            # error if missing author in given reference information
            if author_match is None:
                self._error(field, 'Missing author: ' +
                            ' '.join([author['given'], author['family']]))
            else:
                author_names.remove(author_match['name'])

                # validate ORCID if given
                orcid = author.get('ORCID')
                if orcid:
                    # Crossref may give ORCID as http://orcid.org/####-####-####-####
                    # so need to strip the leading URL
                    orcid = orcid[orcid.rfind('/') + 1:]
                    if 'ORCID' in author_match:
                        if author_match['ORCID'] != orcid:
                            self._error(
                                field, author_match['name'] + ' ORCID does ' +
                                'not match that in reference. Reference: ' +
                                orcid + '. Given: ' + author_match['ORCID']
                            )
                    else:
                        # ORCID not given, suggest adding it
                        warn('ORCID ' + orcid + ' missing for ' + author_match['name'])

        # check for extra names given
        if len(author_names) > 0:
            self._error(field, 'Extra author(s) given: ' + ', '.join(author_names))

Checks for valid ORCID if given.

Args:
    isvalid_orcid (`bool`): flag from schema indicating ORCID to be checked.
    field (`str`): 'author'
    value (`dict`): dictionary of author metadata.

The rule's arguments are validated against this schema:
    {'isvalid_orcid': {'type': 'bool'}, 'field': {'type': 'str'},
     'value': {'type': 'dict'}}

def _validate_isvalid_orcid(self, isvalid_orcid, field, value):
    if isvalid_orcid and 'ORCID' in value:
        try:
            res = search_orcid(value['ORCID'])
        except ConnectionError:
            warn('network not available, ORCID not validated.')
            return
        except HTTPError:
            self._error(field, 'ORCID incorrect or invalid for ' + value['name'])
            return

        family_name = res['name']['family-name']['value']
        given_name = res['name']['given-names']['value']
        if not compare_name(given_name, family_name, value['name']):
            self._error(field, 'Name and ORCID do not match. Name supplied: ' +
                        value['name'] + '. Name associated with ORCID: ' +
                        ' '.join([given_name, family_name]))

Checks for valid specification of composition.

Args:
    isvalid_composition (bool): flag from schema indicating composition to be checked.
    field (str): 'composition'
    value (dict): dictionary of composition.

The rule's arguments are validated against this schema:
    {'isvalid_composition': {'type': 'bool'}, 'field': {'type': 'str'},
     'value': {'type': 'dict'}}

def _validate_isvalid_composition(self, isvalid_composition, field, value):
    sum_amount = 0.0
    if value['kind'] in ['mass fraction', 'mole fraction']:
        low_lim = 0.0
        up_lim = 1.0
        total_amount = 1.0
    elif value['kind'] in ['mole percent']:
        low_lim = 0.0
        up_lim = 100.0
        total_amount = 100.0
    else:
        self._error(field, 'composition kind must be "mole percent", "mass fraction", or '
                           '"mole fraction"')
        return False

    for sp in value['species']:
        amount = sp['amount'][0]
        sum_amount += amount

        # Check that amount within bounds, based on kind specified
        if amount < low_lim:
            self._error(field, 'Species ' + sp['species-name'] + ' ' +
                        value['kind'] + ' must be greater than {:.1f}'.format(low_lim))
        elif amount > up_lim:
            self._error(field, 'Species ' + sp['species-name'] + ' ' +
                        value['kind'] + ' must be less than {:.1f}'.format(up_lim))

    # Make sure mole/mass fractions sum to 1
    if not np.isclose(total_amount, sum_amount):
        self._error(field, 'Species ' + value['kind'] +
                    's do not sum to {:.1f}: '.format(total_amount) +
                    '{:f}'.format(sum_amount))

Init method.

Args:
    file_path_regex (regex): the regex to find the log files.
    log_format_regex (regex): the regex to parse the log files.
    top_dir (str): the path to the root directory containing the logs.

def __init__(self, file_path_regex=None, log_format_regex=None, top_dir=None):
    if file_path_regex is not None:
        self.file_path_regex = file_path_regex
    if log_format_regex is not None:
        self.log_format_regex = log_format_regex
    if top_dir is not None:
        self.top_dir = top_dir
    self._content = None

Get stats for most visited pages. Takes no arguments; the logs data is read from the RequestLog model.

Returns:
    dict: more_than_10 and less_than_10 keys; lists of dicts (bound + URL list)
    and per-type occurrence counts, respectively.

def most_visited_pages_stats():
    stats = {'more_than_10': [], 'less_than_10': {}}
    counter = Counter(list(RequestLog.objects.values_list('url', flat=True)))
    most_visited_pages = counter.most_common()
    bounds = (10000, 1000, 100, 10)
    subsets = [[] for _ in bounds]

    for u, c in most_visited_pages:
        if url_is_ignored(u):
            continue
        if c >= bounds[0]:
            subsets[0].append([u, c])
        elif c < bounds[-1]:
            subsets[-1].append([u, c])
        else:
            for i, bound in enumerate(bounds[:-1]):
                if bound > c >= bounds[i + 1]:
                    subsets[i + 1].append([u, c])
                    break

    stats['more_than_10'] = [
        {'bound': bound, 'subset': subset}
        for bound, subset in zip(bounds[:-1], subsets[:-1])]

    for subset in subsets[:-1]:
        for uc in subset:
            if url_is_project(uc[0]):
                if url_is_asset(uc[0]):
                    uc.append(ASSET)
                else:
                    uc.append(PROJECT)
            else:
                if url_is_asset(uc[0]):
                    uc.append(OLD_ASSET)
                elif url_is_common_asset(uc[0]):
                    uc.append(COMMON_ASSET)
                elif url_is_old_project(uc[0]):
                    uc.append(OLD_PROJECT)
                elif url_is_false_negative(uc[0]):
                    uc.append(FALSE_NEGATIVE)
                else:
                    uc.append(SUSPICIOUS)

    occurrences = {name: {'distinct': 0, 'total': 0}
                   for name in set(URL_TYPE.keys()) - {IGNORED}}

    for u, c in subsets[-1]:
        if url_is_project(u):
            if url_is_asset(u):
                occurrences[ASSET]['distinct'] += 1
                occurrences[ASSET]['total'] += c
            else:
                occurrences[PROJECT]['distinct'] += 1
                occurrences[PROJECT]['total'] += c
        else:
            if url_is_asset(u):
                occurrences[OLD_ASSET]['distinct'] += 1
                occurrences[OLD_ASSET]['total'] += c
            elif url_is_common_asset(u):
                occurrences[COMMON_ASSET]['distinct'] += 1
                occurrences[COMMON_ASSET]['total'] += c
            elif url_is_old_project(u):
                occurrences[OLD_PROJECT]['distinct'] += 1
                occurrences[OLD_PROJECT]['total'] += c
            elif url_is_false_negative(u):
                occurrences[FALSE_NEGATIVE]['distinct'] += 1
                occurrences[FALSE_NEGATIVE]['total'] += c
            else:
                occurrences[SUSPICIOUS]['distinct'] += 1
                occurrences[SUSPICIOUS]['total'] += c

    stats['less_than_10'] = occurrences
    return stats

Validation type for external resources. Attempts to connect to the resource, backing off on failure.

Args:
    max_tries: max number of times to attempt a connection before failing.
    max_wait: max number of seconds to wait between connection attempts.
        This can be used to cap the exponential backoff.

def __init__(self, max_tries=5, max_wait=10, *args, **kwargs):
    self._max_tries = max_tries
    if self._max_tries < 1:
        raise TypeError('max_tries must be a positive integer')
    self._max_wait = max_wait
    if self._max_wait < 1:
        raise TypeError('max_wait must be >= 1')
    super(NetworkType, self).__init__(*args, **kwargs)

Attempt to connect to http.

Args:
    url: string in the form "http://[host]"

def _test_connection(url):
    import requests
    try:
        # Don't care about status code here as long as the connection was successful
        requests.head(url)
    except requests.exceptions.ConnectionError as e:
        raise ValidationError(e)

Attempt to connect to redis.

Args:
    url: string in the form "redis://[:password@]host[:port][/db-number][?option=value]"

def _test_connection(url):
    import redis
    try:
        with _disconnecting(redis.StrictRedis.from_url(url)) as conn:
            conn.ping()
    except redis.connection.ConnectionError as e:
        raise ValidationError(e)

Attempt to connect to postgres.

Args:
    url: string in the form "postgres://[user]:[password]@[host][:port][/database]"

def _test_connection(url):
    import psycopg2
    try:
        with closing(psycopg2.connect(dsn=url)) as conn:
            conn.cursor()
    except psycopg2.OperationalError as e:
        raise ValidationError(e)

Attempt to connect to amqp.

Args:
    url: string in the form "amqp://[user]:[password]@[host]"

def _test_connection(url):
    import pika
    try:
        with closing(pika.BlockingConnection(pika.URLParameters(url))) as conn:
            conn.channel()
    except pika.exceptions.ConnectionClosed as e:
        raise ValidationError(e)

Attempt to connect to etcd.

Args:
    url: string in the form "[host]:[port]"

def _test_connection(url):
    import etcd
    host, port = url.split(':')
    try:
        etcd.Client(host=host, port=int(port)).get('/')
    except etcd.EtcdConnectionFailed as e:
        raise ValidationError(e)

Initializer.

Args:
    source_dict: used to initialize the class. Use the constructors to
        read from Vault.
    url: Vault url.
    path: Vault path where secrets are stored.
    token: token (must have access to the Vault path).

def __init__(self, source_dict, url, path, token):
    self._vault_url = url
    self._path = path
    self._token = token
    super(VaultLoader, self).__init__(source_dict)

827,324
Constructor: use token authentication to read secrets from a Vault path. See https://www.vaultproject.io/docs/auth/token.html Args: url: Vault url path: Vault path where secrets are stored token: token (must have access to vault path)
def from_token(cls, url, path, token): source_dict = cls._fetch_secrets(url, path, token) return cls(source_dict, url, path, token)
827,325
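A hedged usage sketch for the constructor above. The URL, path, and token values are placeholders, and dict-style access is an assumption based on the source_dict handed to the base class:

# Hypothetical values; requires a reachable Vault server.
loader = VaultLoader.from_token(
    url='https://vault.example.com:8200',
    path='secret/myapp',
    token='s.placeholder-token')
db_password = loader['db_password']  # assumes dict-like base class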
Constructor: use AppRole authentication to read secrets from a Vault path See https://www.vaultproject.io/docs/auth/approle.html Args: url: Vault url path: Vault path where secrets are stored role_id: Vault RoleID secret_id: Vault SecretID
def from_app_role(cls, url, path, role_id, secret_id): token = cls._fetch_app_role_token(url, role_id, secret_id) source_dict = cls._fetch_secrets(url, path, token) return cls(source_dict, url, path, token)
827,326
Checks if a string represents a valid quantities unit. Args: w (str): A string to be tested against the set of valid quantities units. Returns: True if the string can be used as a unit in the quantities module.
def isValidUnit(self, w): bad = set(['point', 'a']) if w in bad: return False try: pq.Quantity(0.0, w) return True except: return w == '/'
827,397
Collects all the valid units from an input string. Works by appending consecutive words from the string and cross-referencing them with a set of valid units. Args: inp (str): Some text which hopefully contains descriptions of different units. Returns: A list of strings, each entry in which is a valid quantities unit.
def extractUnits(self, inp): inp = self._preprocess(inp) units = [] description = "" for w in inp.split(' '): if self.isValidUnit(w) or w == '/': if description: description += " " description += w else: if description: units.append(description) description = "" if description: units.append(description) return units
827,398
Converts a string representation of some quantity of units into a quantities object. Args: inp (str): A textual representation of some quantity of units, e.g., "fifty kilograms". Returns: A quantities object representing the described quantity and its units.
def convert(self, inp): inp = self._preprocess(inp) n = NumberService().longestNumber(inp) units = self.extractUnits(inp) # Convert to quantity object, attempt conversion quantity = pq.Quantity(float(n), units[0]) quantity.units = units[1] return quantity
827,399
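A short usage sketch. The class name QuantitiesService is hypothetical (the enclosing class is not shown), and the input must name a quantity plus a source and a target unit, since convert() reads units[0] and units[1]:

svc = QuantitiesService()  # hypothetical class name
result = svc.convert("fifty kilograms to pounds")
# -> a quantities object: pq.Quantity(50.0, 'kilograms')
#    rescaled to 'pounds', roughly 110.2 lb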
Solves the equation specified by the input string. Args: inp (str): An equation, specified in words, containing some combination of numbers, binary, and unary operations. Returns: The floating-point result of carrying out the computation.
def parseEquation(self, inp): inp = MathService._preprocess(inp) split = inp.split(' ') # Recursive call on unary operators for i, w in enumerate(split): if w in self.__unaryOperators__: op = self.__unaryOperators__[w] # Split equation into halves eq1 = ' '.join(split[:i]) eq2 = ' '.join(split[i + 1:]) # Calculate second half result = MathService._applyUnary(self.parseEquation(eq2), op) return self.parseEquation(eq1 + " " + str(result)) def extractNumbersAndSymbols(inp): numbers = [] symbols = [] # Divide into values (numbers), operators (symbols) next_number = "" for w in inp.split(' '): if w in self.__binaryOperators__: symbols.append(self.__binaryOperators__[w]) if next_number: numbers.append(next_number) next_number = "" else: if next_number: next_number += " " next_number += w if next_number: numbers.append(next_number) # Cast numbers from words to integers def convert(n): if n in self.__constants__: return self.__constants__[n] converter = NumberService() return converter.parse(n) numbers = [convert(n) for n in numbers] return numbers, symbols numbers, symbols = extractNumbersAndSymbols(inp) return MathService._calculate(numbers, symbols)
827,881
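A hand-traced sketch of how the parser above handles an equation. The concrete operator words live in class tables not shown here (__unaryOperators__, __binaryOperators__), so the vocabulary is an assumption:

# Assuming 'plus' and 'times' are keys in __binaryOperators__:
svc = MathService()  # hypothetical class name
svc.parseEquation("two plus two times three")
# extractNumbersAndSymbols -> numbers [2.0, 2.0, 3.0] and two operators;
# the final value is whatever _calculate defines for that sequence.
# A unary word would instead be applied to everything right of it,
# with the result substituted back into the left half recursively.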
A general method for parsing word-representations of numbers. Supports floats and integers. Args: words (str): Description of an arbitrary number. Returns: A double representation of the words.
def parse(self, words): def exact(words): try: return float(words) except: return None guess = exact(words) if guess is not None: return guess split = words.split(' ') # Replace final ordinal/fraction with number if split[-1] in self.__fractions__: split[-1] = self.__fractions__[split[-1]] elif split[-1] in self.__ordinals__: split[-1] = self.__ordinals__[split[-1]] parsed_ordinals = ' '.join(split) return self.parseFloat(parsed_ordinals)
828,169
Convert a floating-point number described in words to a double. Supports two kinds of descriptions: those with a 'point' (e.g., "one point two five") and those with a fraction (e.g., "one and a quarter"). Args: words (str): Description of the floating-point number. Returns: A double representation of the words.
def parseFloat(self, words): def pointFloat(words): m = re.search(r'(.*) point (.*)', words) if m: whole = m.group(1) frac = m.group(2) total = 0.0 coeff = 0.10 for digit in frac.split(' '): total += coeff * self.parse(digit) coeff /= 10.0 return self.parseInt(whole) + total return None def fractionFloat(words): m = re.search(r'(.*) and (.*)', words) if m: whole = self.parseInt(m.group(1)) frac = m.group(2) # Replace plurals frac = re.sub(r'(\w+)s(\b)', '\g<1>\g<2>', frac) # Convert 'a' to 'one' (e.g., 'a third' to 'one third') frac = re.sub(r'(\b)a(\b)', '\g<1>one\g<2>', frac) split = frac.split(' ') # Split fraction into num (regular integer), denom (ordinal) num = split[:1] denom = split[1:] while denom: try: # Test for valid num, denom num_value = self.parse(' '.join(num)) denom_value = self.parse(' '.join(denom)) return whole + float(num_value) / denom_value except: # Add another word to num num += denom[:1] denom = denom[1:] return None # Extract "one point two five"-type float result = pointFloat(words) if result: return result # Extract "one and a quarter"-type float result = fractionFloat(words) if result: return result # Parse as integer return self.parseInt(words)
828,170
Parses words to the integer they describe. Args: words (str): Description of the integer. Returns: An integer representation of the words.
def parseInt(self, words): # Remove 'and', case-sensitivity words = words.replace(" and ", " ").lower() # 'a' -> 'one' words = re.sub(r'(\b)a(\b)', '\g<1>one\g<2>', words) def textToNumber(s): a = re.split(r"[\s-]+", s) n = 0 g = 0 for w in a: x = NumberService.__small__.get(w, None) if x is not None: g += x elif w == "hundred": g *= 100 else: x = NumberService.__magnitude__.get(w, None) if x is not None: n += g * x g = 0 else: raise NumberService.NumberException( "Unknown number: " + w) return n + g return textToNumber(words)
828,171
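The grouping arithmetic in textToNumber, traced by hand (assuming __small__ and __magnitude__ hold the usual English number words):

# "three hundred and twenty five thousand" (the 'and' is stripped first):
#   'three'    -> g = 3
#   'hundred'  -> g = 3 * 100 = 300
#   'twenty'   -> g = 320;  'five' -> g = 325
#   'thousand' -> n += 325 * 1000; g = 0
# result: n + g = 325000
NumberService().parseInt("three hundred and twenty five thousand")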
Parses a number m into a human-ready string representation. For example, crops off floats if they're too accurate. Args: m (float): Floating-point number to be cleaned. Returns: Human-ready string description of the number.
def parseMagnitude(m): m = NumberService().parse(m) def toDecimalPrecision(n, k): return float("%.*f" % (k, round(n, k))) # Cast to two digits of precision digits = 2 magnitude = toDecimalPrecision(m, digits) # If value is really small, keep going while not magnitude: digits += 1 magnitude = toDecimalPrecision(m, digits) # If item is less than one, go one beyond 'necessary' number of digits if m < 1.0: magnitude = toDecimalPrecision(m, digits + 1) # Ignore decimal accuracy if irrelevant if int(magnitude) == magnitude: magnitude = int(magnitude) # Adjust for scientific notation magString = str(magnitude) magString = re.sub(r'(\d)e-(\d+)', '\g<1> times ten to the negative \g<2>', magString) magString = re.sub(r'(\d)e\+(\d+)', '\g<1> times ten to the \g<2>', magString) magString = re.sub(r'-(\d+)', 'negative \g<1>', magString) magString = re.sub(r'\b0(\d+)', '\g<1>', magString) return magString
828,172
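Hand-traced examples of the cleanup above (outputs are best-effort readings of the code, not verified runs):

parseMagnitude("7.53421")   # -> "7.53"  (two digits of precision)
parseMagnitude("3.0")       # -> "3"     (decimal dropped when irrelevant)
parseMagnitude("0.00001")   # -> "1 times ten to the negative 5"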
Extracts the longest valid numerical description from a string. Not guaranteed to return a result even if some valid numerical description exists (i.e., method is not particularly advanced). Args: inp (str): An arbitrary string, hopefully containing a number. Returns: The number with the longest string description in input, or None if not found.
def longestNumber(self, inp):
    split = inp.split(' ')

    # Assume just a single number
    numStart = None
    numEnd = None
    for i, w in enumerate(split):
        if self.isValid(w):
            if numStart is None:
                numStart = i
            numEnd = i
        else:
            # Check for ordinal, which would signify end
            w = re.sub(r'(\w+)s(\b)', '\g<1>\g<2>', w)
            if w in self.__ordinals__:
                if self.isValid(' '.join(split[numStart:i + 1])):
                    numEnd = i
                break

    # No number found: return None, as documented
    if numStart is None:
        return None

    description = ' '.join(split[numStart:numEnd + 1])
    return self.parse(description)
828,173
Extracts all day-related information from an input string. Ignores any information related to the specific time-of-day. Args: inp (str): Input string to be parsed. Returns: A list of datetime objects containing the extracted date from the input snippet, or an empty list if none found.
def extractDays(self, inp): inp = self._preprocess(inp) def extractDayOfWeek(dayMatch): if dayMatch.group(5) in self.__daysOfWeek__: return self.__daysOfWeek__.index(dayMatch.group(5)) elif dayMatch.group(6) in self.__daysOfWeek__: return self.__daysOfWeek__.index(dayMatch.group(6)) def extractMonth(dayMatch): if dayMatch.group(7) in self.__months__: return self.__months__.index(dayMatch.group(7)) + 1 elif dayMatch.group(7) in self.__shortMonths__: return self.__shortMonths__.index(dayMatch.group(7)) + 1 def extractDay(dayMatch): combined = dayMatch.group(8) + dayMatch.group(9) if combined in self.__dateDescriptors__: return self.__dateDescriptors__[combined] elif dayMatch.group(8) in self.__dateDescriptors__: return self.__dateDescriptors__[dayMatch.group(8)] elif int(dayMatch.group(8)) in self.__dateDescriptors__.values(): return int(dayMatch.group(8)) def extractDaysFrom(dayMatch): if not dayMatch.group(1): return 0 def numericalPrefix(dayMatch): # Grab 'three' of 'three weeks from' prefix = inp.split(dayMatch.group(1))[0].strip().split(' ') prefix.reverse() prefix = list(filter(lambda s: s != 'and', prefix)) # Generate best guess number service = NumberService() num = prefix[0] if service.isValid(num): for n in prefix[1:]: inc = n + " " + num if service.isValid(inc): num = inc else: break return service.parse(num) return 1 factor = numericalPrefix(dayMatch) if dayMatch.group(2) == 'week': return factor * 7 elif dayMatch.group(2) == 'day': return factor * 1 def handleMatch(dayMatch): def safe(exp): try: return exp() except: return False days_from = safe(lambda: extractDaysFrom(dayMatch)) today = safe(lambda: dayMatch.group(3) in self.__todayMatches__) tomorrow = safe(lambda: dayMatch.group(3) in self.__tomorrowMatches__) next_week = safe(lambda: dayMatch.group(4) == 'next') day_of_week = safe(lambda: extractDayOfWeek(dayMatch)) month = safe(lambda: extractMonth(dayMatch)) day = safe(lambda: extractDay(dayMatch)) # Convert extracted terms to datetime object if not dayMatch: return None elif today: d = self.now elif tomorrow: d = self.now + datetime.timedelta(days=1) elif type(day_of_week) == int: current_day_of_week = self.now.weekday() num_days_away = (day_of_week - current_day_of_week) % 7 if next_week: num_days_away += 7 d = self.now + \ datetime.timedelta(days=num_days_away) elif month and day: d = datetime.datetime( self.now.year, month, day, self.now.hour, self.now.minute) if days_from: d += datetime.timedelta(days=days_from) return d matches = self._dayRegex.finditer(inp) return [handleMatch(dayMatch) for dayMatch in matches]
828,521
Extracts time-related information from an input string. Ignores any information related to the specific date, focusing on the time-of-day. Args: inp (str): Input string to be parsed. Returns: A list of datetime objects containing the extracted times from the input snippet, or an empty list if none found.
def extractTimes(self, inp): def handleMatch(time): relative = False if not time: return None # Default times: 8am, 12pm, 7pm elif time.group(1) == 'morning': h = 8 m = 0 elif time.group(1) == 'afternoon': h = 12 m = 0 elif time.group(1) == 'evening': h = 19 m = 0 elif time.group(4) and time.group(5): h, m = 0, 0 # Extract hours difference converter = NumberService() try: diff = converter.parse(time.group(4)) except: return None if time.group(5) == 'hours': h += diff else: m += diff # Extract minutes difference if time.group(6): converter = NumberService() try: diff = converter.parse(time.group(7)) except: return None if time.group(8) == 'hours': h += diff else: m += diff relative = True else: # Convert from "HH:MM pm" format t = time.group(2) h, m = int(t.split(':')[0]) % 12, int(t.split(':')[1]) try: if time.group(3) == 'pm': h += 12 except IndexError: pass if relative: return self.now + datetime.timedelta(hours=h, minutes=m) else: return datetime.datetime( self.now.year, self.now.month, self.now.day, h, m ) inp = self._preprocess(inp) return [handleMatch(time) for time in self._timeRegex.finditer(inp)]
828,522
Extract semantic date information from an input string. In effect, runs both parseDay and parseTime on the input string and merges the results to produce a comprehensive datetime object. Args: inp (str): Input string to be parsed. Returns: A list of datetime objects containing the extracted dates from the input snippet, or an empty list if not found.
def extractDates(self, inp): def merge(param): day, time = param if not (day or time): return None if not day: return time if not time: return day return datetime.datetime( day.year, day.month, day.day, time.hour, time.minute ) days = self.extractDays(inp) times = self.extractTimes(inp) return map(merge, zip_longest(days, times, fillvalue=None))
828,523
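A usage sketch for the merge above (class name is hypothetical; relative results depend on self.now):

svc = DateService()  # hypothetical class name
dates = list(svc.extractDates("remind me tomorrow at 7:30 pm"))
# -> one datetime with the day taken from extractDays (now + 1 day)
#    and the time from extractTimes (19:30), merged field by field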
Convert a datetime object representing a time into a human-ready string that can be read, spoken aloud, etc. Args: time (datetime.date): A datetime object to be converted into text. Returns: A string representation of the input time, ignoring any day-related information.
def convertTime(self, time): # if ':00', ignore reporting minutes m_format = "" if time.minute: m_format = ":%M" timeString = time.strftime("%I" + m_format + " %p") # if '07:30', cast to '7:30' if not int(timeString[0]): timeString = timeString[1:] return timeString
828,526
Initializing and validating fields. Args: kwargs (dict): application command line options.
def __init__(self, **kwargs): try: arguments = Adapter(Schema(ApplicationOptions.SCHEMA).validate(kwargs)) self.definition = arguments.definition self.matrix_tags = [entry for entry in arguments.matrix_tags.split(',') if len(entry) > 0] self.tags = [entry for entry in arguments.tags.split(',') if len(entry) > 0] self.validate_only = arguments.validate_only self.dry_run = arguments.dry_run self.event_logging = arguments.event_logging self.logging_config = arguments.logging_config self.debug = arguments.debug self.strict = arguments.strict self.report = arguments.report self.temporary_scripts_path = arguments.temporary_scripts_path except SchemaError as exception: logging.getLogger(__name__).error(exception) raise RuntimeError(str(exception))
828,759
Find all column names and the maximum column widths. Args: sequence (list or tuple): a sequence with a dictionary each entry. Returns: dict: column names (key) and widths (value).
def calculate_columns(sequence): columns = {} for row in sequence: for key in row.keys(): if key not in columns: columns[key] = len(key) value_length = len(str(row[key])) if value_length > columns[key]: columns[key] = value_length return columns
828,883
Calculate row format. Args: columns (dict): the keys are the column name and the value the max length. keys (list): optional list of keys to order columns as well as to filter for them. Returns: str: format for table row
def calculate_row_format(columns, keys=None): row_format = '' if keys is None: keys = columns.keys() else: keys = [key for key in keys if key in columns] for key in keys: if len(row_format) > 0: row_format += "|" row_format += "%%(%s)-%ds" % (key, columns[key]) return '|' + row_format + '|'
828,884
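A hand-traced example of the format string produced above and how pprint applies it:

# calculate_row_format({'name': 5, 'count': 5}, ['name', 'count'])
#   -> '|%(name)-5s|%(count)-5s|'
row_format = '|%(name)-5s|%(count)-5s|'
print(row_format % {'name': 'spam', 'count': 42})
# |spam |42   |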
Print sequence as ascii table to stdout. Args: sequence (list or tuple): a sequence with a dictionary each entry. keys (list): optional list of keys to order columns as well as to filter for them.
def pprint(sequence, keys=None): if len(sequence) > 0: columns = calculate_columns(sequence) row_format = calculate_row_format(columns, keys) header = row_format % dict([(key, key.title()) for key in columns]) separator = row_format % dict([(key, '-' * columns[key]) for key in columns]) print(separator) print(header) print(separator) for row in sequence: print(row_format % row) print(separator)
828,885
Run pipelines in parallel. Args: data(dict): parameters for the pipeline (model, options, ...). Returns: dict: with two fields: success True/False and captured output (list of str).
def matrix_worker(data): matrix = data['matrix'] Logger.get_logger(__name__ + '.worker').info( "Processing pipeline for matrix entry '%s'", matrix['name']) env = matrix['env'].copy() env.update({'PIPELINE_MATRIX': matrix['name']}) pipeline = Pipeline(model=data['model'], env=env, options=data['options']) pipeline.hooks = data['hooks'] return pipeline.process(data['pipeline'])
828,886
Check whether the given matrix entry matches the given list of matrix tags. Args: entry (dict): matrix item (in yaml). matrix_tags (list): represents --matrix-tags defined by user in command line. Returns: bool: True when matrix entry can be processed.
def can_process_matrix(entry, matrix_tags): if len(matrix_tags) == 0: return True count = 0 if 'tags' in entry: for tag in matrix_tags: if tag in entry['tags']: count += 1 return count > 0
828,889
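Hand-traced examples of the filter above:

can_process_matrix({'tags': ['py36', 'lint']}, [])        # True: no filter set
can_process_matrix({'tags': ['py36']}, ['py36', 'py27'])  # True: one tag matches
can_process_matrix({'name': 'build'}, ['py36'])           # False: entry has no tags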
Initialize application with command line options. Args: options (ApplicationOptions): given command line options.
def __init__(self, **options): self.options = options self.logging_level = logging.DEBUG self.setup_logging() self.logger = Logger.get_logger(__name__) self.results = []
828,975
Verify whether to ignore a path. Args: path (str): path to check. Returns: bool: True when to ignore given path.
def ignore_path(path): ignore = False for name in ['.tox', 'dist', 'build', 'node_modules', 'htmlcov']: if path.find(name) >= 0: ignore = True break return ignore
828,977
Iterating files for given extensions. Args: paths (list): directories to traverse for matching files. supported_extensions (list): supported file extensions for which to check loc and com. Returns: tuple: yields path, full filename and extension for each matching file.
def walk_files_for(paths, supported_extensions): for path in paths: for root, _, files in os.walk(path): if Application.ignore_path(root.replace(path, '')): continue for filename in files: extension = os.path.splitext(filename)[1] if extension in supported_extensions: yield path, os.path.join(root, filename), extension
828,978
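A usage sketch; the method is assumed to be a static method of Application, since it calls Application.ignore_path directly, and the root paths are placeholders:

# Collect Python and shell sources below two (hypothetical) root paths.
for path, full_name, extension in Application.walk_files_for(
        ['./src', './scripts'], ['.py', '.sh']):
    print(path, full_name, extension)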
Find out lines of code and lines of comments. Args: path_and_filename (str): path and filename to parse for loc and com. pattern (str): regex to search for line comments and block comments. Returns: int, int: loc and com for given file.
def analyse(self, path_and_filename, pattern): with open(path_and_filename) as handle: content = handle.read() loc = content.count('\n') + 1 com = 0 for match in re.findall(pattern, content, re.DOTALL): com += match.count('\n') + 1 return max(0, loc - com), com
828,979
Get name and version of a tool defined by given command. Args: tool_name (str): name of the tool. tool_command (str): Bash one line command to get the version of the tool. Returns: dict: tool name and version or empty when no line has been found
def get_version(tool_name, tool_command): result = {} for line in Bash(ShellConfig(script=tool_command, internal=True)).process(): if line.find("command not found") >= 0: VersionsCheck.LOGGER.error("Required tool '%s' not found (stopping pipeline)!", tool_name) sys.exit(1) else: version = list(re.findall(r'(\d+(\.\d+)+)+', line))[0][0] result = {tool_name: Version(str(version))} break return result
829,157
Registers new events after instance creation Args: *names (str): Name or names of the events to register
def register_event(self, *names): for name in names: if name in self.__events: continue self.__events[name] = Event(name)
829,165
Dispatches an event to any subscribed listeners Note: If a listener returns :obj:`False`, the event will stop dispatching to other listeners. Any other return value is ignored. Args: name (str): The name of the :class:`Event` to dispatch *args (Optional): Positional arguments to be sent to listeners **kwargs (Optional): Keyword arguments to be sent to listeners
def emit(self, name, *args, **kwargs): e = self.__property_events.get(name) if e is None: e = self.__events[name] return e(*args, **kwargs)
829,168
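A usage sketch for event dispatch. The _events_ declaration and the bind() call follow the conventions of the python-dispatch package this code appears to come from, but are assumptions here:

class Player(Dispatcher):
    _events_ = ['on_play']  # assumed registration convention

p = Player()
p.bind(on_play=lambda *args: print('playing', args))  # bind() assumed
p.emit('on_play', 'track-1')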
Retrieves an Event object by name Args: name (str): The name of the :class:`Event` or :class:`~pydispatch.properties.Property` object to retrieve Returns: The :class:`Event` instance for the event or property definition .. versionadded:: 0.1.0
def get_dispatcher_event(self, name): e = self.__property_events.get(name) if e is None: e = self.__events[name] return e
829,169
Initialize application with command line options. Args: options (ApplicationOptions): given command line options.
def __init__(self, options): self.event = Event.create(__name__) self.options = options self.logging_level = logging.DEBUG self.setup_logging() self.logger = Logger.get_logger(__name__)
829,271
Running pipeline via a matrix. Args: matrix_definition (dict): one concrete matrix item. document (dict): spline document (complete) as loaded from yaml file.
def run_matrix(self, matrix_definition, document): matrix = Matrix(matrix_definition, 'matrix(parallel)' in document) process_data = MatrixProcessData() process_data.options = self.options process_data.pipeline = document['pipeline'] process_data.model = {} if 'model' not in document else document['model'] process_data.hooks = Hooks(document) return matrix.process(process_data)
829,274
Find **stages** in document. Args: document (dict): validated spline document loaded from a yaml file. Returns: list: stages as a part of the spline document or an empty list if not given. >>> find_stages({'pipeline': [{'stage(Prepare)':1}, {'stage(Build)':1}, {'stage(Deploy)':2}]}) ['Prepare', 'Build', 'Deploy']
def find_stages(document): names = [] if 'pipeline' in document: for entry in document['pipeline']: # each entry is dictionary with one key only key, _ = list(entry.items())[0] if key.startswith("stage("): names.append(key.replace('stage(', '').replace(')', '')) return names
829,283
Initializing and validating fields. Args: kwargs (dict): application command line options. Raises: RuntimeError: when validation of parameters has failed.
def __init__(self, **kwargs): try: arguments = Adapter(CollectorUpdate.schema_complete().validate(kwargs)) self.matrix = arguments.matrix self.stage = arguments.stage self.timestamp = arguments.timestamp self.status = arguments.status self.information = arguments.information.data except SchemaError as exception: Logger.get_logger(__name__).error(exception) raise RuntimeError(str(exception))
829,527
Initializing and validating fields. Args: kwargs (dict): application command line options. Raises: RuntimeError: when validation of parameters has failed.
def __init__(self, **kwargs): try: arguments = Adapter(CollectorStage.schema_complete().validate(kwargs)) self.stage = arguments.stage self.status = arguments.status self.events = arguments.events except SchemaError as exception: Logger.get_logger(__name__).error(exception) raise RuntimeError(str(exception))
829,530
Add event information. Args: timestamp (int): event timestamp. information (dict): event information. Raises: RuntimeError: when validation of parameters has failed.
def add(self, timestamp, information): try: item = Schema(CollectorStage.schema_event_items()).validate({ 'timestamp': timestamp, 'information': information }) self.events.append(item) except SchemaError as exception: Logger.get_logger(__name__).error(exception) raise RuntimeError(str(exception))
829,531
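A usage sketch; the field names follow the schema the validators above imply, and the concrete values are placeholders:

stage = CollectorStage(stage='Build', status='started')
stage.add(timestamp=1518451100, information={'step': 'compile'})
stage.add(timestamp=1518451160, information={'step': 'link'})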
Number of registered stages for given matrix name. Args: matrix_name (str): name of the matrix. Returns: int: number of reported stages for given matrix name.
def count_stages(self, matrix_name): return len(self.data[matrix_name]) if matrix_name in self.data else 0
829,533
Get duration for a concrete matrix. Args: matrix_name (str): name of the Matrix. Returns: float: duration of concrete matrix in seconds.
def get_duration(self, matrix_name): duration = 0.0 if matrix_name in self.data: duration = sum([stage.duration() for stage in self.data[matrix_name]]) return duration
829,535
Add a collector item. Args: item (CollectorUpdate): event data like stage, timestamp and status.
def update(self, item): if item.matrix not in self.data: self.data[item.matrix] = [] result = Select(self.data[item.matrix]).where( lambda entry: entry.stage == item.stage).build() if len(result) > 0: stage = result[0] stage.status = item.status stage.add(item.timestamp, item.information) else: stage = CollectorStage(stage=item.stage, status=item.status) stage.add(item.timestamp, item.information) self.data[item.matrix].append(stage)
829,536
Initializing pipeline with definition (loaded from a yaml file). Args: model (dict): if you have a model defined in your pipeline definition (yaml) env (dict): the env as defined (if) per matrix options (dict): command line options for spline
def __init__(self, model=None, env=None, options=None): self.event = Event.create(__name__) self.options = options self.model = {} if not isinstance(model, dict) else model self.data = PipelineData() self.data.env_list[0].update([] if env is None else env) self.logger = Logger.get_logger(__name__) self.variables = {}
829,581
Validate data against the schema. Args: data(dict): data structure to validate. Returns: dict: data as provided and defaults where defined in schema.
def validate(data): try: return Schema(Validator.SCHEMA).validate(data) except SchemaError as exception: logging.getLogger(__name__).error(exception) return None
829,624
Generating a temporary file with content. Args: content (str): file content (usually a script, Dockerfile, playbook or config file) prefix (str): the filename starts with this prefix (default: no prefix) suffix (str): the filename ends with this suffix (default: no suffix) Returns: str: name of the temporary file Note: You are responsible for the deletion of the file.
def write_temporary_file(content, prefix='', suffix=''):
    temp = tempfile.NamedTemporaryFile(prefix=prefix, suffix=suffix, mode='w+t', delete=False)
    temp.write(content)
    temp.close()
    return temp.name
829,666
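A usage sketch; the caller deletes the file afterwards, as the note above requires:

import os

name = write_temporary_file("#!/bin/bash\necho hello\n",
                            prefix='demo-', suffix='.sh')
try:
    with open(name) as handle:
        print(handle.read())
finally:
    os.remove(name)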
Add an instance method or function Args: m: The instance method or function to store
def add_method(self, m, **kwargs): if isinstance(m, types.FunctionType): self['function', id(m)] = m else: f, obj = get_method_vars(m) wrkey = (f, id(obj)) self[wrkey] = obj
829,778
Remove an instance method or function if it exists Args: m: The instance method or function to remove
def del_method(self, m): if isinstance(m, types.FunctionType) and not iscoroutinefunction(m): wrkey = ('function', id(m)) else: f, obj = get_method_vars(m) wrkey = (f, id(obj)) if wrkey in self: del self[wrkey]
829,779
Remove any stored instance methods that belong to an object Args: obj: The instance object to remove
def del_instance(self, obj): to_remove = set() for wrkey, _obj in self.iter_instances(): if obj is _obj: to_remove.add(wrkey) for wrkey in to_remove: del self[wrkey]
829,780
Initialize with Bash code and optional environment variables. Args: config(ShellConfig): options for configuring Bash environment and behavior
def __init__(self, config): self.event = Event.create(__name__) self.logger = Logger.get_logger(__name__) self.config = config self.success = True self.env = {} self.env.update(config.env) self.stdout = subprocess.PIPE self.stderr = subprocess.STDOUT self.shell = False self.exit_code = 0
829,852
Create a temporary, executable bash file. It also renders the given script (string) with the model, the provided environment variables, and optionally an item when using the B{with} field. Args: script (str): either path and filename or Bash code. Returns: str: path and filename of a temporary file.
def create_file_for(self, script): temp = tempfile.NamedTemporaryFile( prefix="pipeline-script-", mode='w+t', suffix=".sh", delete=False, dir=self.get_temporary_scripts_path()) self.update_environment_variables(temp.name) rendered_script = render(script, model=self.config.model, env=self.env, item=self.config.item, variables=self.config.variables) if rendered_script is None: self.success = False temp.close() os.remove(temp.name) return None to_file_map = {2: lambda s: s.encode('utf-8'), 3: lambda s: s} if all(ord(ch) < 128 for ch in rendered_script) and os.path.isfile(rendered_script): with open(rendered_script) as handle: content = str(handle.read()) temp.writelines(content) else: temp.write(u"#!/bin/bash\n%s" % self.render_bash_options()) temp.write(to_file_map[sys.version_info.major](rendered_script)) temp.close() # make Bash script executable os.chmod(temp.name, 0o700) return temp.name
829,855
Generating HTML report. Args: store (Store): report data. Returns: str: rendered HTML template.
def generate_html(store): spline = { 'version': VERSION, 'url': 'https://github.com/Nachtfeuer/pipeline', 'generated': datetime.now().strftime("%A, %d. %B %Y - %I:%M:%S %p") } html_template_file = os.path.join(os.path.dirname(__file__), 'templates/report.html.j2') with open(html_template_file) as handle: html_template = handle.read() return render(html_template, spline=spline, store=store)
829,937