text_prompt
stringlengths
100
17.7k
code_prompt
stringlengths
7
9.86k
def rename(name):
    # type: (str) -> None
    """ Give the currently developed hotfix a new name.

    Args:
        name (str): The new hotfix name. When None, the user is asked
            for a name interactively via click.
    """
    from peltak.extra.gitflow import logic

    hotfix_name = name if name is not None else click.prompt('Hotfix name')
    logic.hotfix.rename(hotfix_name)
def get(self, name, *default):
    # type: (str, Any) -> Any
    """ Get context value with the given name and optional default.

    Args:
        name (str): Dotted name of the context value, e.g. ``'build.dir'``.
        *default (Any): If given and the key doesn't exist, this will be
            returned instead. If it's not given and the context value does
            not exist, `AttributeError` will be raised.

    Returns:
        The requested context value, or the default when the value is
        missing and a default was supplied.

    Raises:
        AttributeError: If the value does not exist and `default` was
            not given.
    """
    node = self.values

    for key in name.split('.'):
        if key not in node:
            if default:
                return default[0]

            fmt = "Context value '{}' does not exist:\n{}"
            raise AttributeError(fmt.format(
                name, util.yaml_dump(self.values)
            ))

        node = node[key]

    return node
def set(self, name, value):
    """ Set context value.

    Intermediate mappings are created on demand, so setting ``'a.b.c'``
    on an empty context yields ``{'a': {'b': {'c': value}}}``.

    Args:
        name (str): The name of the context value to change.
        value (Any): The new value for the selected context value.

    Raises:
        InvalidPath: If some intermediate path component exists but is
            not a mapping.
    """
    parts = name.split('.')
    parents, leaf = parts[:-1], parts[-1]
    node = self.values

    for depth, key in enumerate(parents):
        try:
            node = node.setdefault(key, {})
        except AttributeError:
            raise InvalidPath('.'.join(parents[:depth + 1]))

    try:
        node[leaf] = value
    except TypeError:
        raise InvalidPath('.'.join(parents))
def alias_exists(alias, keystore_path=None, keystore_password='changeit'):
    """Checks if an alias already exists in a keystore

    :param alias: (str) alias to look for in the keytool listing output
    :param keystore_path: (str) path to the keystore; when None, the
        JRE cacerts file under JAVA_HOME is used, falling back to the
        JDK layout (jre/lib/security/cacerts)
    :param keystore_password: (str) keystore password (default: changeit)
    :return: (bool) True when the alias already exists in the keystore
    :raises: OSError
    """
    log = logging.getLogger(mod_logger + '.alias_exists')
    # NOTE(review): 'basestring' and the 3-argument raise further below are
    # Python 2 only constructs; this function cannot run on Python 3 as-is.
    if not isinstance(alias, basestring):
        msg = 'alias arg must be a string'
        log.error(msg)
        raise OSError(msg)

    # Ensure JAVA_HOME is set
    log.debug('Determining JAVA_HOME...')
    try:
        java_home = os.environ['JAVA_HOME']
    except KeyError:
        msg = 'JAVA_HOME is required but not set'
        log.error(msg)
        raise OSError(msg)

    # Ensure keytool can be found
    keytool = os.path.join(java_home, 'bin', 'keytool')
    if not os.path.isfile(keytool):
        msg = 'keytool file not found: {f}'.format(f=keytool)
        log.error(msg)
        raise OSError(msg)

    # Find the cacerts file
    if keystore_path is None:
        keystore_path = os.path.join(java_home, 'lib', 'security', 'cacerts')

    # If the JRE cacerts location is not found, look for the JDK cacerts
    if not os.path.isfile(keystore_path):
        keystore_path = os.path.join(java_home, 'jre', 'lib', 'security', 'cacerts')
    if not os.path.isfile(keystore_path):
        # NOTE(review): message reads 'Unable to file' -- likely a typo
        # for 'Unable to find'; left unchanged because callers/tests may
        # match on the exact text.
        msg = 'Unable to file cacerts file'
        log.error(msg)
        raise OSError(msg)
    log.info('Checking keystore {k} for alias: {a}...'.format(k=keystore_path, a=alias))

    # Build the keytool command
    command = [keytool, '-keystore', keystore_path, '-storepass', keystore_password, '-list']

    # Running the keytool list command
    log.debug('Running the keytool list command...')
    try:
        result = run_command(command)
    except CommandError:
        _, ex, trace = sys.exc_info()
        msg = 'There was a problem running keytool on keystore: {k}\n{e}'.format(k=keystore_path, e=str(ex))
        log.error(msg)
        # Python 2 three-argument raise: re-raise as OSError while
        # preserving the original traceback.
        raise OSError, msg, trace
    if result['code'] != 0:
        msg = 'keytool command exited with a non-zero code: {c}, and produced output: {o}'.format(
            c=result['code'], o=result['output'])
        log.error(msg)
        raise OSError(msg)

    # Check for the alias in the output (plain substring match, so a
    # partial alias name may produce a false positive)
    if alias in result['output']:
        log.info('Found alias {a} in keystore: {k}'.format(a=alias, k=keystore_path))
        return True
    else:
        log.info('Alias {a} was not found in keystore: {k}'.format(a=alias, k=keystore_path))
        return False
def filter(self, query: Query, entity: type) -> Tuple[Query, Any]:
    """Filter hook that every concrete node must implement.

    :param query: The sqlalchemy query.
    :type query: Query
    :param entity: The entity model.
    :type entity: type
    :return: The filtered query (plus any extra filter expression).
    :rtype: Tuple[Query, Any]
    :raises NotImplementedError: Always, in this abstract base
        implementation; subclasses must override it.
    """
    raise NotImplementedError('You must implement this.')
<SYSTEM_TASK:> Transform the list of relation to list of class. <END_TASK> <USER_TASK:> Description: def _get_relation(self, related_model: type, relations: List[str]) -> Tuple[Optional[List[type]], Optional[type]]: """Transform the list of relation to list of class. :param related_mode: The model of the query. :type related_mode: type :param relations: The relation list get from the `_extract_relations`. :type relations: List[str] :return: Tuple with the list of relations (class) and the second element is the last relation class. :rtype: Tuple[Optional[List[type]], Optional[type]] """
relations_list, last_relation = [], related_model for relation in relations: relationship = getattr(last_relation, relation, None) if relationship is None: return (None, None) last_relation = relationship.mapper.class_ relations_list.append(last_relation) return (relations_list, last_relation)
def _join_tables(self, query: Query, join_models: Optional[List[type]]) -> Query:
    """Join the given related models onto the query, skipping duplicates.

    :param query: The sqlalchemy query.
    :type query: Query
    :param join_models: The list of joined models produced by
        `_get_relation`.
    :type join_models: Optional[List[type]]
    :return: The new Query with the joined tables.
    :rtype: Query
    """
    joined_query = query
    # Entities already joined on this query.
    # NOTE(review): ``_join_entities`` is a private SQLAlchemy attribute
    # that was removed in SQLAlchemy 1.4 -- confirm the pinned version.
    joined_tables = [mapper.class_ for mapper in query._join_entities]

    for j_model in join_models or ():
        if j_model not in joined_tables:
            # /!\ join returns a new query /!\
            joined_query = joined_query.join(j_model)

    return joined_query
def get_first_builder_window(builder):
    """Get the first toplevel widget in a Gtk.Builder hierarchy.

    This is mostly used for guessing purposes, and an explicit naming is
    always going to be a better situation.

    Returns None when the builder contains no Gtk.Window.
    """
    windows = (obj for obj in builder.get_objects()
               if isinstance(obj, Gtk.Window))
    return next(windows, None)
def round_sig(x, sig):
    """Round the number to the specified number of significant figures.

    Args:
        x: The number to round.
        sig (int): How many significant figures to keep (>= 1).

    Returns:
        The rounded number. Zero is returned unchanged, since zero has
        no defined order of magnitude (log10(0) is undefined and the
        old code raised a math error for it).
    """
    if x == 0:
        return 0
    return round(x, sig - int(floor(log10(abs(x)))) - 1)
def open_file(filename, as_text=False):
    """Open the file, gunzipping it if it ends with .gz.

    If as_text the file is opened in text mode, otherwise the file's
    opened in binary mode. The .gz check is case-insensitive.
    """
    mode = 'rt' if as_text else 'rb'
    opener = gzip.open if filename.lower().endswith('.gz') else open
    return opener(filename, mode)
def write_squonk_datasetmetadata(outputBase, thinOutput, valueClassMappings, datasetMetaProps, fieldMetaProps):
    """This is a temp hack to write the minimal metadata that Squonk needs.
    Will needs to be replaced with something that allows something more
    complete to be written.

    :param outputBase: Base name for the file to write to
    :param thinOutput: Write only new data, not structures. Result type
        will be BasicObject
    :param valueClassMappings: A dict that describes the Java class of the
        value properties (used by Squonk)
    :param datasetMetaProps: A dict with metadata properties that describe
        the dataset as a whole (e.g. source, description, created, history)
    :param fieldMetaProps: A list of dicts with additional field metadata;
        each dict has a 'fieldName' key and a 'values' map of properties
    """
    meta = {}
    props = {}
    # TODO add created property - how to handle date formats?
    if datasetMetaProps:
        props.update(datasetMetaProps)

    if fieldMetaProps:
        meta["fieldMetaProps"] = fieldMetaProps

    if props:
        meta["properties"] = props

    if valueClassMappings:
        meta["valueClassMappings"] = valueClassMappings

    # Thin output carries no structures, hence the lighter Java type.
    meta['type'] = ('org.squonk.types.BasicObject' if thinOutput
                    else 'org.squonk.types.MoleculeObject')

    with open(outputBase + '.metadata', 'w') as meta_file:
        meta_file.write(json.dumps(meta))
def write_metrics(baseName, values):
    """Write the metrics data as key=value lines.

    :param baseName: The base name of the output files; '_metrics.txt'
        is appended to it
    :param values: dictionary of values to write
    """
    with open(baseName + '_metrics.txt', 'w') as metrics_file:
        for key, value in values.items():
            metrics_file.write(key + '=' + str(value) + "\n")
def generate_molecule_object_dict(source, format, values):
    """Generate a dictionary that represents a Squonk MoleculeObject when
    written as JSON.

    :param source: Molecules in molfile or smiles format
    :param format: The format of the molecule. Either 'mol' or 'smiles'
    :param values: Optional dict of values (properties) for the
        MoleculeObject; omitted from the result when falsy
    """
    molecule = {
        "uuid": str(uuid.uuid4()),
        "source": source,
        "format": format,
    }
    if values:
        molecule["values"] = values
    return molecule
def query_nexus(query_url, timeout_sec, basic_auth=None):
    """Queries Nexus for an artifact, retrying on timeouts and
    connection errors.

    Makes up to 6 attempts, sleeping 5 seconds between attempts, then
    raises if no attempt got an HTTP response.

    :param query_url: (str) Query URL
    :param timeout_sec: (int) query timeout
    :param basic_auth (HTTPBasicAuth) object or none
    :return: requests.Response object (only returned when status is 200)
    :raises: RuntimeError
    """
    log = logging.getLogger(mod_logger + '.query_nexus')

    # Attempt to query Nexus
    retry_sec = 5
    max_retries = 6
    try_num = 1
    query_success = False
    nexus_response = None
    while try_num <= max_retries:
        if query_success:
            break
        log.debug('Attempt # {n} of {m} to query the Nexus URL: {u}'.format(n=try_num, u=query_url, m=max_retries))
        try:
            # stream=True defers downloading the body to the caller
            nexus_response = requests.get(query_url, auth=basic_auth, stream=True, timeout=timeout_sec)
        except requests.exceptions.Timeout:
            _, ex, trace = sys.exc_info()
            msg = '{n}: Nexus initial query timed out after {t} seconds:\n{e}'.format(
                n=ex.__class__.__name__, t=timeout_sec, r=retry_sec, e=str(ex))
            # NOTE(review): log.warn is deprecated in favour of log.warning
            log.warn(msg)
            if try_num < max_retries:
                log.info('Retrying query in {t} sec...'.format(t=retry_sec))
                time.sleep(retry_sec)
        except (requests.exceptions.RequestException, requests.exceptions.ConnectionError):
            _, ex, trace = sys.exc_info()
            msg = '{n}: Nexus initial query failed with the following exception:\n{e}'.format(
                n=ex.__class__.__name__, r=retry_sec, e=str(ex))
            log.warn(msg)
            if try_num < max_retries:
                log.info('Retrying query in {t} sec...'.format(t=retry_sec))
                time.sleep(retry_sec)
        else:
            # Got an HTTP response (any status): stop retrying; the
            # status code is validated after the loop.
            query_success = True
        try_num += 1

    if not query_success:
        msg = 'Unable to query Nexus after {m} attempts using URL: {u}'.format(
            u=query_url, m=max_retries)
        log.error(msg)
        raise RuntimeError(msg)

    # Only HTTP 200 is treated as success; redirects and other 2xx
    # statuses also raise here.
    if nexus_response.status_code != 200:
        msg = 'Nexus request returned code {c}, unable to query Nexus using URL: {u}'.format(
            u=query_url, c=nexus_response.status_code)
        log.error(msg)
        raise RuntimeError(msg)
    return nexus_response
def get_doc(additional_doc=False, field_prefix='$', field_suffix=':', indent=4):
    """Return a formated string containing documentation about the audio
    fields.
    """
    if additional_doc:
        doc_fields = fields.copy()
        doc_fields.update(additional_doc)
    else:
        doc_fields = fields

    field_length = get_max_field_length(doc_fields)
    field_length = field_length + len(field_prefix) + len(field_suffix) + 4
    description_indent = ' ' * (indent + field_length)

    chunks = []
    for field, description in sorted(doc_fields.items()):
        description = description['description']
        # NOTE(review): the label always ends with a literal ':' -- the
        # field_suffix parameter only affects the column width above.
        label = ' ' * indent + field_prefix + field + ':'
        wrapped = textwrap.fill(
            description,
            width=78,
            initial_indent=description_indent,
            subsequent_indent=description_indent,
        )
        chunks.append(label.ljust(field_length) + wrapped[field_length:] + '\n\n\n')

    return ''.join(chunks)
def list_images(self):
    # type: () -> List[str]
    """ List images stored in the registry.

    Returns:
        list[str]: List of image names (the registry catalog's
        'repositories' entry).
    """
    response = self.get(self.registry_url + '/v2/_catalog', auth=self.auth)
    payload = response.json()
    return payload['repositories']
def list_tags(self, image_name):
    # type: (str) -> Iterator[str]
    """ List all tags for the given image stored in the registry.

    Args:
        image_name (str): The name of the image to query. The image must be
            present on the registry for this call to return any values.

    Returns:
        list[str]: Tags for that image, sorted in descending order. An
        empty list is returned when the registry reports no tags.
    """
    tags_url = self.registry_url + '/v2/{}/tags/list'
    r = self.get(tags_url.format(image_name), auth=self.auth)
    data = r.json()

    # Always return a list: the old code returned a 'reversed' iterator
    # on one path and a list on the other, which surprised callers. A
    # null 'tags' entry in the response is treated like a missing one.
    return sorted(data.get('tags') or [], reverse=True)
def validate(asset_dir):
    """Command line call to validate an asset structure

    :param asset_dir: (full path to the asset dir)
    :return: (int) 0 on success, 1 on validation failure
    """
    try:
        asset_name = validate_asset_structure(asset_dir_path=asset_dir)
    except Cons3rtAssetStructureError:
        _, ex, trace = sys.exc_info()
        msg = 'Cons3rtAssetStructureError: Problem with asset validation\n{e}'.format(e=str(ex))
        print('ERROR: {m}'.format(m=msg))
        return 1
    else:
        print('Validated asset with name: {n}'.format(n=asset_name))
        return 0
def create(asset_dir, dest_dir):
    """Command line call to create an asset zip

    Validates the asset directory first; zip creation only happens for a
    valid asset.

    :param asset_dir: (full path to the asset dir)
    :param dest_dir: (full path to the destination directory)
    :return: (int) 0 on success, 1 on failure
    """
    if validate(asset_dir=asset_dir) != 0:
        return 1
    try:
        asset_zip = make_asset_zip(asset_dir_path=asset_dir, destination_directory=dest_dir)
    except AssetZipCreationError:
        _, ex, trace = sys.exc_info()
        msg = 'AssetZipCreationError: Problem with asset zip creation\n{e}'.format(e=str(ex))
        print('ERROR: {m}'.format(m=msg))
        return 1
    else:
        print('Created asset zip file: {z}'.format(z=asset_zip))
        return 0
def get_queryset(self):
    """
    This view should return a list of all the addresses the identity has
    for the supplied query parameters.

    Currently only supports address_type and default params

    Always excludes addresses with optedout = True
    """
    identity_id = self.kwargs["identity_id"]
    address_type = self.kwargs["address_type"]
    # Mere presence of these query params acts as a boolean flag; their
    # values are ignored.
    use_ct = "use_communicate_through" in self.request.query_params
    default_only = "default" in self.request.query_params
    if use_ct:
        identity = Identity.objects.select_related("communicate_through").get(
            id=identity_id
        )
        # Follow the communicate_through reference when set, so the
        # addresses come from the identity actually used to communicate.
        if identity.communicate_through is not None:
            identity = identity.communicate_through
    else:
        identity = Identity.objects.get(id=identity_id)
    addresses = identity.get_addresses_list(address_type, default_only)
    # Wrap each raw address in the Address serializer-friendly object.
    return [Address(addr) for addr in addresses]
def check_api_response(self, response):
    """Check API response and raise exceptions if needed.

    Returns True for HTTP 200 and ``(False, status_code)`` for any
    status >= 400 (after logging the error details).

    NOTE(review): statuses in the 201-399 range fall through and return
    None implicitly, and the return type mixes bool with tuple --
    callers should be audited before changing this behavior.

    :param requests.models.Response response: request response to check
    """
    # check response
    if response.status_code == 200:
        return True
    elif response.status_code >= 400:
        logging.error(
            "{}: {} - {} - URL: {}".format(
                response.status_code,
                response.reason,
                response.json().get("error"),
                response.request.url,
            )
        )
        return False, response.status_code
def check_edit_tab(self, tab: str, md_type: str):
    """Check that the asked tab is part of the Isogeo web form and is
    reliable with the given metadata type.

    :param str tab: tab to check. Must be one of EDIT_TABS attribute
    :param str md_type: metadata type. Must be one of FILTER_TYPES
    """
    # parameter types
    if not isinstance(tab, str):
        raise TypeError("'tab' expected a str value.")
    if not isinstance(md_type, str):
        raise TypeError("'md_type' expected a str value.")

    # parameter values
    if tab not in EDIT_TABS:
        raise ValueError(
            "'{}' isn't a valid edition tab. "
            "Available values: {}".format(tab, " | ".join(EDIT_TABS))
        )
    if md_type not in FILTER_TYPES:
        if md_type not in FILTER_TYPES.values():
            raise ValueError(
                "'{}' isn't a valid metadata type. "
                "Available values: {}".format(md_type, " | ".join(FILTER_TYPES))
            )
        # human-readable type given: normalize to its internal key
        md_type = self._convert_md_type(md_type)

    # adequation tab/md_type
    if md_type not in EDIT_TABS.get(tab):
        raise ValueError(
            "'{}' isn't a valid tab for a '{}'' metadata."
            " Only for these types: {}.".format(tab, md_type, EDIT_TABS.get(tab))
        )
    return True
<SYSTEM_TASK:> Check if specific_md parameter is valid. <END_TASK> <USER_TASK:> Description: def _check_filter_specific_md(self, specific_md: list): """Check if specific_md parameter is valid. :param list specific_md: list of specific metadata UUID to check """
if isinstance(specific_md, list): if len(specific_md) > 0: # checking UUIDs and poping bad ones for md in specific_md: if not self.check_is_uuid(md): specific_md.remove(md) logging.error("Metadata UUID is not correct: {}".format(md)) # joining survivors specific_md = ",".join(specific_md) else: specific_md = "" else: raise TypeError("'specific_md' expects a list") return specific_md
<SYSTEM_TASK:> Check if specific_tag parameter is valid. <END_TASK> <USER_TASK:> Description: def _check_filter_specific_tag(self, specific_tag: list): """Check if specific_tag parameter is valid. :param list specific_tag: list of specific tag to check """
if isinstance(specific_tag, list): if len(specific_tag) > 0: specific_tag = ",".join(specific_tag) else: specific_tag = "" else: raise TypeError("'specific_tag' expects a list") return specific_tag
def version_cli(ctx, porcelain):
    # type: (click.Context, bool) -> None
    """ Show project version. Has sub commands.

    For this command to work you must specify where the project version is
    stored. You can do that with version_file conf variable. peltak supports
    multiple ways to store the project version. Right now you can store it in
    a python file using built-in __version__ variable. You can use node.js
    package.json and keep the version there or you can just use a plain text
    file that just holds the raw project version. The appropriate storage is
    guessed based on the file type and name.

    Example Configuration::

        version_file: 'src/mypackage/__init__.py'

    Examples:

        \b
        $ peltak version                      # Pretty print current version
        $ peltak version --porcelain          # Print version as raw string
        $ peltak version bump patch           # Bump patch version component
        $ peltak version bump minor           # Bump minor version component
        $ peltak version bump major           # Bump major version component
        $ peltak version bump release         # same as version bump patch
        $ peltak version bump --exact=1.2.1   # Set project version to 1.2.1
    """
    # Defer to the sub command when one was invoked.
    if ctx.invoked_subcommand:
        return

    from peltak.core import log
    from peltak.core import versioning

    current = versioning.current()

    if not porcelain:
        log.info("Version: <35>{}".format(current))
    else:
        # Raw output for scripting.
        print(current)
def bump_version(component='patch', exact=None):
    # type: (str, str) -> None
    """ Bump current project version without committing anything.

    No tags are created either.

    Examples:

        \b
        $ peltak version bump patch           # Bump patch version component
        $ peltak version bump minor           # Bump minor version component
        $ peltak version bump major           # Bump major version component
        $ peltak version bump release         # same as version bump patch
        $ peltak version bump --exact=1.2.1   # Set project version to 1.2.1
    """
    from peltak.core import log
    from peltak.core import versioning

    old_ver, new_ver = versioning.bump(component, exact)

    log.info("Project version bumped")
    for label, value in (("old version", old_ver), ("new version", new_ver)):
        log.info(" {}: <35>{}".format(label, value))
def _get(self, rec_id=None, upstream=None):
    """
    Fetches a record by the record's ID or upstream_identifier.

    Sets ``self.record_url`` as a side effect. When both arguments are
    given, ``rec_id`` takes precedence; when neither is given the method
    falls through and returns None implicitly.

    Args:
        rec_id: The record's primary ID.
        upstream: The record's upstream_identifier value.

    Returns:
        `dict`: The record's JSON payload.

    Raises:
        `pulsarpy.models.RecordNotFound`: A record could not be found.
    """
    if rec_id:
        self.record_url = self.__class__.get_record_url(rec_id)
        self.debug_logger.debug("GET {} record with ID {}: {}".format(self.__class__.__name__, rec_id, self.record_url))
        # verify=False: TLS certificate validation is disabled --
        # presumably a self-signed server cert; confirm before changing.
        response = requests.get(url=self.record_url, headers=HEADERS, verify=False)
        if not response.ok and response.status_code == requests.codes.NOT_FOUND:
            raise RecordNotFound("Search for {} record with ID '{}' returned no results.".format(self.__class__.__name__, rec_id))
        self.write_response_html_to_file(response,"get_bob.html")
        # Any other non-2xx status is surfaced as an HTTPError.
        response.raise_for_status()
        return response.json()
    elif upstream:
        # Delegate to find_by with require=True so a miss raises
        # RecordNotFound.
        rec_json = self.__class__.find_by({"upstream_identifier": upstream}, require=True)
        self.record_url = self.__class__.get_record_url(rec_json["id"])
        return rec_json
def replace_name_with_id(cls, name):
    """
    Used to replace a foreign key reference using a name with an ID. Works by
    searching the record in Pulsar and expects to find exactly one hit. First,
    will check if the foreign key reference is an integer value and if so,
    returns that as it is presumed to be the foreign key.

    NOTE(review): a numeric string is returned unchanged (str), while the
    ABBR-PK form returns an int -- callers appear to tolerate both.

    Raises:
        `pulsarpy.elasticsearch_utils.MultipleHitsException`: Multiple hits
            were returned from the name search.
        `pulsarpy.models.RecordNotFound`: No results were produced from the
            name search.
    """
    try:
        int(name)
        return name  # Already a presumed ID.
    except ValueError:
        pass
    # Not an int, so maybe a combination of MODEL_ABBR and Primary Key,
    # i.e. B-8: strip the abbreviation prefix and return the PK.
    if name.split("-")[0] in Meta._MODEL_ABBREVS:
        return int(name.split("-", 1)[1])
    try:
        # Fall back to an Elasticsearch lookup by record name.
        result = cls.ES.get_record_by_name(cls.ES_INDEX_NAME, name)
        if result:
            return result["id"]
    except pulsarpy.elasticsearch_utils.MultipleHitsException as e:
        # Ambiguous name: propagate unchanged.
        raise
    raise RecordNotFound("Name '{}' for model '{}' not found.".format(name, cls.__name__))
def delete(self):
    """Deletes the record.

    Returns:
        `dict`: The JSON response, or an empty dict for an HTTP 204
        (No Content) response that has no body to parse.
    """
    response = requests.delete(url=self.record_url, headers=HEADERS, verify=False)
    if response.status_code == 204:
        # No content. Can't render json:
        return {}
    return response.json()
def find_by(cls, payload, require=False):
    """
    Searches the model in question by AND joining the query parameters.

    Implements a Railsy way of looking for a record using a method by the same
    name and passing in the query as a dict. Only the first hit is returned,
    and there is no particular ordering specified in the server-side API
    method.

    Args:
        payload: `dict`. The attributes of a record to restrict the search to.
        require: `bool`. True means to raise a
            `pulsarpy.models.RecordNotFound` exception if no record is found.

    Returns:
        `dict`: The JSON serialization of the record, if any, found by the
            API call.
        `None`: If the API call didn't return any results.

    Raises:
        `pulsarpy.models.RecordNotFound`: No records were found, and the
            `require` parameter is True.
    """
    if not isinstance(payload, dict):
        raise ValueError("The 'payload' parameter must be provided a dictionary object.")
    # NOTE(review): os.path.join used for URL building -- only correct on
    # POSIX path separators; confirm this never runs on Windows.
    url = os.path.join(cls.URL, "find_by")
    payload = {"find_by": payload}
    cls.debug_logger.debug("Searching Pulsar {} for {}".format(cls.__name__, json.dumps(payload, indent=4)))
    res = requests.post(url=url, json=payload, headers=HEADERS, verify=False)
    #cls.write_response_html_to_file(res,"bob.html")
    res.raise_for_status()
    res_json = res.json()
    if res_json:
        try:
            # Unwrap the serializer envelope, e.g. {"library": {...}}.
            res_json = res_json[cls.MODEL_NAME]
        except KeyError:
            # Key won't be present if there isn't a serializer for it on
            # the server.
            pass
    else:
        if require:
            raise RecordNotFound("Can't find any {} records with search criteria: '{}'.".format(cls.__name__, payload))
    return res_json
def find_by_or(cls, payload):
    """
    Searches the model in question by OR joining the query parameters.

    Implements a Railsy way of looking for a record using a method by the
    same name, with OR operator joining of the query parameters. Only the
    first hit is returned, and there is no particular ordering specified
    in the server-side API method.

    Args:
        payload: `dict`. The attributes of a record to search for by using
            OR operator joining for each query parameter.

    Returns:
        `dict`: The JSON serialization of the record, if any, found by the
            API call.
        `None`: If the API call didn't return any results.

    Raises:
        `requests.exceptions.HTTPError`: The status code is not ok.
    """
    if not isinstance(payload, dict):
        raise ValueError("The 'payload' parameter must be provided a dictionary object.")
    url = os.path.join(cls.URL, "find_by_or")
    payload = {"find_by_or": payload}
    cls.debug_logger.debug("Searching Pulsar {} for {}".format(cls.__name__, json.dumps(payload, indent=4)))
    res = requests.post(url=url, json=payload, headers=HEADERS, verify=False)
    cls.write_response_html_to_file(res, "bob.html")
    # BUG FIX: the previous version subscripted the raw Response object
    # (res[cls.MODEL_NAME]) instead of its JSON body, which raised a
    # TypeError on every hit. Deserialize first, mirroring find_by.
    res.raise_for_status()
    res_json = res.json()
    if res_json:
        try:
            res_json = res_json[cls.MODEL_NAME]
        except KeyError:
            # Key won't be present if there isn't a serializer for it on
            # the server.
            pass
    return res_json
def index(cls):
    """Fetches all records.

    Returns:
        `dict`. The JSON formatted response.

    Raises:
        `requests.exceptions.HTTPError`: The status code is not ok.
    """
    response = requests.get(cls.URL, headers=HEADERS, verify=False)
    response.raise_for_status()
    return response.json()
def patch(self, payload, append_to_arrays=True):
    """
    Patches current record and udpates the current instance's 'attrs'
    attribute to reflect the new changes.

    Args:
        payload - hash. This will be JSON-formatted prior to sending the
            request.
        append_to_arrays - bool. When True (default), list-valued payload
            entries are merged with the record's current values rather
            than replacing them.

    Returns:
        `dict`. The JSON formatted response.

    Raises:
        `requests.exceptions.HTTPError`: The status code is not ok.
    """
    if not isinstance(payload, dict):
        raise ValueError("The 'payload' parameter must be provided a dictionary object.")
    payload = self.__class__.set_id_in_fkeys(payload)
    if append_to_arrays:
        for key in payload:
            val = payload[key]
            if type(val) == list:
                # Merge with the record's current values. NOTE(review):
                # the set() round-trip deduplicates but loses ordering,
                # and requires the list elements to be hashable.
                val.extend(getattr(self, key))
                payload[key] = list(set(val))
    payload = self.check_boolean_fields(payload)
    payload = self.__class__.add_model_name_to_payload(payload)
    self.debug_logger.debug("PATCHING payload {}".format(json.dumps(payload, indent=4)))
    res = requests.patch(url=self.record_url, json=payload, headers=HEADERS, verify=False)
    self.write_response_html_to_file(res,"bob.html")
    res.raise_for_status()
    json_res = res.json()
    self.debug_logger.debug("Success")
    # Keep the cached attributes in sync with the server state.
    self.attrs = json_res
    return json_res
def post(cls, payload):
    """Posts the data to the specified record.

    Args:
        payload: `dict`. This will be JSON-formatted prior to sending the
            request.

    Returns:
        `dict`. The JSON formatted response.

    Raises:
        `Requests.exceptions.HTTPError`: The status code is not ok.
        `RecordNotUnique`: The Rails server returned the exception
            ActiveRecord::RecordNotUnique.
    """
    if not isinstance(payload, dict):
        raise ValueError("The 'payload' parameter must be provided a dictionary object.")
    # Normalize the payload: resolve foreign-key names to IDs, coerce
    # boolean fields and wrap in the model-name envelope.
    payload = cls.set_id_in_fkeys(payload)
    payload = cls.check_boolean_fields(payload)
    payload = cls.add_model_name_to_payload(payload)
    # Run any pre-post hooks:
    payload = cls.prepost_hooks(payload)
    cls.debug_logger.debug("POSTING payload {}".format(json.dumps(payload, indent=4)))
    res = requests.post(url=cls.URL, json=(payload), headers=HEADERS, verify=False)
    cls.write_response_html_to_file(res,"bob.html")
    if not res.ok:
        cls.log_error(res.text)
        res_json = res.json()
        if "exception" in res_json:
            exc_type = res_json["exception"]
            # Translate the Rails uniqueness violation into a typed
            # client-side exception.
            if exc_type == "ActiveRecord::RecordNotUnique":
                raise RecordNotUnique()
    res.raise_for_status()
    res = res.json()
    cls.log_post(res)
    cls.debug_logger.debug("Success")
    return res
def log_error(cls, msg):
    """
    Logs the provided error message to both the error logger and the
    debug logger logging instances.

    Args:
        msg: `str`. The error message to log.
    """
    for emit in (cls.error_logger.error, cls.debug_logger.debug):
        emit(msg)
<SYSTEM_TASK:> Generates a dict. where each key is a Library ID on the SequencingRequest and each value <END_TASK> <USER_TASK:> Description: def library_sequencing_results(self): """ Generates a dict. where each key is a Library ID on the SequencingRequest and each value is the associated SequencingResult. Libraries that aren't yet with a SequencingResult are not inlcuded in the dict. """
sres_ids = self.sequencing_result_ids res = {} for i in sres_ids: sres = SequencingResult(i) res[sres.library_id] = sres return res
def remove_api_key(self):
    """
    Removes the user's existing API key, if present, and sets the current
    instance's 'api_key' attribute to the empty string.

    Returns:
        `NoneType`: None.
    """
    endpoint = self.record_url + "/remove_api_key"
    response = requests.patch(url=endpoint, headers=HEADERS, verify=False)
    response.raise_for_status()
    # Only clear the cached key once the server confirmed the removal.
    self.api_key = ""
def add_header(self, header):
    """Add a custom HTTP header to the client's request headers.

    Args:
        header (dict): Mapping of header name(s) to value(s); it is
            merged into the existing request headers.

    Raises:
        ValueError: If `header` is not a dict.
    """
    # isinstance (rather than an exact 'type(...) is dict' check) also
    # accepts dict subclasses such as OrderedDict or CaseInsensitiveDict.
    if isinstance(header, dict):
        self._headers.update(header)
    else:
        raise ValueError(
            "Dictionary expected, got '%s' instead" % type(header)
        )
def changelog_cli(ctx):
    # type: () -> None
    """ Generate changelog from commit messages. """
    # Defer to the sub command when one was invoked.
    if not ctx.invoked_subcommand:
        from peltak.core import shell
        from . import logic

        shell.cprint(logic.changelog())
def get_filename(self, base_dir=None, modality=None):
    """Construct filename based on the attributes.

    Parameters
    ----------
    base_dir : Path
        path of the root directory. If specified, the return value is a
        Path, with base_dir / sub-XXX / (ses-XXX /) modality / filename
        otherwise the return value is a string.
    modality : str
        overwrite value for modality (i.e. the directory inside
        subject/session). This is necessary because sometimes the modality
        attribute is ambiguous.

    Returns
    -------
    str or Path
        str of the filename if base_dir is not specified, otherwise the
        full Path
    """
    parts = ['sub-' + self.subject]
    if self.session is not None:
        parts.append('_ses-' + self.session)
    if self.task is not None:
        parts.append('_task-' + self.task)
    # The run entity goes before acq when there is no direction entity,
    # but after dir when there is one.
    if self.run is not None and self.direction is None:
        parts.append('_run-' + self.run)
    if self.acquisition is not None:
        parts.append('_acq-' + self.acquisition)
    if self.direction is not None:
        parts.append('_dir-' + self.direction)
    if self.run is not None and self.direction is not None:
        parts.append('_run-' + self.run)
    if self.modality is not None:
        parts.append('_' + self.modality)
    if self.extension is not None:
        parts.append(self.extension)
    filename = ''.join(parts)

    if base_dir is None:
        return filename

    dir_name = base_dir / ('sub-' + self.subject)
    if self.session is not None:
        dir_name /= 'ses-' + self.session
    if modality is not None:
        dir_name /= modality
    else:
        dir_name = add_modality(dir_name, self.modality)
    return dir_name / filename
<SYSTEM_TASK:> Select elements of the TSV, using python filter and map. <END_TASK> <USER_TASK:> Description: def get(self, filter_lambda=None, map_lambda=None): """Select elements of the TSV, using python filter and map. Parameters ---------- filter_lambda : function function to filter the tsv rows (the function needs to return True/False) map_lambda : function function to select the tsv columns Returns ------- list list (not a generator, because that's the most common case) Examples -------- To select all the channels in one list, called "good_labels":: >>> file_Tsv.get(lambda x: x['name'] in good_labels) To select all the names of the channels: >>> file_Tsv.get(map_filter=lambda x: x['name']) """
if filter_lambda is None: filter_lambda = lambda x: True if map_lambda is None: map_lambda = lambda x: x return list(map(map_lambda, filter(filter_lambda, self.tsv)))
<SYSTEM_TASK:> Search within the resources shared to the application. <END_TASK> <USER_TASK:> Description: def search( self, token: dict = None, query: str = "", bbox: list = None, poly: str = None, georel: str = None, order_by: str = "_created", order_dir: str = "desc", page_size: int = 100, offset: int = 0, share: str = None, specific_md: list = [], include: list = [], whole_share: bool = True, check: bool = True, augment: bool = False, tags_as_dicts: bool = False, prot: str = "https", ) -> dict: """Search within the resources shared to the application. It's the main method to use. :param str token: API auth token - DEPRECATED: token is now automatically included :param str query: search terms and semantic filters. Equivalent of **q** parameter in Isogeo API. It could be a simple string like *oil* or a tag like *keyword:isogeo:formations* or *keyword:inspire-theme:landcover*. The *AND* operator is applied when various tags are passed. :param list bbox: Bounding box to limit the search. Must be a 4 list of coordinates in WGS84 (EPSG 4326). Could be associated with *georel*. :param str poly: Geographic criteria for the search, in WKT format. Could be associated with *georel*. :param str georel: geometric operator to apply to the bbox or poly parameters. Available values (see: *isogeo.GEORELATIONS*): * 'contains', * 'disjoint', * 'equals', * 'intersects' - [APPLIED BY API if NOT SPECIFIED] * 'overlaps', * 'within'. :param str order_by: sorting results. Available values: * '_created': metadata creation date [DEFAULT if relevance is null] * '_modified': metadata last update * 'title': metadata title * 'created': data creation date (possibly None) * 'modified': data last update date * 'relevance': relevance score calculated by API [DEFAULT]. :param str order_dir: sorting direction. Available values: * 'desc': descending * 'asc': ascending :param int page_size: limits the number of results. Useful to paginate results display. Default value: 100. 
:param int offset: offset to start page size from a specific results index :param str share: share UUID to filter on :param list specific_md: list of metadata UUIDs to filter on :param list include: subresources that should be returned. Must be a list of strings. Available values: *isogeo.SUBRESOURCES* :param bool whole_share: option to return all results or only the page size. *True* by DEFAULT. :param bool check: option to check query parameters and avoid erros. *True* by DEFAULT. :param bool augment: option to improve API response by adding some tags on the fly (like shares_id) :param bool tags_as_dicts: option to store tags as key/values by filter. :param str prot: https [DEFAULT] or http (use it only for dev and tracking needs). """
# specific resources specific parsing specific_md = checker._check_filter_specific_md(specific_md) # sub resources specific parsing include = checker._check_filter_includes(include) # handling request parameters payload = { "_id": specific_md, "_include": include, "_lang": self.lang, "_limit": page_size, "_offset": offset, "box": bbox, "geo": poly, "rel": georel, "ob": order_by, "od": order_dir, "q": query, "s": share, } if check: checker.check_request_parameters(payload) else: pass # search request search_url = "{}://v1.{}.isogeo.com/resources/search".format(prot, self.api_url) try: search_req = self.get( search_url, headers=self.header, params=payload, proxies=self.proxies, verify=self.ssl, ) except Exception as e: logging.error(e) raise Exception # fast response check checker.check_api_response(search_req) # serializing result into dict and storing resources in variables search_rez = search_req.json() resources_count = search_rez.get("total") # total of metadatas shared # handling Isogeo API pagination # see: http://help.isogeo.com/api/fr/methods/pagination.html if resources_count > page_size and whole_share: # if API returned more than one page of results, let's get the rest! 
metadatas = [] # a recipient list payload["_limit"] = 100 # now it'll get pages of 100 resources # let's parse pages for idx in range(0, int(ceil(resources_count / 100)) + 1): payload["_offset"] = idx * 100 search_req = self.get( search_url, headers=self.header, params=payload, proxies=self.proxies, verify=self.ssl, ) # storing results by addition metadatas.extend(search_req.json().get("results")) search_rez["results"] = metadatas else: pass # add shares to tags and query if augment: self.add_tags_shares(search_rez.get("tags")) if share: search_rez.get("query")["_shares"] = [share] else: search_rez.get("query")["_shares"] = [] else: pass # store tags in dicts if tags_as_dicts: new_tags = utils.tags_to_dict( tags=search_rez.get("tags"), prev_query=search_rez.get("query") ) # clear search_rez.get("tags").clear() search_rez.get("query").clear() # update search_rez.get("tags").update(new_tags[0]) search_rez.get("query").update(new_tags[1]) else: pass # end of method return search_rez
<SYSTEM_TASK:> Get complete or partial metadata about one specific resource. <END_TASK> <USER_TASK:> Description: def resource( self, token: dict = None, id_resource: str = None, subresource=None, include: list = [], prot: str = "https", ) -> dict: """Get complete or partial metadata about one specific resource. :param str token: API auth token :param str id_resource: metadata UUID to get :param list include: subresources that should be included. Must be a list of strings. Available values: 'isogeo.SUBRESOURCES' :param str prot: https [DEFAULT] or http (use it only for dev and tracking needs). """
# if subresource route if isinstance(subresource, str): subresource = "/{}".format(checker._check_subresource(subresource)) else: subresource = "" # _includes specific parsing include = checker._check_filter_includes(include) # handling request parameters payload = {"id": id_resource, "_include": include} # resource search md_url = "{}://v1.{}.isogeo.com/resources/{}{}".format( prot, self.api_url, id_resource, subresource ) resource_req = self.get( md_url, headers=self.header, params=payload, proxies=self.proxies, verify=self.ssl, ) checker.check_api_response(resource_req) # end of method return resource_req.json()
<SYSTEM_TASK:> Get information about shares which feed the application. <END_TASK> <USER_TASK:> Description: def shares(self, token: dict = None, prot: str = "https") -> dict: """Get information about shares which feed the application. :param str token: API auth token :param str prot: https [DEFAULT] or http (use it only for dev and tracking needs). """
# passing auth parameter shares_url = "{}://v1.{}.isogeo.com/shares/".format(prot, self.api_url) shares_req = self.get( shares_url, headers=self.header, proxies=self.proxies, verify=self.ssl ) # checking response checker.check_api_response(shares_req) # end of method return shares_req.json()
<SYSTEM_TASK:> Get information about a specific share and its applications. <END_TASK> <USER_TASK:> Description: def share( self, share_id: str, token: dict = None, augment: bool = False, prot: str = "https", ) -> dict: """Get information about a specific share and its applications. :param str token: API auth token :param str share_id: share UUID :param bool augment: option to improve API response by adding some tags on the fly. :param str prot: https [DEFAULT] or http (use it only for dev and tracking needs). """
# passing auth parameter share_url = "{}://v1.{}.isogeo.com/shares/{}".format( prot, self.api_url, share_id ) share_req = self.get( share_url, headers=self.header, proxies=self.proxies, verify=self.ssl ) # checking response checker.check_api_response(share_req) # enhance share model share = share_req.json() if augment: share = utils.share_extender( share, self.search(whole_share=1, share=share_id).get("results") ) else: pass # end of method return share
<SYSTEM_TASK:> Get information about licenses owned by a specific workgroup. <END_TASK> <USER_TASK:> Description: def licenses( self, token: dict = None, owner_id: str = None, prot: str = "https" ) -> dict: """Get information about licenses owned by a specific workgroup. :param str token: API auth token :param str owner_id: workgroup UUID :param str prot: https [DEFAULT] or http (use it only for dev and tracking needs). """
# handling request parameters payload = {"gid": owner_id} # search request licenses_url = "{}://v1.{}.isogeo.com/groups/{}/licenses".format( prot, self.api_url, owner_id ) licenses_req = self.get( licenses_url, headers=self.header, params=payload, proxies=self.proxies, verify=self.ssl, ) # checking response req_check = checker.check_api_response(licenses_req) if isinstance(req_check, tuple): return req_check # end of method return licenses_req.json()
<SYSTEM_TASK:> Get details about a specific license. <END_TASK> <USER_TASK:> Description: def license(self, license_id: str, token: dict = None, prot: str = "https") -> dict: """Get details about a specific license. :param str token: API auth token :param str license_id: license UUID :param str prot: https [DEFAULT] or http (use it only for dev and tracking needs). """
# handling request parameters payload = {"lid": license_id} # search request license_url = "{}://v1.{}.isogeo.com/licenses/{}".format( prot, self.api_url, license_id ) license_req = self.get( license_url, headers=self.header, params=payload, proxies=self.proxies, verify=self.ssl, ) # checking response checker.check_api_response(license_req) # end of method return license_req.json()
<SYSTEM_TASK:> Get list of available thesauri. <END_TASK> <USER_TASK:> Description: def thesauri(self, token: dict = None, prot: str = "https") -> dict: """Get list of available thesauri. :param str token: API auth token :param str prot: https [DEFAULT] or http (use it only for dev and tracking needs). """
# passing auth parameter thez_url = "{}://v1.{}.isogeo.com/thesauri".format(prot, self.api_url) thez_req = self.get( thez_url, headers=self.header, proxies=self.proxies, verify=self.ssl ) # checking response checker.check_api_response(thez_req) # end of method return thez_req.json()
<SYSTEM_TASK:> Search for keywords within a specific thesaurus. <END_TASK> <USER_TASK:> Description: def keywords( self, token: dict = None, thez_id: str = "1616597fbc4348c8b11ef9d59cf594c8", query: str = "", offset: int = 0, order_by: str = "text", order_dir: str = "desc", page_size: int = 20, specific_md: list = [], specific_tag: list = [], include: list = [], prot: str = "https", ) -> dict: """Search for keywords within a specific thesaurus. :param str token: API auth token :param str thez_id: thesaurus UUID :param str query: search terms :param int offset: pagination start :param str order_by: sort criteria. Available values : - count.group, - count.isogeo, - text :param str prot: https [DEFAULT] or http (use it only for dev and tracking needs). """
# specific resources specific parsing specific_md = checker._check_filter_specific_md(specific_md) # sub resources specific parsing include = checker._check_filter_includes(include, "keyword") # specific tag specific parsing specific_tag = checker._check_filter_specific_tag(specific_tag) # handling request parameters payload = { "_id": specific_md, "_include": include, "_limit": page_size, "_offset": offset, "_tag": specific_tag, "tid": thez_id, "ob": order_by, "od": order_dir, "q": query, } # search request keywords_url = "{}://v1.{}.isogeo.com/thesauri/{}/keywords/search".format( prot, self.api_url, thez_id ) kwds_req = self.get( keywords_url, headers=self.header, params=payload, proxies=self.proxies, verify=self.ssl, ) # checking response checker.check_api_response(kwds_req) # end of method return kwds_req.json()
<SYSTEM_TASK:> Download hosted resource. <END_TASK> <USER_TASK:> Description: def dl_hosted( self, token: dict = None, resource_link: dict = None, encode_clean: bool = 1, proxy_url: str = None, prot: str = "https", ) -> tuple: """Download hosted resource. :param str token: API auth token :param dict resource_link: link dictionary :param bool encode_clean: option to ensure a clean filename and avoid OS errors :param str proxy_url: proxy to use to download :param str prot: https [DEFAULT] or http (use it only for dev and tracking needs). Example of resource_link dict: .. code-block:: json { "_id": "g8h9i0j11k12l13m14n15o16p17Q18rS", "type": "hosted", "title": "label_of_hosted_file.zip", "url": "/resources/1a2b3c4d5e6f7g8h9i0j11k12l13m14n/links/g8h9i0j11k12l13m14n15o16p17Q18rS.bin", "kind": "data", "actions": ["download", ], "size": "2253029", } """
# check resource link parameter type if not isinstance(resource_link, dict): raise TypeError("Resource link expects a dictionary.") else: pass # check resource link type if not resource_link.get("type") == "hosted": raise ValueError( "Resource link passed is not a hosted one: {}".format( resource_link.get("type") ) ) else: pass # handling request parameters payload = {"proxyUrl": proxy_url} # prepare URL request hosted_url = "{}://v1.{}.isogeo.com/{}".format( prot, self.api_url, resource_link.get("url") ) # send stream request hosted_req = self.get( hosted_url, headers=self.header, stream=True, params=payload, proxies=self.proxies, verify=self.ssl, ) # quick check req_check = checker.check_api_response(hosted_req) if not req_check: raise ConnectionError(req_check[1]) else: pass # get filename from header content_disposition = hosted_req.headers.get("Content-Disposition") if content_disposition: filename = re.findall("filename=(.+)", content_disposition)[0] else: filename = resource_link.get("title") # remove special characters if encode_clean: filename = utils.encoded_words_to_text(filename) filename = re.sub(r"[^\w\-_\. ]", "", filename) # well-formed size in_size = resource_link.get("size") for size_cat in ("octets", "Ko", "Mo", "Go"): if in_size < 1024.0: out_size = "%3.1f %s" % (in_size, size_cat) in_size /= 1024.0 out_size = "%3.1f %s" % (in_size, " To") # end of method return (hosted_req, filename, out_size)
<SYSTEM_TASK:> Add shares list to the tags attributes in search results. <END_TASK> <USER_TASK:> Description: def add_tags_shares(self, tags: dict = dict()): """Add shares list to the tags attributes in search results. :param dict tags: tags dictionary from a search request """
# check if shares_id have already been retrieved or not if not hasattr(self, "shares_id"): shares = self.shares() self.shares_id = { "share:{}".format(i.get("_id")): i.get("name") for i in shares } else: pass # update query tags tags.update(self.shares_id)
<SYSTEM_TASK:> Get information about the application declared on Isogeo. <END_TASK> <USER_TASK:> Description: def get_app_properties(self, token: dict = None, prot: str = "https"): """Get information about the application declared on Isogeo. :param str token: API auth token :param str prot: https [DEFAULT] or http (use it only for dev and tracking needs). """
# check if app properties have already been retrieved or not if not hasattr(self, "app_properties"): first_app = self.shares()[0].get("applications")[0] app = { "admin_url": "{}/applications/{}".format( self.mng_url, first_app.get("_id") ), "creation_date": first_app.get("_created"), "last_update": first_app.get("_modified"), "name": first_app.get("name"), "type": first_app.get("type"), "kind": first_app.get("kind"), "url": first_app.get("url"), } self.app_properties = app else: pass
<SYSTEM_TASK:> Get environment directives which represent INSPIRE limitations. <END_TASK> <USER_TASK:> Description: def get_directives(self, token: dict = None, prot: str = "https") -> dict: """Get environment directives which represent INSPIRE limitations. :param str token: API auth token :param str prot: https [DEFAULT] or http (use it only for dev and tracking needs). """
# search request req_url = "{}://v1.{}.isogeo.com/directives".format(prot, self.api_url) req = self.get( req_url, headers=self.header, proxies=self.proxies, verify=self.ssl ) # checking response checker.check_api_response(req) # end of method return req.json()
<SYSTEM_TASK:> Get available coordinate systems in Isogeo API. <END_TASK> <USER_TASK:> Description: def get_coordinate_systems( self, token: dict = None, srs_code: str = None, prot: str = "https" ) -> dict: """Get available coordinate systems in Isogeo API. :param str token: API auth token :param str srs_code: code of a specific coordinate system :param str prot: https [DEFAULT] or http (use it only for dev and tracking needs). """
# if specific format if isinstance(srs_code, str): specific_srs = "/{}".format(srs_code) else: specific_srs = "" # search request req_url = "{}://v1.{}.isogeo.com/coordinate-systems{}".format( prot, self.api_url, specific_srs ) req = self.get( req_url, headers=self.header, proxies=self.proxies, verify=self.ssl ) # checking response checker.check_api_response(req) # end of method return req.json()
<SYSTEM_TASK:> Get formats. <END_TASK> <USER_TASK:> Description: def get_formats( self, token: dict = None, format_code: str = None, prot: str = "https" ) -> dict: """Get formats. :param str token: API auth token :param str format_code: code of a specific format :param str prot: https [DEFAULT] or http (use it only for dev and tracking needs). """
# if specific format if isinstance(format_code, str): specific_format = "/{}".format(format_code) else: specific_format = "" # search request req_url = "{}://v1.{}.isogeo.com/formats{}".format( prot, self.api_url, specific_format ) req = self.get( req_url, headers=self.header, proxies=self.proxies, verify=self.ssl ) # checking response checker.check_api_response(req) # end of method return req.json()
<SYSTEM_TASK:> Get different filters values as dicts. <END_TASK> <USER_TASK:> Description: def get_filters_values(self): """Get different filters values as dicts."""
# DATASETS -- # badges self._DST_BADGES = requests.get(self.base_url + "datasets/badges/").json() # licences self._DST_LICENSES = { l.get("id"): l.get("title") for l in requests.get(self.base_url + "datasets/licenses").json() } # frequencies self._DST_FREQUENCIES = { f.get("id"): f.get("label") for f in requests.get(self.base_url + "datasets/frequencies").json() } # ORGANIZATIONS -- # badges self._ORG_BADGES = requests.get(self.base_url + "organizations/badges/").json() # # licences # self._DST_LICENSES = {l.get("id"): l.get("title") # for l in requests.get(self.base_url + "datasets/licenses").json()} # # frequencies # self._DST_FREQUENCIES = {f.get("id"): f.get("label") # for f in requests.get(self.base_url + "datasets/frequencies").json()} # SPATIAL -- # granularities self._GRANULARITIES = { g.get("id"): g.get("name") for g in requests.get(self.base_url + "spatial/granularities").json() } # levels self._LEVELS = { g.get("id"): g.get("name") for g in requests.get(self.base_url + "spatial/levels").json() } # MISC -- # facets self._FACETS = ( "all", "badge", "featured", "format", "geozone", "granularity", "license", "owner", "organization", "reuses", "tag", "temporal_coverage", ) # reuses self._REUSES = ("none", "few", "quite", "many")
<SYSTEM_TASK:> Deploy the app to AppEngine. <END_TASK> <USER_TASK:> Description: def deploy(app_id, version, promote, quiet): # type: (str, str, bool, bool) -> None """ Deploy the app to AppEngine. Args: app_id (str): AppEngine App ID. Overrides config value app_id if given. version (str): AppEngine project version. Overrides config values if given. promote (bool): If set to **True** promote the current remote app version to the one that's being deployed. quiet (bool): If set to **True** this will pass the ``--quiet`` flag to gcloud command. """
gae_app = GaeApp.for_branch(git.current_branch().name) if gae_app is None and None in (app_id, version): msg = ( "Can't find an AppEngine app setup for branch <35>{}<32> and" "--project and --version were not given." ) log.err(msg, git.current_branch().name) sys.exit(1) if version is not None: gae_app.version = version if app_id is not None: gae_app.app_id = app_id gae_app.deploy(promote, quiet)
<SYSTEM_TASK:> Mark command as deprecated. <END_TASK> <USER_TASK:> Description: def mark_deprecated(replaced_by): # type: (Text) -> FunctionType """ Mark command as deprecated. Args: replaced_by (str): The command that deprecated this command and should be used instead. """
def decorator(fn): # pylint: disable=missing-docstring @wraps(fn) def wrapper(*args, **kw): # pylint: disable=missing-docstring from peltak.core import shell if shell.is_tty: warnings.warn("This command is has been deprecated. Please use " "{new} instead.".format(new=replaced_by)) return fn(*args, **kw) return wrapper return decorator
<SYSTEM_TASK:> Split the given iterable into batches. <END_TASK> <USER_TASK:> Description: def in_batches(iterable, batch_size): # type: (Iterable[Any]) -> Generator[List[Any]] """ Split the given iterable into batches. Args: iterable (Iterable[Any]): The iterable you want to split into batches. batch_size (int): The size of each bach. The last batch will be probably smaller (if the number of elements cannot be equally divided. Returns: Generator[list[Any]]: Will yield all items in batches of **batch_size** size. Example: >>> from peltak.core import util >>> >>> batches = util.in_batches([1, 2, 3, 4, 5, 6, 7], 3) >>> batches = list(batches) # so we can query for lenght >>> len(batches) 3 >>> batches [[1, 2, 3], [4, 5, 6], [7]] """
items = list(iterable) size = len(items) for i in range(0, size, batch_size): yield items[i:min(i + batch_size, size)]
<SYSTEM_TASK:> Clear result cache on the given function. <END_TASK> <USER_TASK:> Description: def clear(cls, fn): # type: (FunctionType) -> None """ Clear result cache on the given function. If the function has no cached result, this call will do nothing. Args: fn (FunctionType): The function whose cache should be cleared. """
if hasattr(fn, cls.CACHE_VAR): delattr(fn, cls.CACHE_VAR)
<SYSTEM_TASK:> Merge current feature branch into develop. <END_TASK> <USER_TASK:> Description: def finish(): # type: () -> None """ Merge current feature branch into develop. """
pretend = context.get('pretend', False) if not pretend and (git.staged() or git.unstaged()): log.err( "You have uncommitted changes in your repo!\n" "You need to stash them before you merge the hotfix branch" ) sys.exit(1) branch = git.current_branch(refresh=True) base = common.get_base_branch() prompt = "<32>Merge <33>{}<32> into <33>{}<0>?".format(branch.name, base) if not click.confirm(shell.fmt(prompt)): log.info("Cancelled") return common.assert_branch_type('task') # Merge task into it's base feature branch common.git_checkout(base) common.git_pull(base) common.git_merge(base, branch.name) # Cleanup common.git_branch_delete(branch.name) common.git_prune() common.git_checkout(base)
<SYSTEM_TASK:> Call a Mutagen function with appropriate error handling. <END_TASK> <USER_TASK:> Description: def mutagen_call(action, path, func, *args, **kwargs): """Call a Mutagen function with appropriate error handling. `action` is a string describing what the function is trying to do, and `path` is the relevant filename. The rest of the arguments describe the callable to invoke. We require at least Mutagen 1.33, where `IOError` is *never* used, neither for internal parsing errors *nor* for ordinary IO error conditions such as a bad filename. Mutagen-specific parsing errors and IO errors are reraised as `UnreadableFileError`. Other exceptions raised inside Mutagen---i.e., bugs---are reraised as `MutagenError`. """
try: return func(*args, **kwargs) except mutagen.MutagenError as exc: log.debug(u'%s failed: %s', action, six.text_type(exc)) raise UnreadableFileError(path, six.text_type(exc)) except Exception as exc: # Isolate bugs in Mutagen. log.debug(u'%s', traceback.format_exc()) log.error(u'uncaught Mutagen exception in %s: %s', action, exc) raise MutagenError(path, exc)
<SYSTEM_TASK:> Try to covert val to out_type but never raise an exception. If <END_TASK> <USER_TASK:> Description: def _safe_cast(out_type, val): """Try to covert val to out_type but never raise an exception. If the value can't be converted, then a sensible default value is returned. out_type should be bool, int, or unicode; otherwise, the value is just passed through. """
if val is None: return None if out_type == int: if isinstance(val, int) or isinstance(val, float): # Just a number. return int(val) else: # Process any other type as a string. if isinstance(val, bytes): val = val.decode('utf-8', 'ignore') elif not isinstance(val, six.string_types): val = six.text_type(val) # Get a number from the front of the string. match = re.match(r'[\+-]?[0-9]+', val.strip()) return int(match.group(0)) if match else 0 elif out_type == bool: try: # Should work for strings, bools, ints: return bool(int(val)) except ValueError: return False elif out_type == six.text_type: if isinstance(val, bytes): return val.decode('utf-8', 'ignore') elif isinstance(val, six.text_type): return val else: return six.text_type(val) elif out_type == float: if isinstance(val, int) or isinstance(val, float): return float(val) else: if isinstance(val, bytes): val = val.decode('utf-8', 'ignore') else: val = six.text_type(val) match = re.match(r'[\+-]?([0-9]+\.?[0-9]*|[0-9]*\.[0-9]+)', val.strip()) if match: val = match.group(0) if val: return float(val) return 0.0 else: return val
<SYSTEM_TASK:> Given a raw value stored on a Mutagen object, decode and <END_TASK> <USER_TASK:> Description: def deserialize(self, mutagen_value): """Given a raw value stored on a Mutagen object, decode and return the represented value. """
if self.suffix and isinstance(mutagen_value, six.text_type) \ and mutagen_value.endswith(self.suffix): return mutagen_value[:-len(self.suffix)] else: return mutagen_value
<SYSTEM_TASK:> Assign the value for the field using this style. <END_TASK> <USER_TASK:> Description: def set(self, mutagen_file, value): """Assign the value for the field using this style. """
self.store(mutagen_file, self.serialize(value))
<SYSTEM_TASK:> Convert the external Python value to a type that is suitable for <END_TASK> <USER_TASK:> Description: def serialize(self, value): """Convert the external Python value to a type that is suitable for storing in a Mutagen file object. """
if isinstance(value, float) and self.as_type is six.text_type: value = u'{0:.{1}f}'.format(value, self.float_places) value = self.as_type(value) elif self.as_type is six.text_type: if isinstance(value, bool): # Store bools as 1/0 instead of True/False. value = six.text_type(int(bool(value))) elif isinstance(value, bytes): value = value.decode('utf-8', 'ignore') else: value = six.text_type(value) else: value = self.as_type(value) if self.suffix: value += self.suffix return value
<SYSTEM_TASK:> Get a list of all values for the field using this style. <END_TASK> <USER_TASK:> Description: def get_list(self, mutagen_file): """Get a list of all values for the field using this style. """
return [self.deserialize(item) for item in self.fetch(mutagen_file)]
<SYSTEM_TASK:> Set all values for the field using this style. `values` <END_TASK> <USER_TASK:> Description: def set_list(self, mutagen_file, values): """Set all values for the field using this style. `values` should be an iterable. """
self.store(mutagen_file, [self.serialize(value) for value in values])
<SYSTEM_TASK:> Return an APIC frame populated with data from ``image``. <END_TASK> <USER_TASK:> Description: def serialize(self, image): """Return an APIC frame populated with data from ``image``. """
assert isinstance(image, Image) frame = mutagen.id3.Frames[self.key]() frame.data = image.data frame.mime = image.mime_type frame.desc = image.desc or u'' # For compatibility with OS X/iTunes prefer latin-1 if possible. # See issue #899 try: frame.desc.encode("latin-1") except UnicodeEncodeError: frame.encoding = mutagen.id3.Encoding.UTF16 else: frame.encoding = mutagen.id3.Encoding.LATIN1 frame.type = image.type_index return frame
<SYSTEM_TASK:> Turn a Image into a base64 encoded FLAC picture block. <END_TASK> <USER_TASK:> Description: def serialize(self, image): """Turn a Image into a base64 encoded FLAC picture block. """
pic = mutagen.flac.Picture() pic.data = image.data pic.type = image.type_index pic.mime = image.mime_type pic.desc = image.desc or u'' # Encoding with base64 returns bytes on both Python 2 and 3. # Mutagen requires the data to be a Unicode string, so we decode # it before passing it along. return base64.b64encode(pic.write()).decode('ascii')
<SYSTEM_TASK:> ``pictures`` is a list of mutagen.flac.Picture instances. <END_TASK> <USER_TASK:> Description: def store(self, mutagen_file, pictures): """``pictures`` is a list of mutagen.flac.Picture instances. """
mutagen_file.clear_pictures() for pic in pictures: mutagen_file.add_picture(pic)
<SYSTEM_TASK:> Remove all images from the file. <END_TASK> <USER_TASK:> Description: def delete(self, mutagen_file): """Remove all images from the file. """
for cover_tag in self.TAG_NAMES.values(): try: del mutagen_file[cover_tag] except KeyError: pass
<SYSTEM_TASK:> Yields the list of storage styles of this field that can <END_TASK> <USER_TASK:> Description: def styles(self, mutagen_file): """Yields the list of storage styles of this field that can handle the MediaFile's format. """
for style in self._styles: if mutagen_file.__class__.__name__ in style.formats: yield style
<SYSTEM_TASK:> Get an appropriate "null" value for this field's type. This <END_TASK> <USER_TASK:> Description: def _none_value(self): """Get an appropriate "null" value for this field's type. This is used internally when setting the field to None. """
if self.out_type == int: return 0 elif self.out_type == float: return 0.0 elif self.out_type == bool: return False elif self.out_type == six.text_type: return u''
<SYSTEM_TASK:> Get a 3-item sequence representing the date consisting of a <END_TASK> <USER_TASK:> Description: def _get_date_tuple(self, mediafile): """Get a 3-item sequence representing the date consisting of a year, month, and day number. Each number is either an integer or None. """
# Get the underlying data and split on hyphens and slashes. datestring = super(DateField, self).__get__(mediafile, None) if isinstance(datestring, six.string_types): datestring = re.sub(r'[Tt ].*$', '', six.text_type(datestring)) items = re.split('[-/]', six.text_type(datestring)) else: items = [] # Ensure that we have exactly 3 components, possibly by # truncating or padding. items = items[:3] if len(items) < 3: items += [None] * (3 - len(items)) # Use year field if year is missing. if not items[0] and hasattr(self, '_year_field'): items[0] = self._year_field.__get__(mediafile) # Convert each component to an integer if possible. items_ = [] for item in items: try: items_.append(int(item)) except (TypeError, ValueError): items_.append(None) return items_
<SYSTEM_TASK:> Set the value of the field given a year, month, and day <END_TASK> <USER_TASK:> Description: def _set_date_tuple(self, mediafile, year, month=None, day=None): """Set the value of the field given a year, month, and day number. Each number can be an integer or None to indicate an unset component. """
if year is None: self.__delete__(mediafile) return date = [u'{0:04d}'.format(int(year))] if month: date.append(u'{0:02d}'.format(int(month))) if month and day: date.append(u'{0:02d}'.format(int(day))) date = map(six.text_type, date) super(DateField, self).__set__(mediafile, u'-'.join(date)) if hasattr(self, '_year_field'): self._year_field.__set__(mediafile, year)
<SYSTEM_TASK:> Get a sort key for a field name that determines the order <END_TASK> <USER_TASK:> Description: def _field_sort_name(cls, name): """Get a sort key for a field name that determines the order fields should be written in. Fields names are kept unchanged, unless they are instances of :class:`DateItemField`, in which case `year`, `month`, and `day` are replaced by `date0`, `date1`, and `date2`, respectively, to make them appear in that order. """
if isinstance(cls.__dict__[name], DateItemField): name = re.sub('year', 'date0', name) name = re.sub('month', 'date1', name) name = re.sub('day', 'date2', name) return name
<SYSTEM_TASK:> Get the names of all writable metadata fields, sorted in the <END_TASK> <USER_TASK:> Description: def sorted_fields(cls): """Get the names of all writable metadata fields, sorted in the order that they should be written. This is a lexicographic order, except for instances of :class:`DateItemField`, which are sorted in year-month-day order. """
for property in sorted(cls.fields(), key=cls._field_sort_name): yield property
<SYSTEM_TASK:> Add a field to store custom tags. <END_TASK> <USER_TASK:> Description: def add_field(cls, name, descriptor): """Add a field to store custom tags. :param name: the name of the property the field is accessed through. It must not already exist on this class. :param descriptor: an instance of :class:`MediaField`. """
# Guard clause: only MediaField descriptors may be attached.
if not isinstance(descriptor, MediaField):
    raise ValueError(
        u'{0} must be an instance of MediaField'.format(descriptor))
# Guard clause: refuse to clobber an existing class attribute.
if name in cls.__dict__:
    raise ValueError(
        u'property "{0}" already exists on MediaField'.format(name))
setattr(cls, name, descriptor)
<SYSTEM_TASK:> Set all field values from a dictionary. <END_TASK> <USER_TASK:> Description: def update(self, dict): """Set all field values from a dictionary. For any key in `dict` that is also a field to store tags the method retrieves the corresponding value from `dict` and updates the `MediaFile`. If a key has the value `None`, the corresponding property is deleted from the `MediaFile`. """
# Walk the fields in canonical write order and apply matching entries.
for field in self.sorted_fields():
    if field not in dict:
        continue
    value = dict[field]
    if value is None:
        # A None value deletes the corresponding tag entirely.
        delattr(self, field)
    else:
        setattr(self, field, value)
<SYSTEM_TASK:> Wrapper around rebin that actually rebins 2 by 2 <END_TASK> <USER_TASK:> Description: def rebin2x2(a): """ Wrapper around rebin that actually rebins 2 by 2 """
inshape = np.array(a.shape) if not (inshape % 2 == np.zeros(2)).all(): # Modulo check to see if size is even raise RuntimeError, "I want even image shapes !" return rebin(a, inshape/2)
<SYSTEM_TASK:> Finds and labels the cosmic "islands" and returns a list of dicts containing their positions. <END_TASK> <USER_TASK:> Description: def labelmask(self, verbose = None): """ Finds and labels the cosmic "islands" and returns a list of dicts containing their positions. This is made on purpose for visualizations a la f2n.drawstarslist, but could be useful anyway. """
if verbose == None: verbose = self.verbose if verbose: print "Labeling mask pixels ..." # We morphologicaly dilate the mask to generously connect "sparse" cosmics : #dilstruct = np.ones((5,5)) dilmask = ndimage.morphology.binary_dilation(self.mask, structure=dilstruct, iterations=1, mask=None, output=None, border_value=0, origin=0, brute_force=False) # origin = 0 means center (labels, n) = ndimage.measurements.label(dilmask) #print "Number of cosmic ray hits : %i" % n #tofits(labels, "labels.fits", verbose = False) slicecouplelist = ndimage.measurements.find_objects(labels) # Now we have a huge list of couples of numpy slice objects giving a frame around each object # For plotting purposes, we want to transform this into the center of each object. if len(slicecouplelist) != n: # This never happened, but you never know ... raise RuntimeError, "Mega error in labelmask !" centers = [[(tup[0].start + tup[0].stop)/2.0, (tup[1].start + tup[1].stop)/2.0] for tup in slicecouplelist] # We also want to know how many pixels where affected by each cosmic ray. # Why ? Dunno... it's fun and available in scipy :-) sizes = ndimage.measurements.sum(self.mask.ravel(), labels.ravel(), np.arange(1,n+1,1)) retdictlist = [{"name":"%i" % size, "x":center[0], "y":center[1]} for (size, center) in zip(sizes, centers)] if verbose: print "Labeling done" return retdictlist
<SYSTEM_TASK:> Returns a morphologically dilated copy of the current mask. <END_TASK> <USER_TASK:> Description: def getdilatedmask(self, size=3): """ Returns a morphologically dilated copy of the current mask. size = 3 or 5 decides how to dilate. """
if size == 3: dilmask = ndimage.morphology.binary_dilation(self.mask, structure=growkernel, iterations=1, mask=None, output=None, border_value=0, origin=0, brute_force=False) elif size == 5: dilmask = ndimage.morphology.binary_dilation(self.mask, structure=dilstruct, iterations=1, mask=None, output=None, border_value=0, origin=0, brute_force=False) else: dismask = self.mask.copy() return dilmask
<SYSTEM_TASK:> Returns the mask of saturated stars after finding them if not yet done. <END_TASK> <USER_TASK:> Description: def getsatstars(self, verbose = None): """ Returns the mask of saturated stars after finding them if not yet done. Intended mainly for external use. """
if verbose == None: verbose = self.verbose if not self.satlevel > 0: raise RuntimeError, "Cannot determine satstars : you gave satlevel <= 0 !" if self.satstars == None: self.findsatstars(verbose = verbose) return self.satstars
<SYSTEM_TASK:> Estimates the background level. This could be used to fill pixels in large cosmics. <END_TASK> <USER_TASK:> Description: def guessbackgroundlevel(self): """ Estimates the background level. This could be used to fill pixels in large cosmics. """
if self.backgroundlevel == None: self.backgroundlevel = np.median(self.rawarray.ravel()) return self.backgroundlevel
<SYSTEM_TASK:> Search your Django project root. <END_TASK> <USER_TASK:> Description: def search_project_root(): """ Search your Django project root. returns: - path:string Django project root path """
while True: current = os.getcwd() if pathlib.Path("Miragefile.py").is_file() or pathlib.Path("Miragefile").is_file(): return current elif os.getcwd() == "/": raise FileNotFoundError else: os.chdir("../")
<SYSTEM_TASK:> Judge whether the current working directory is in a Django application or not. <END_TASK> <USER_TASK:> Description: def in_app() -> bool: """ Judge whether the current working directory is in a Django application or not. returns: - (Bool) True when the cwd is in an app dir """
try: MirageEnvironment.set_import_root() import apps if os.path.isfile("apps.py"): return True else: return False except ImportError: return False except: return False
<SYSTEM_TASK:> Start working on a new hotfix. <END_TASK> <USER_TASK:> Description: def start(name): # type: (str) -> None """ Start working on a new hotfix. This will create a new branch off master called hotfix/<name>. Args: name (str): The name of the new feature. """
# Hotfix branches are always cut from master.
branch_name = 'hotfix/' + common.to_branch_name(name)
master_branch = conf.get('git.master_branch', 'master')

common.assert_on_branch(master_branch)
common.git_checkout(branch_name, create=True)
<SYSTEM_TASK:> Merge current feature into develop. <END_TASK> <USER_TASK:> Description: def finish(): # type: () -> None """ Merge current feature into develop. """
# Refuse to merge over uncommitted work (unless we're only pretending).
dry_run = context.get('pretend', False)
if not dry_run and (git.staged() or git.unstaged()):
    log.err(
        "You have uncommitted changes in your repo!\n"
        "You need to stash them before you merge the hotfix branch"
    )
    sys.exit(1)

develop = conf.get('git.devel_branch', 'develop')
master = conf.get('git.master_branch', 'master')

hotfix_branch = git.current_branch(refresh=True)
common.assert_branch_type('hotfix')

# Land the fix on master first...
common.git_checkout(master)
common.git_pull(master)
common.git_merge(master, hotfix_branch.name)

# ...then propagate it to develop as well.
common.git_checkout(develop)
common.git_pull(develop)
common.git_merge(develop, hotfix_branch.name)

# Delete the merged branch and end up back on master.
common.git_branch_delete(hotfix_branch.name)
common.git_prune()
common.git_checkout(master)
<SYSTEM_TASK:> Run the shell command <END_TASK> <USER_TASK:> Description: def run(self): """ Run the shell command Returns: ShellCommand: return this ShellCommand instance for chaining """
if not self.block: self.output = [] self.error = [] self.thread = threading.Thread(target=self.run_non_blocking) self.thread.start() else: self.__create_process() self.process.wait() if self._stdout is not None: self.output = self.process.stdout.read().decode("utf-8") if self._stderr is not None: self.error = self.process.stderr.read().decode("utf-8") self.return_code = self.process.returncode return self
<SYSTEM_TASK:> Send text to stdin. Can only be used on non blocking commands <END_TASK> <USER_TASK:> Description: def send(self, value): """ Send text to stdin. Can only be used on non blocking commands Args: value (str): the text to write on stdin Raises: TypeError: If command is blocking Returns: ShellCommand: return this ShellCommand instance for chaining """
if not self.block and self._stdin is not None: self.writer.write("{}\n".format(value)) return self else: raise TypeError(NON_BLOCKING_ERROR_MESSAGE)
<SYSTEM_TASK:> Append lines from stdout to self.output. <END_TASK> <USER_TASK:> Description: def poll_output(self): """ Append lines from stdout to self.output. Returns: list: The lines added since last call """
if self.block: return self.output new_list = self.output[self.old_output_size:] self.old_output_size += len(new_list) return new_list
<SYSTEM_TASK:> Append lines from stderr to self.errors. <END_TASK> <USER_TASK:> Description: def poll_error(self): """ Append lines from stderr to self.errors. Returns: list: The lines added since last call """
if self.block: return self.error new_list = self.error[self.old_error_size:] self.old_error_size += len(new_list) return new_list
<SYSTEM_TASK:> Kill the current non blocking command <END_TASK> <USER_TASK:> Description: def kill(self): """ Kill the current non blocking command Raises: TypeError: If command is blocking """
# Only non-blocking commands have a live process to kill.
if self.block:
    raise TypeError(NON_BLOCKING_ERROR_MESSAGE)
try:
    self.process.kill()
except ProcessLookupError as err:
    # Process already exited; just record the fact.
    self.logger.debug(err)
<SYSTEM_TASK:> Block until a pattern has been found in stdout and stderr <END_TASK> <USER_TASK:> Description: def wait_for(self, pattern, timeout=None): """ Block until a pattern has been found in stdout and stderr Args: pattern(:class:`~re.Pattern`): The pattern to search timeout(int): Maximum number of seconds to wait. If None, wait infinitely Raises: TimeoutError: When the timeout is reached """
should_continue = True if self.block: raise TypeError(NON_BLOCKING_ERROR_MESSAGE) def stop(signum, frame): # pylint: disable=W0613 nonlocal should_continue if should_continue: raise TimeoutError() if timeout: signal.signal(signal.SIGALRM, stop) signal.alarm(timeout) while should_continue: output = self.poll_output() + self.poll_error() filtered = [line for line in output if re.match(pattern, line)] if filtered: should_continue = False
<SYSTEM_TASK:> Check if the command is currently running <END_TASK> <USER_TASK:> Description: def is_running(self): """ Check if the command is currently running Returns: bool: True if running, else False """
if self.block: return False return self.thread.is_alive() or self.process.poll() is None
<SYSTEM_TASK:> Used mainly to measure scatter for the BCES best-fit <END_TASK> <USER_TASK:> Description: def scatter(slope, zero, x1, x2, x1err=[], x2err=[]): """ Used mainly to measure scatter for the BCES best-fit """
n = len(x1) x2pred = zero + slope * x1 s = sum((x2 - x2pred) ** 2) / (n - 1) if len(x2err) == n: s_obs = sum((x2err / x2) ** 2) / n s0 = s - s_obs print numpy.sqrt(s), numpy.sqrt(s_obs), numpy.sqrt(s0) return numpy.sqrt(s0)
<SYSTEM_TASK:> Use emcee to find the best-fit linear relation or power law <END_TASK> <USER_TASK:> Description: def mcmc(x1, x2, x1err=[], x2err=[], po=(1,1,0.5), logify=True, nsteps=5000, nwalkers=100, nburn=500, output='full'): """ Use emcee to find the best-fit linear relation or power law accounting for measurement uncertainties and intrinsic scatter Parameters ---------- x1 : array of floats Independent variable, or observable x2 : array of floats Dependent variable x1err : array of floats (optional) Uncertainties on the independent variable x2err : array of floats (optional) Uncertainties on the dependent variable po : tuple of 3 floats (optional) Initial guesses for zero point, slope, and intrinsic scatter. Results are not very sensitive to these values so they shouldn't matter a lot. logify : bool (default True) Whether to take the log of the measurements in order to estimate the best-fit power law instead of linear relation nsteps : int (default 5000) Number of steps each walker should take in the MCMC nwalkers : int (default 100) Number of MCMC walkers nburn : int (default 500) Number of samples to discard to give the MCMC enough time to converge. output : list of ints or 'full' (default 'full') If 'full', then return the full samples (except for burn-in section) for each parameter. Otherwise, each float corresponds to a percentile that will be returned for each parameter. Returns ------- See *output* argument above for return options. """
import emcee if len(x1err) == 0: x1err = numpy.ones(len(x1)) if len(x2err) == 0: x2err = numpy.ones(len(x1)) def lnlike(theta, x, y, xerr, yerr): a, b, s = theta model = a + b*x sigma = numpy.sqrt((b*xerr)**2 + yerr*2 + s**2) lglk = 2 * sum(numpy.log(sigma)) + \ sum(((y-model) / sigma) ** 2) + \ numpy.log(len(x)) * numpy.sqrt(2*numpy.pi) / 2 return -lglk def lnprior(theta): a, b, s = theta if s >= 0: return 0 return -numpy.inf def lnprob(theta, x, y, xerr, yerr): lp = lnprior(theta) return lp + lnlike(theta, x, y, xerr, yerr) if logify: x1, x2, x1err, x2err = to_log(x1, x2, x1err, x2err) start = numpy.array(po) ndim = len(start) pos = [start + 1e-4*numpy.random.randn(ndim) for i in range(nwalkers)] sampler = emcee.EnsembleSampler(nwalkers, ndim, lnprob, args=(x1,x2,x1err,x2err)) sampler.run_mcmc(pos, nsteps) samples = numpy.array([sampler.chain[:,nburn:,i].reshape(-1) \ for i in xrange(ndim)]) if logify: samples[2] *= numpy.log(10) if output == 'full': return samples else: try: values = [[numpy.percentile(s, o) for o in output] for s in samples] return values except TypeError: msg = 'ERROR: wrong value for argument output in mcmc().' msg += ' Must be "full" or list of ints.' print msg exit() return
<SYSTEM_TASK:> Maximum Likelihood Estimation of best-fit parameters <END_TASK> <USER_TASK:> Description: def mle(x1, x2, x1err=[], x2err=[], cerr=[], s_int=True, po=(1,0,0.1), verbose=False, logify=True, full_output=False): """ Maximum Likelihood Estimation of best-fit parameters Parameters ---------- x1, x2 : float arrays the independent and dependent variables. x1err, x2err : float arrays (optional) measurement uncertainties on independent and dependent variables. Any of the two, or both, can be supplied. cerr : float array (same size as x1) covariance on the measurement errors s_int : boolean (default True) whether to include intrinsic scatter in the MLE. po : tuple of floats initial guess for free parameters. If s_int is True, then po must have 3 elements; otherwise it can have two (for the zero point and the slope) verbose : boolean (default False) verbose? logify : boolean (default True) whether to convert the values to log10's. This is to calculate the best-fit power law. Note that the result is given for the equation log(y)=a+b*log(x) -- i.e., the zero point must be converted to 10**a if logify=True full_output : boolean (default False) numpy.optimize.fmin's full_output argument Returns ------- a : float Maximum Likelihood Estimate of the zero point. Note that if logify=True, the power-law intercept is 10**a b : float Maximum Likelihood Estimate of the slope s : float (optional, if s_int=True) Maximum Likelihood Estimate of the intrinsic scatter """
from scipy import optimize n = len(x1) if len(x2) != n: raise ValueError('x1 and x2 must have same length') if len(x1err) == 0: x1err = numpy.ones(n) if len(x2err) == 0: x2err = numpy.ones(n) if logify: x1, x2, x1err, x2err = to_log(x1, x2, x1err, x2err) f = lambda a, b: a + b * x1 if s_int: w = lambda b, s: numpy.sqrt(b**2 * x1err**2 + x2err**2 + s**2) loglike = lambda p: 2 * sum(numpy.log(w(p[1],p[2]))) + \ sum(((x2 - f(p[0],p[1])) / w(p[1],p[2])) ** 2) + \ numpy.log(n * numpy.sqrt(2*numpy.pi)) / 2 else: w = lambda b: numpy.sqrt(b**2 * x1err**2 + x2err**2) loglike = lambda p: sum(numpy.log(w(p[1]))) + \ sum(((x2 - f(p[0],p[1])) / w(p[1])) ** 2) / 2 + \ numpy.log(n * numpy.sqrt(2*numpy.pi)) / 2 po = po[:2] out = optimize.fmin(loglike, po, disp=verbose, full_output=full_output) return out
<SYSTEM_TASK:> Take linear measurements and uncertainties and transform to log values. <END_TASK> <USER_TASK:> Description: def to_log(x1, x2, x1err, x2err): """ Take linear measurements and uncertainties and transform to log values. """
logx1 = numpy.log10(numpy.array(x1)) logx2 = numpy.log10(numpy.array(x2)) x1err = numpy.log10(numpy.array(x1)+numpy.array(x1err)) - logx1 x2err = numpy.log10(numpy.array(x2)+numpy.array(x2err)) - logx2 return logx1, logx2, x1err, x2err