text_prompt
stringlengths 157
13.1k
| code_prompt
stringlengths 7
19.8k
⌀ |
---|---|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _get(self, rec_id=None, upstream=None):
""" Fetches a record by the record's ID or upstream_identifier. Raises: `pulsarpy.models.RecordNotFound`: A record could not be found. """ |
# Fetch by primary ID when one is given; the record URL is built from the class route.
if rec_id:
self.record_url = self.__class__.get_record_url(rec_id)
self.debug_logger.debug("GET {} record with ID {}: {}".format(self.__class__.__name__, rec_id, self.record_url))
# NOTE(review): verify=False disables TLS certificate checking — presumably an internal server; confirm.
response = requests.get(url=self.record_url, headers=HEADERS, verify=False)
# Translate a 404 into the library's RecordNotFound exception.
if not response.ok and response.status_code == requests.codes.NOT_FOUND:
raise RecordNotFound("Search for {} record with ID '{}' returned no results.".format(self.__class__.__name__, rec_id))
self.write_response_html_to_file(response,"get_bob.html")
# Any other non-2xx status surfaces as requests.exceptions.HTTPError.
response.raise_for_status()
return response.json()
# Otherwise fall back to a lookup by upstream_identifier; find_by raises when require=True.
elif upstream:
rec_json = self.__class__.find_by({"upstream_identifier": upstream}, require=True)
self.record_url = self.__class__.get_record_url(rec_json["id"])
return rec_json |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def replace_name_with_id(cls, name):
""" Used to replace a foreign key reference using a name with an ID. Works by searching the record in Pulsar and expects to find exactly one hit. First, will check if the foreign key reference is an integer value and if so, returns that as it is presumed to be the foreign key. Raises: `pulsarpy.elasticsearch_utils.MultipleHitsException`: Multiple hits were returned from the name search. `pulsarpy.models.RecordNotFound`: No results were produced from the name search. """ |
# If the value is already an integer (or int-like string), assume it is the foreign key.
try:
int(name)
return name #Already a presumed ID.
except ValueError:
pass
#Not an int, so maybe a combination of MODEL_ABBR and Primary Key, i.e. B-8.
if name.split("-")[0] in Meta._MODEL_ABBREVS:
return int(name.split("-", 1)[1])
# Fall back to an Elasticsearch lookup by record name; exactly one hit is expected.
try:
result = cls.ES.get_record_by_name(cls.ES_INDEX_NAME, name)
if result:
return result["id"]
except pulsarpy.elasticsearch_utils.MultipleHitsException as e:
# Re-raise so the caller can distinguish an ambiguous name from a missing one.
raise
raise RecordNotFound("Name '{}' for model '{}' not found.".format(name, cls.__name__)) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def delete(self):
"""Deletes the record. """ |
res = requests.delete(url=self.record_url, headers=HEADERS, verify=False)
#self.write_response_html_to_file(res,"bob_delete.html")
if res.status_code == 204:
#No content. Can't render json:
return {}
return res.json() |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def find_by(cls, payload, require=False):
""" Searches the model in question by AND joining the query parameters. Implements a Railsy way of looking for a record using a method by the same name and passing in the query as a dict. as well. Only the first hit is returned, and there is no particular ordering specified in the server-side API method. Args: payload: `dict`. The attributes of a record to restrict the search to. require: `bool`. True means to raise a `pulsarpy.models.RecordNotFound` exception if no record is found. Returns: `dict`: The JSON serialization of the record, if any, found by the API call. `None`: If the API call didnt' return any results. Raises: `pulsarpy.models.RecordNotFound`: No records were found, and the `require` parameter is True. """ |
# Reject non-dict payloads early with a clear error.
if not isinstance(payload, dict):
raise ValueError("The 'payload' parameter must be provided a dictionary object.")
# The Rails-side route is <model URL>/find_by and expects the query nested under "find_by".
url = os.path.join(cls.URL, "find_by")
payload = {"find_by": payload}
cls.debug_logger.debug("Searching Pulsar {} for {}".format(cls.__name__, json.dumps(payload, indent=4)))
res = requests.post(url=url, json=payload, headers=HEADERS, verify=False)
#cls.write_response_html_to_file(res,"bob.html")
res.raise_for_status()
res_json = res.json()
if res_json:
try:
# Unwrap the serializer's model-name envelope, i.e. {"model_name": {...}}.
res_json = res_json[cls.MODEL_NAME]
except KeyError:
# Key won't be present if there isn't a serializer for it on the server.
pass
else:
# No hit: optionally raise, depending on the 'require' flag.
if require:
raise RecordNotFound("Can't find any {} records with search criteria: '{}'.".format(cls.__name__, payload))
return res_json |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def find_by_or(cls, payload):
""" Searches the model in question by OR joining the query parameters. Implements a Railsy way of looking for a record using a method by the same name and passing in the query as a string (for the OR operator joining to be specified). Only the first hit is returned, and there is not particular ordering specified in the server-side API method. Args: payload: `dict`. The attributes of a record to search for by using OR operator joining for each query parameter. Returns: `dict`: The JSON serialization of the record, if any, found by the API call. `None`: If the API call didnt' return any results. """ |
if not isinstance(payload, dict):
raise ValueError("The 'payload' parameter must be provided a dictionary object.")
url = os.path.join(cls.URL, "find_by_or")
payload = {"find_by_or": payload}
cls.debug_logger.debug("Searching Pulsar {} for {}".format(cls.__name__, json.dumps(payload, indent=4)))
res = requests.post(url=url, json=payload, headers=HEADERS, verify=False)
cls.write_response_html_to_file(res,"bob.html")
if res:
try:
res = res[cls.MODEL_NAME]
except KeyError:
# Key won't be present if there isn't a serializer for it on the server.
pass
return res |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def index(cls):
"""Fetches all records. Returns: `dict`. The JSON formatted response. Raises: `requests.exceptions.HTTPError`: The status code is not ok. """ |
res = requests.get(cls.URL, headers=HEADERS, verify=False)
res.raise_for_status()
return res.json() |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def patch(self, payload, append_to_arrays=True):
""" Patches current record and udpates the current instance's 'attrs' attribute to reflect the new changes. Args: payload - hash. This will be JSON-formatted prior to sending the request. Returns: `dict`. The JSON formatted response. Raises: `requests.exceptions.HTTPError`: The status code is not ok. """ |
if not isinstance(payload, dict):
raise ValueError("The 'payload' parameter must be provided a dictionary object.")
payload = self.__class__.set_id_in_fkeys(payload)
if append_to_arrays:
for key in payload:
val = payload[key]
if type(val) == list:
val.extend(getattr(self, key))
payload[key] = list(set(val))
payload = self.check_boolean_fields(payload)
payload = self.__class__.add_model_name_to_payload(payload)
self.debug_logger.debug("PATCHING payload {}".format(json.dumps(payload, indent=4)))
res = requests.patch(url=self.record_url, json=payload, headers=HEADERS, verify=False)
self.write_response_html_to_file(res,"bob.html")
res.raise_for_status()
json_res = res.json()
self.debug_logger.debug("Success")
self.attrs = json_res
return json_res |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def post(cls, payload):
"""Posts the data to the specified record. Args: payload: `dict`. This will be JSON-formatted prior to sending the request. Returns: `dict`. The JSON formatted response. Raises: `Requests.exceptions.HTTPError`: The status code is not ok. `RecordNotUnique`: The Rails server returned the exception ActiveRecord::RecordNotUnique. """ |
# Validate input type before any processing.
if not isinstance(payload, dict):
raise ValueError("The 'payload' parameter must be provided a dictionary object.")
# Normalize foreign-key references, boolean fields, and wrap under the model name.
payload = cls.set_id_in_fkeys(payload)
payload = cls.check_boolean_fields(payload)
payload = cls.add_model_name_to_payload(payload)
# Run any pre-post hooks:
payload = cls.prepost_hooks(payload)
cls.debug_logger.debug("POSTING payload {}".format(json.dumps(payload, indent=4)))
res = requests.post(url=cls.URL, json=(payload), headers=HEADERS, verify=False)
cls.write_response_html_to_file(res,"bob.html")
if not res.ok:
cls.log_error(res.text)
# The Rails server reports model-level failures via an "exception" key in the body.
res_json = res.json()
if "exception" in res_json:
exc_type = res_json["exception"]
if exc_type == "ActiveRecord::RecordNotUnique":
raise RecordNotUnique()
# Any other HTTP failure is raised as requests.exceptions.HTTPError.
res.raise_for_status()
res = res.json()
cls.log_post(res)
cls.debug_logger.debug("Success")
return res |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def log_error(cls, msg):
""" Logs the provided error message to both the error logger and the debug logger logging instances. Args: msg: `str`. The error message to log. """ |
cls.error_logger.error(msg)
cls.debug_logger.debug(msg) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def library_sequencing_results(self):
""" Generates a dict. where each key is a Library ID on the SequencingRequest and each value is the associated SequencingResult. Libraries that aren't yet with a SequencingResult are not inlcuded in the dict. """ |
sres_ids = self.sequencing_result_ids
res = {}
for i in sres_ids:
sres = SequencingResult(i)
res[sres.library_id] = sres
return res |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def unarchive_user(self, user_id):
"""Unarchives the user with the specified user ID. Args: user_id: `int`. The ID of the user to unarchive. Returns: `NoneType`: None. """ |
url = self.record_url + "/unarchive"
res = requests.patch(url=url, json={"user_id": user_id}, headers=HEADERS, verify=False)
self.write_response_html_to_file(res,"bob.html")
res.raise_for_status() |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def remove_api_key(self):
""" Removes the user's existing API key, if present, and sets the current instance's 'api_key' attribute to the empty string. Returns: `NoneType`: None. """ |
url = self.record_url + "/remove_api_key"
res = requests.patch(url=url, headers=HEADERS, verify=False)
res.raise_for_status()
self.api_key = "" |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _process(self, resource=None, data={}):
"""Processes the current transaction Sends an HTTP request to the PAYDUNYA API server """ |
# use object's data if no data is passed
_data = data or self._data
rsc_url = self.get_rsc_endpoint(resource)
# POST the transaction data when present, otherwise GET the resource.
if _data:
req = requests.post(rsc_url, data=json.dumps(_data),
headers=self.headers)
else:
req = requests.get(rsc_url, params=_data,
headers=self.headers)
if req.status_code == 200:
self._response = json.loads(req.text)
# PAYDUNYA uses response_code "00" for success; int("00") == 0 == 00.
if int(self._response['response_code']) == 00:
return (True, self._response)
else:
return (False, self._response['response_text'])
else:
# NOTE(review): non-200 statuses collapse to a generic (500, ...) tuple and
# the real status code is discarded — confirm callers only test element 0.
return (500, "Request Failed") |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def add_header(self, header):
"""Add a custom HTTP header to the client's request headers""" |
if type(header) is dict:
self._headers.update(header)
else:
raise ValueError(
"Dictionary expected, got '%s' instead" % type(header)
) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def changelog_cli(ctx):
# type: () -> None """ Generate changelog from commit messages. """ |
# When a subcommand was invoked, let click route to it instead of
# printing the default changelog.
if ctx.invoked_subcommand:
return
# Imports are deferred so the CLI module stays cheap to import.
from peltak.core import shell
from . import logic
shell.cprint(logic.changelog()) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def get_default_name(self):
'''
Return the default generated name to store value on the parser for this option.
eg. An option *['-s', '--use-ssl']* will generate the *use_ssl* name
Returns:
str: the default name of the option
'''
long_names = [name for name in self.name if name.startswith("--")]
short_names = [name for name in self.name if not name.startswith("--")]
if long_names:
return to_snake_case(long_names[0].lstrip("-"))
return to_snake_case(short_names[0].lstrip("-")) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_filename(self, base_dir=None, modality=None):
"""Construct filename based on the attributes. Parameters base_dir : Path path of the root directory. If specified, the return value is a Path, with base_dir / sub-XXX / (ses-XXX /) modality / filename otherwise the return value is a string. modality : str overwrite value for modality (i.e. the directory inside subject/session). This is necessary because sometimes the modality attribute is ambiguous. Returns ------- str or Path str of the filename if base_dir is not specified, otherwise the full Path """ |
# Assemble a BIDS-style filename from the entity attributes, in the
# conventional entity order: sub, ses, task, run/acq/dir, modality, extension.
filename = 'sub-' + self.subject
if self.session is not None:
filename += '_ses-' + self.session
if self.task is not None:
filename += '_task-' + self.task
# Without a direction entity, "run" precedes "acq".
if self.run is not None and self.direction is None:
filename += '_run-' + self.run
if self.acquisition is not None:
filename += '_acq-' + self.acquisition
if self.direction is not None:
filename += '_dir-' + self.direction
# With a direction entity, "run" is appended after "dir" instead.
if self.run is not None and self.direction is not None:
filename += '_run-' + self.run
if self.modality is not None:
filename += '_' + self.modality
if self.extension is not None:
filename += self.extension
# No root given: return the bare filename string.
if base_dir is None:
return filename
else:
# Build base_dir / sub-XXX [/ ses-XXX] / modality / filename as a Path.
dir_name = base_dir / ('sub-' + self.subject)
if self.session is not None:
dir_name /= 'ses-' + self.session
# An explicit modality argument overrides the attribute-derived directory.
if modality is not None:
dir_name /= modality
else:
dir_name = add_modality(dir_name, self.modality)
return dir_name / filename |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get(self, filter_lambda=None, map_lambda=None):
"""Select elements of the TSV, using python filter and map. Parameters filter_lambda : function function to filter the tsv rows (the function needs to return True/False) map_lambda : function function to select the tsv columns Returns ------- list list (not a generator, because that's the most common case) Examples -------- To select all the channels in one list, called "good_labels":: To select all the names of the channels: """ |
if filter_lambda is None:
filter_lambda = lambda x: True
if map_lambda is None:
map_lambda = lambda x: x
return list(map(map_lambda, filter(filter_lambda, self.tsv))) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def connect(self, client_id: str = None, client_secret: str = None) -> dict: """Authenticate application and get token bearer. Isogeo API uses oAuth 2.0 protocol (https://tools.ietf.org/html/rfc6749) see: http://help.isogeo.com/api/fr/authentication/groupsapps.html :param str client_id: application oAuth2 identifier :param str client_secret: application oAuth2 secret """ |
# instanciated or direct call
if not client_id and not client_secret:
client_id = self.client_id
client_secret = self.client_secret
else:
pass
# Basic Authentication header in Base64 (https://en.wikipedia.org/wiki/Base64)
# see: http://tools.ietf.org/html/rfc2617#section-2
# using Client Credentials Grant method
# see: http://tools.ietf.org/html/rfc6749#section-4.4
payload = {"grant_type": "client_credentials"}
head = {"user-agent": self.app_name}
# passing request to get a 24h bearer
# see: http://tools.ietf.org/html/rfc6750#section-2
id_url = "https://id.{}.isogeo.com/oauth/token".format(self.api_url)
try:
conn = self.post(
id_url,
auth=(client_id, client_secret),
headers=head,
data=payload,
proxies=self.proxies,
verify=self.ssl,
)
except ConnectionError as e:
raise ConnectionError("Connection to Isogeo ID" "failed: {}".format(e))
# just a fast check
check_params = checker.check_api_response(conn)
if check_params == 1:
pass
elif isinstance(check_params, tuple) and len(check_params) == 2:
raise ValueError(2, check_params)
# getting access
self.token = conn.json()
# add expiration date - calculating with a prevention of 10%
expiration_delay = self.token.get("expires_in", 3600) - (
self.token.get("expires_in", 3600) / 10
)
self.token["expires_at"] = datetime.utcnow() + timedelta(
seconds=expiration_delay
)
# end of method
return self.token |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def search( self, token: dict = None, query: str = "", bbox: list = None, poly: str = None, georel: str = None, order_by: str = "_created", order_dir: str = "desc", page_size: int = 100, offset: int = 0, share: str = None, specific_md: list = [], include: list = [], whole_share: bool = True, check: bool = True, augment: bool = False, tags_as_dicts: bool = False, prot: str = "https", ) -> dict: """Search within the resources shared to the application. It's the main method to use. :param str token: API auth token - DEPRECATED: token is now automatically included :param str query: search terms and semantic filters. Equivalent of **q** parameter in Isogeo API. It could be a simple string like *oil* or a tag like *keyword:isogeo:formations* or *keyword:inspire-theme:landcover*. The *AND* operator is applied when various tags are passed. :param list bbox: Bounding box to limit the search. Must be a 4 list of coordinates in WGS84 (EPSG 4326). Could be associated with *georel*. :param str poly: Geographic criteria for the search, in WKT format. Could be associated with *georel*. :param str georel: geometric operator to apply to the bbox or poly parameters. Available values (see: *isogeo.GEORELATIONS*):
* 'contains', * 'disjoint', * 'equals', * 'intersects' - [APPLIED BY API if NOT SPECIFIED] * 'overlaps', * 'within'. :param str order_by: sorting results. Available values: * '_created': metadata creation date [DEFAULT if relevance is null] * '_modified': metadata last update * 'title': metadata title * 'created': data creation date (possibly None) * 'modified': data last update date * 'relevance': relevance score calculated by API [DEFAULT]. :param str order_dir: sorting direction. Available values: * 'desc': descending * 'asc': ascending :param int page_size: limits the number of results. Useful to paginate results display. Default value: 100. :param int offset: offset to start page size from a specific results index :param str share: share UUID to filter on :param list specific_md: list of metadata UUIDs to filter on :param list include: subresources that should be returned. Must be a list of strings. Available values: *isogeo.SUBRESOURCES* :param bool whole_share: option to return all results or only the page size. *True* by DEFAULT. :param bool check: option to check query parameters and avoid erros. *True* by DEFAULT. :param bool augment: option to improve API response by adding some tags on the fly (like shares_id) :param bool tags_as_dicts: option to store tags as key/values by filter. :param str prot: https [DEFAULT] or http (use it only for dev and tracking needs). """ |
# specific resources specific parsing
specific_md = checker._check_filter_specific_md(specific_md)
# sub resources specific parsing
include = checker._check_filter_includes(include)
# handling request parameters
payload = {
"_id": specific_md,
"_include": include,
"_lang": self.lang,
"_limit": page_size,
"_offset": offset,
"box": bbox,
"geo": poly,
"rel": georel,
"ob": order_by,
"od": order_dir,
"q": query,
"s": share,
}
# optionally validate the query parameters before sending the request
if check:
checker.check_request_parameters(payload)
else:
pass
# search request
search_url = "{}://v1.{}.isogeo.com/resources/search".format(prot, self.api_url)
try:
search_req = self.get(
search_url,
headers=self.header,
params=payload,
proxies=self.proxies,
verify=self.ssl,
)
except Exception as e:
# NOTE(review): the original exception detail is logged but a bare
# Exception is re-raised — callers lose the cause; confirm intended.
logging.error(e)
raise Exception
# fast response check
checker.check_api_response(search_req)
# serializing result into dict and storing resources in variables
search_rez = search_req.json()
resources_count = search_rez.get("total") # total of metadatas shared
# handling Isogeo API pagination
# see: http://help.isogeo.com/api/fr/methods/pagination.html
if resources_count > page_size and whole_share:
# if API returned more than one page of results, let's get the rest!
metadatas = [] # a recipient list
payload["_limit"] = 100 # now it'll get pages of 100 resources
# let's parse pages
for idx in range(0, int(ceil(resources_count / 100)) + 1):
payload["_offset"] = idx * 100
search_req = self.get(
search_url,
headers=self.header,
params=payload,
proxies=self.proxies,
verify=self.ssl,
)
# storing results by addition
metadatas.extend(search_req.json().get("results"))
search_rez["results"] = metadatas
else:
pass
# add shares to tags and query
if augment:
self.add_tags_shares(search_rez.get("tags"))
if share:
search_rez.get("query")["_shares"] = [share]
else:
search_rez.get("query")["_shares"] = []
else:
pass
# store tags in dicts
if tags_as_dicts:
new_tags = utils.tags_to_dict(
tags=search_rez.get("tags"), prev_query=search_rez.get("query")
)
# clear, then rebuild in place so existing references stay valid
search_rez.get("tags").clear()
search_rez.get("query").clear()
# update
search_rez.get("tags").update(new_tags[0])
search_rez.get("query").update(new_tags[1])
else:
pass
# end of method
return search_rez |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def resource( self, token: dict = None, id_resource: str = None, subresource=None, include: list = [], prot: str = "https", ) -> dict: """Get complete or partial metadata about one specific resource. :param str token: API auth token :param str id_resource: metadata UUID to get :param list include: subresources that should be included. Must be a list of strings. Available values: 'isogeo.SUBRESOURCES' :param str prot: https [DEFAULT] or http (use it only for dev and tracking needs). """ |
# if subresource route: append "/<subresource>" to the metadata URL
if isinstance(subresource, str):
subresource = "/{}".format(checker._check_subresource(subresource))
else:
subresource = ""
# _includes specific parsing
include = checker._check_filter_includes(include)
# handling request parameters
payload = {"id": id_resource, "_include": include}
# resource search: GET /resources/<uuid>[/<subresource>]
md_url = "{}://v1.{}.isogeo.com/resources/{}{}".format(
prot, self.api_url, id_resource, subresource
)
resource_req = self.get(
md_url,
headers=self.header,
params=payload,
proxies=self.proxies,
verify=self.ssl,
)
# report API-level errors before deserializing
checker.check_api_response(resource_req)
# end of method
return resource_req.json() |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def shares(self, token: dict = None, prot: str = "https") -> dict: """Get information about shares which feed the application. :param str token: API auth token :param str prot: https [DEFAULT] or http (use it only for dev and tracking needs). """ |
# passing auth parameter
shares_url = "{}://v1.{}.isogeo.com/shares/".format(prot, self.api_url)
shares_req = self.get(
shares_url, headers=self.header, proxies=self.proxies, verify=self.ssl
)
# checking response
checker.check_api_response(shares_req)
# end of method
return shares_req.json() |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def share( self, share_id: str, token: dict = None, augment: bool = False, prot: str = "https", ) -> dict: """Get information about a specific share and its applications. :param str token: API auth token :param str share_id: share UUID :param bool augment: option to improve API response by adding some tags on the fly. :param str prot: https [DEFAULT] or http (use it only for dev and tracking needs). """ |
# passing auth parameter
share_url = "{}://v1.{}.isogeo.com/shares/{}".format(
prot, self.api_url, share_id
)
share_req = self.get(
share_url, headers=self.header, proxies=self.proxies, verify=self.ssl
)
# checking response
checker.check_api_response(share_req)
# enhance share model
share = share_req.json()
if augment:
# Attach the share's fed metadata by running a full search scoped to it.
share = utils.share_extender(
share, self.search(whole_share=1, share=share_id).get("results")
)
else:
pass
# end of method
return share |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def licenses( self, token: dict = None, owner_id: str = None, prot: str = "https" ) -> dict: """Get information about licenses owned by a specific workgroup. :param str token: API auth token :param str owner_id: workgroup UUID :param str prot: https [DEFAULT] or http (use it only for dev and tracking needs). """ |
# handling request parameters
payload = {"gid": owner_id}
# search request: GET /groups/<workgroup uuid>/licenses
licenses_url = "{}://v1.{}.isogeo.com/groups/{}/licenses".format(
prot, self.api_url, owner_id
)
licenses_req = self.get(
licenses_url,
headers=self.header,
params=payload,
proxies=self.proxies,
verify=self.ssl,
)
# checking response
req_check = checker.check_api_response(licenses_req)
# on failure check_api_response returns a tuple — pass it through to the caller
if isinstance(req_check, tuple):
return req_check
# end of method
return licenses_req.json() |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def license(self, license_id: str, token: dict = None, prot: str = "https") -> dict: """Get details about a specific license. :param str token: API auth token :param str license_id: license UUID :param str prot: https [DEFAULT] or http (use it only for dev and tracking needs). """ |
# handling request parameters
payload = {"lid": license_id}
# search request: GET /licenses/<license uuid>
license_url = "{}://v1.{}.isogeo.com/licenses/{}".format(
prot, self.api_url, license_id
)
license_req = self.get(
license_url,
headers=self.header,
params=payload,
proxies=self.proxies,
verify=self.ssl,
)
# report API-level errors before deserializing
checker.check_api_response(license_req)
# end of method
return license_req.json() |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def thesauri(self, token: dict = None, prot: str = "https") -> dict: """Get list of available thesauri. :param str token: API auth token :param str prot: https [DEFAULT] or http (use it only for dev and tracking needs). """ |
# passing auth parameter
thez_url = "{}://v1.{}.isogeo.com/thesauri".format(prot, self.api_url)
thez_req = self.get(
thez_url, headers=self.header, proxies=self.proxies, verify=self.ssl
)
# checking response
checker.check_api_response(thez_req)
# end of method
return thez_req.json() |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def thesaurus( self, token: dict = None, thez_id: str = "1616597fbc4348c8b11ef9d59cf594c8", prot: str = "https", ) -> dict: """Get a thesaurus. :param str token: API auth token :param str thez_id: thesaurus UUID :param str prot: https [DEFAULT] or http (use it only for dev and tracking needs). """ |
# handling request parameters
payload = {"tid": thez_id}
# passing auth parameter: GET /thesauri/<thesaurus uuid>
thez_url = "{}://v1.{}.isogeo.com/thesauri/{}".format(
prot, self.api_url, thez_id
)
thez_req = self.get(
thez_url,
headers=self.header,
params=payload,
proxies=self.proxies,
verify=self.ssl,
)
# report API-level errors before deserializing
checker.check_api_response(thez_req)
# end of method
return thez_req.json() |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def keywords( self, token: dict = None, thez_id: str = "1616597fbc4348c8b11ef9d59cf594c8", query: str = "", offset: int = 0, order_by: str = "text", order_dir: str = "desc", page_size: int = 20, specific_md: list = [], specific_tag: list = [], include: list = [], prot: str = "https", ) -> dict: """Search for keywords within a specific thesaurus. :param str token: API auth token :param str thez_id: thesaurus UUID :param str query: search terms :param int offset: pagination start :param str order_by: sort criteria. Available values : - count.group, - count.isogeo, - text :param str prot: https [DEFAULT] or http (use it only for dev and tracking needs). """ |
# specific resources specific parsing
specific_md = checker._check_filter_specific_md(specific_md)
# sub resources specific parsing
include = checker._check_filter_includes(include, "keyword")
# specific tag specific parsing
specific_tag = checker._check_filter_specific_tag(specific_tag)
# handling request parameters
payload = {
"_id": specific_md,
"_include": include,
"_limit": page_size,
"_offset": offset,
"_tag": specific_tag,
"tid": thez_id,
"ob": order_by,
"od": order_dir,
"q": query,
}
# search request: GET keyword search scoped to the thesaurus
keywords_url = "{}://v1.{}.isogeo.com/thesauri/{}/keywords/search".format(
prot, self.api_url, thez_id
)
kwds_req = self.get(
keywords_url,
headers=self.header,
params=payload,
proxies=self.proxies,
verify=self.ssl,
)
# report API-level errors before deserializing
checker.check_api_response(kwds_req)
# end of method
return kwds_req.json() |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def dl_hosted( self, token: dict = None, resource_link: dict = None, encode_clean: bool = 1, proxy_url: str = None, prot: str = "https", ) -> tuple: """Download hosted resource. :param str token: API auth token :param dict resource_link: link dictionary :param bool encode_clean: option to ensure a clean filename and avoid OS errors :param str proxy_url: proxy to use to download :param str prot: https [DEFAULT] or http (use it only for dev and tracking needs). Example of resource_link dict: .. code-block:: json { "_id": "g8h9i0j11k12l13m14n15o16p17Q18rS", "type": "hosted", "title": "label_of_hosted_file.zip", "url": "/resources/1a2b3c4d5e6f7g8h9i0j11k12l13m14n/links/g8h9i0j11k12l13m14n15o16p17Q18rS.bin", "kind": "data", "actions": ["download", ], "size": "2253029", } """ |
# check resource link parameter type
if not isinstance(resource_link, dict):
raise TypeError("Resource link expects a dictionary.")
else:
pass
# check resource link type
if not resource_link.get("type") == "hosted":
raise ValueError(
"Resource link passed is not a hosted one: {}".format(
resource_link.get("type")
)
)
else:
pass
# handling request parameters
payload = {"proxyUrl": proxy_url}
# prepare URL request
hosted_url = "{}://v1.{}.isogeo.com/{}".format(
prot, self.api_url, resource_link.get("url")
)
# send stream request
hosted_req = self.get(
hosted_url,
headers=self.header,
stream=True,
params=payload,
proxies=self.proxies,
verify=self.ssl,
)
# quick check
req_check = checker.check_api_response(hosted_req)
if not req_check:
raise ConnectionError(req_check[1])
else:
pass
# get filename from header
content_disposition = hosted_req.headers.get("Content-Disposition")
if content_disposition:
filename = re.findall("filename=(.+)", content_disposition)[0]
else:
filename = resource_link.get("title")
# remove special characters
if encode_clean:
filename = utils.encoded_words_to_text(filename)
filename = re.sub(r"[^\w\-_\. ]", "", filename)
# well-formed size
in_size = resource_link.get("size")
for size_cat in ("octets", "Ko", "Mo", "Go"):
if in_size < 1024.0:
out_size = "%3.1f %s" % (in_size, size_cat)
in_size /= 1024.0
out_size = "%3.1f %s" % (in_size, " To")
# end of method
return (hosted_req, filename, out_size) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def xml19139(
    self,
    token: dict = None,
    id_resource: str = None,
    proxy_url=None,
    prot: str = "https",
):
    """Export a metadata record as XML ISO 19139.

    :param str token: API auth token
    :param str id_resource: metadata UUID to export
    :param str proxy_url: proxy to use to download
    :param str prot: https [DEFAULT] or http (use it only for dev and
        tracking needs).
    """
    # a malformed UUID cannot match any metadata record
    if not checker.check_is_uuid(id_resource):
        raise ValueError("Metadata ID is not a correct UUID.")

    # request parameters
    payload = {"proxyUrl": proxy_url, "id": id_resource}

    # build the export URL and stream the XML back to the caller
    md_url = "{}://v1.{}.isogeo.com/resources/{}.xml".format(
        prot, self.api_url, id_resource
    )
    return self.get(
        md_url,
        headers=self.header,
        stream=True,
        params=payload,
        proxies=self.proxies,
        verify=self.ssl,
    )
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def add_tags_shares(self, tags: dict = None):
    """Add shares list to the tags attributes in search results.

    :param dict tags: tags dictionary from a search request, updated
        in place. Defaults to a fresh empty dict.
    """
    # Bug fix: the previous signature used a mutable default (dict()),
    # which is evaluated once and shared between every call.
    if tags is None:
        tags = dict()
    # check if shares_id have already been retrieved or not (cached lazily)
    if not hasattr(self, "shares_id"):
        shares = self.shares()
        self.shares_id = {
            "share:{}".format(i.get("_id")): i.get("name") for i in shares
        }
    # update query tags in place
    tags.update(self.shares_id)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_app_properties(self, token: dict = None, prot: str = "https"):
    """Get information about the application declared on Isogeo.

    Stores the result on ``self.app_properties``; computed only once.

    :param str token: API auth token
    :param str prot: https [DEFAULT] or http (use it only for dev and
        tracking needs).
    """
    # already cached from a previous call: nothing to do
    if hasattr(self, "app_properties"):
        return
    # the first application of the first share describes this app
    first_app = self.shares()[0].get("applications")[0]
    self.app_properties = {
        "admin_url": "{}/applications/{}".format(
            self.mng_url, first_app.get("_id")
        ),
        "creation_date": first_app.get("_created"),
        "last_update": first_app.get("_modified"),
        "name": first_app.get("name"),
        "type": first_app.get("type"),
        "kind": first_app.get("kind"),
        "url": first_app.get("url"),
    }
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_directives(self, token: dict = None, prot: str = "https") -> dict:
    """Get environment directives which represent INSPIRE limitations.

    :param str token: API auth token
    :param str prot: https [DEFAULT] or http (use it only for dev and
        tracking needs).
    """
    # fetch the directives listing
    directives_url = "{}://v1.{}.isogeo.com/directives".format(prot, self.api_url)
    response = self.get(
        directives_url, headers=self.header, proxies=self.proxies, verify=self.ssl
    )
    # checking response
    checker.check_api_response(response)
    return response.json()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_coordinate_systems(
    self, token: dict = None, srs_code: str = None, prot: str = "https"
) -> dict:
    """Get available coordinate systems in Isogeo API.

    :param str token: API auth token
    :param str srs_code: code of a specific coordinate system
    :param str prot: https [DEFAULT] or http (use it only for dev and
        tracking needs).
    """
    # optional path suffix targeting one specific SRS
    specific_srs = "/{}".format(srs_code) if isinstance(srs_code, str) else ""

    # fetch the coordinate systems (or the single requested one)
    srs_url = "{}://v1.{}.isogeo.com/coordinate-systems{}".format(
        prot, self.api_url, specific_srs
    )
    response = self.get(
        srs_url, headers=self.header, proxies=self.proxies, verify=self.ssl
    )
    # checking response
    checker.check_api_response(response)
    return response.json()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_formats(
    self, token: dict = None, format_code: str = None, prot: str = "https"
) -> dict:
    """Get formats.

    :param str token: API auth token
    :param str format_code: code of a specific format
    :param str prot: https [DEFAULT] or http (use it only for dev and
        tracking needs).
    """
    # optional path suffix targeting one specific format
    specific_format = "/{}".format(format_code) if isinstance(format_code, str) else ""

    # fetch the formats listing (or the single requested format)
    formats_url = "{}://v1.{}.isogeo.com/formats{}".format(
        prot, self.api_url, specific_format
    )
    response = self.get(
        formats_url, headers=self.header, proxies=self.proxies, verify=self.ssl
    )
    # checking response
    checker.check_api_response(response)
    return response.json()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def search_datasets(
    self,
    license=None,
    format=None,
    query=None,
    featured=None,
    owner=None,
    organization=None,
    badge=None,
    reuses=None,
    page_size=20,
    x_fields=None,
):
    """Search datasets within uData portal.

    :param license: license identifier to filter on
    :param format: resource format to filter on
    :param query: full-text search query
    :param featured: filter on featured datasets
    :param owner: owner identifier to filter on
    :param organization: organization identifier to filter on
    :param badge: badge to filter on
    :param reuses: reuse level to filter on
    :param page_size: number of results per page
    :param x_fields: fields mask to limit the returned payload
    """
    # Handling request parameters. `requests` drops entries whose value is
    # None, so unset filters are simply not sent. Bug fix: previously every
    # filter except `badge` was accepted but silently ignored.
    payload = {
        "badge": badge,
        "featured": featured,
        "format": format,
        "license": license,
        "organization": organization,
        "owner": owner,
        "q": query,
        "reuses": reuses,
        "size": page_size,
        # NOTE(review): X-Fields is usually an HTTP *header* in
        # flask-restplus APIs — kept as a query param as before; confirm.
        "X-Fields": x_fields,
    }
    # search request
    search_url = "{}/datasets".format(self.base_url)
    search_req = requests.get(
        search_url,
        params=payload,
    )
    # serializing result into dict
    logger.debug(search_req.url)
    return search_req.json()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_filters_values(self):
    """Get different filters values as dicts."""
    def _index(route, key, label):
        # Map item[key] -> item[label] over the JSON list returned by `route`.
        return {
            item.get(key): item.get(label)
            for item in requests.get(self.base_url + route).json()
        }

    # DATASETS --
    self._DST_BADGES = requests.get(self.base_url + "datasets/badges/").json()
    self._DST_LICENSES = _index("datasets/licenses", "id", "title")
    self._DST_FREQUENCIES = _index("datasets/frequencies", "id", "label")

    # ORGANIZATIONS --
    self._ORG_BADGES = requests.get(self.base_url + "organizations/badges/").json()

    # SPATIAL --
    self._GRANULARITIES = _index("spatial/granularities", "id", "name")
    self._LEVELS = _index("spatial/levels", "id", "name")

    # MISC --
    self._FACETS = (
        "all",
        "badge",
        "featured",
        "format",
        "geozone",
        "granularity",
        "license",
        "owner",
        "organization",
        "reuses",
        "tag",
        "temporal_coverage",
    )
    self._REUSES = ("none", "few", "quite", "many")
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def deploy(app_id, version, promote, quiet):
    # type: (str, str, bool, bool) -> None
    """ Deploy the app to AppEngine.

    Args:
        app_id (str):
            AppEngine App ID. Overrides config value app_id if given.
        version (str):
            AppEngine project version. Overrides config values if given.
        promote (bool):
            If set to **True** promote the current remote app version to the
            one that's being deployed.
        quiet (bool):
            If set to **True** this will pass the ``--quiet`` flag to the
            gcloud command.
    """
    gae_app = GaeApp.for_branch(git.current_branch().name)

    if gae_app is None and None in (app_id, version):
        # Bug fix: the two implicitly-concatenated fragments were missing a
        # separating space, rendering as "and--project".
        msg = (
            "Can't find an AppEngine app setup for branch <35>{}<32> and "
            "--project and --version were not given."
        )
        log.err(msg, git.current_branch().name)
        sys.exit(1)

    # NOTE(review): if gae_app is None but both --project and --version were
    # given, the attribute assignments below raise AttributeError —
    # presumably a GaeApp should be constructed here instead. TODO confirm.
    if version is not None:
        gae_app.version = version

    if app_id is not None:
        gae_app.app_id = app_id

    gae_app.deploy(promote, quiet)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def setup_ci():
    # type: () -> None
    """ Setup AppEngine SDK on CircleCI """
    # locate the gcloud binary and derive the bundled SDK location from it
    gcloud_path = shell.run('which gcloud', capture=True).stdout.strip()
    sdk_path = normpath(join(gcloud_path, '../../platform/google_appengine'))
    gcloud_cmd = '{} --quiet'.format(gcloud_path)

    if exists(sdk_path):
        # Only initialise once. To reinitialise, just build without cache.
        log.info("AppEngine SDK already initialised")
    else:
        log.info("Installing AppEngine SDK")
        shell.run('sudo {} components install app-engine-python'.format(
            gcloud_cmd
        ))

    # authenticate using the project's service account key
    log.info("Using service account authentication")
    shell.run('{} auth activate-service-account --key-file {}'.format(
        gcloud_cmd,
        conf.proj_path('ops/client_secret.json')
    ))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def mark_experimental(fn):
    # type: (FunctionType) -> FunctionType
    """ Mark function as experimental.

    When the decorated command runs in an interactive terminal the user is
    warned that the command's interface is not yet stable.

    Args:
        fn (FunctionType):
            The command function to decorate.
    """
    @wraps(fn)
    def wrapper(*args, **kw):   # pylint: disable=missing-docstring
        from peltak.core import shell

        if shell.is_tty:
            # Fixed typos in the user-facing message
            # ("is has experimental", "within with a patch").
            warnings.warn("This command has experimental status. The "
                          "interface is not yet stable and might change "
                          "without notice within a patch version update. "
                          "Use at your own risk")
        return fn(*args, **kw)

    return wrapper
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def mark_deprecated(replaced_by):
    # type: (Text) -> FunctionType
    """ Mark command as deprecated.

    Args:
        replaced_by (str):
            The command that deprecated this command and should be used
            instead.
    """
    def decorator(fn):   # pylint: disable=missing-docstring
        @wraps(fn)
        def wrapper(*args, **kw):   # pylint: disable=missing-docstring
            from peltak.core import shell

            if shell.is_tty:
                # Fixed typo in the user-facing message ("is has been").
                warnings.warn("This command has been deprecated. Please use "
                              "{new} instead.".format(new=replaced_by))

            return fn(*args, **kw)

        return wrapper
    return decorator
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def in_batches(iterable, batch_size):
    # type: (Iterable[Any]) -> Generator[List[Any]]
    """ Split the given iterable into batches.

    Args:
        iterable (Iterable[Any]):
            The iterable you want to split into batches.
        batch_size (int):
            The size of each batch. The last batch will probably be smaller
            (if the number of elements cannot be equally divided).

    Returns:
        Generator[list[Any]]: Will yield all items in batches of
        **batch_size** size.

    Example:

        >>> list(in_batches([1, 2, 3, 4, 5, 6, 7], 3))
        [[1, 2, 3], [4, 5, 6], [7]]
    """
    items = list(iterable)
    # Slicing already clamps at the end of the sequence, so no min() needed.
    for start in range(0, len(items), batch_size):
        yield items[start:start + batch_size]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def clear(cls, fn):
    # type: (FunctionType) -> None
    """ Clear result cache on the given function.

    If the function has no cached result, this call will do nothing.

    Args:
        fn (FunctionType):
            The function whose cache should be cleared.
    """
    try:
        delattr(fn, cls.CACHE_VAR)
    except AttributeError:
        # No cached result present -- nothing to clear.
        pass
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def finish():
    # type: () -> None
    """ Merge the current task branch back into its base branch.

    Asks for confirmation, merges the task branch into the branch returned
    by ``common.get_base_branch()`` and deletes the task branch afterwards.
    Exits with status 1 if the working tree has uncommitted changes.
    """
    pretend = context.get('pretend', False)

    # A dirty working tree would make the merge below unpredictable.
    if not pretend and (git.staged() or git.unstaged()):
        log.err(
            "You have uncommitted changes in your repo!\n"
            "You need to stash them before you merge the hotfix branch"
        )
        sys.exit(1)

    branch = git.current_branch(refresh=True)
    base = common.get_base_branch()

    prompt = "<32>Merge <33>{}<32> into <33>{}<0>?".format(branch.name, base)
    if not click.confirm(shell.fmt(prompt)):
        log.info("Cancelled")
        return

    # Refuse to run from anything that is not a task branch.
    common.assert_branch_type('task')

    # Merge task into it's base feature branch
    common.git_checkout(base)
    common.git_pull(base)
    common.git_merge(base, branch.name)

    # Cleanup: delete the merged branch and prune stale remote refs.
    common.git_branch_delete(branch.name)
    common.git_prune()

    common.git_checkout(base)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def mutagen_call(action, path, func, *args, **kwargs):
    """Call a Mutagen function with appropriate error handling.

    `action` is a string describing what the function is trying to do,
    and `path` is the relevant filename. The rest of the arguments
    describe the callable to invoke.

    We require at least Mutagen 1.33, where `IOError` is *never* used,
    neither for internal parsing errors *nor* for ordinary IO error
    conditions such as a bad filename. Mutagen-specific parsing errors
    and IO errors are reraised as `UnreadableFileError`. Other
    exceptions raised inside Mutagen---i.e., bugs---are reraised as
    `MutagenError`.
    """
    try:
        return func(*args, **kwargs)
    except mutagen.MutagenError as exc:
        # Expected failure mode: unreadable/corrupt file or IO problem.
        log.debug(u'%s failed: %s', action, six.text_type(exc))
        raise UnreadableFileError(path, six.text_type(exc))
    except Exception as exc:
        # Isolate bugs in Mutagen: log the full traceback and wrap the
        # exception so callers can tell library bugs apart from bad files.
        log.debug(u'%s', traceback.format_exc())
        log.error(u'uncaught Mutagen exception in %s: %s', action, exc)
        raise MutagenError(path, exc)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _safe_cast(out_type, val):
    """Try to covert val to out_type but never raise an exception.

    If the value can't be converted, then a sensible default value is
    returned. out_type should be bool, int, or unicode; otherwise, the
    value is just passed through.
    """
    if val is None:
        return None

    if out_type == int:
        if isinstance(val, (int, float)):
            # Just a number.
            return int(val)
        # Process any other type as a string.
        if isinstance(val, bytes):
            val = val.decode('utf-8', 'ignore')
        elif not isinstance(val, six.string_types):
            val = six.text_type(val)
        # Get a number from the front of the string.
        match = re.match(r'[\+-]?[0-9]+', val.strip())
        return int(match.group(0)) if match else 0

    if out_type == bool:
        try:
            # Should work for strings, bools, ints:
            return bool(int(val))
        except ValueError:
            return False

    if out_type == six.text_type:
        if isinstance(val, bytes):
            return val.decode('utf-8', 'ignore')
        if isinstance(val, six.text_type):
            return val
        return six.text_type(val)

    if out_type == float:
        if isinstance(val, (int, float)):
            return float(val)
        # Process any other type as a string.
        if isinstance(val, bytes):
            val = val.decode('utf-8', 'ignore')
        else:
            val = six.text_type(val)
        # Get a float from the front of the string.
        match = re.match(r'[\+-]?([0-9]+\.?[0-9]*|[0-9]*\.[0-9]+)',
                         val.strip())
        if match and match.group(0):
            return float(match.group(0))
        return 0.0

    # Unknown target type: pass the value through unchanged.
    return val
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def deserialize(self, mutagen_value):
    """Given a raw value stored on a Mutagen object, decode and return
    the represented value.
    """
    # Strip the configured suffix from text values that carry it.
    has_suffix = (
        self.suffix
        and isinstance(mutagen_value, six.text_type)
        and mutagen_value.endswith(self.suffix)
    )
    if has_suffix:
        return mutagen_value[:-len(self.suffix)]
    return mutagen_value
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def set(self, mutagen_file, value):
    """Assign the value for the field using this style.
    """
    # Serialize first, then delegate the actual write to store().
    serialized = self.serialize(value)
    self.store(mutagen_file, serialized)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def serialize(self, value):
    """Convert the external Python value to a type that is suitable for
    storing in a Mutagen file object.
    """
    if isinstance(value, float) and self.as_type is six.text_type:
        # Render floats as fixed-precision text.
        value = self.as_type(u'{0:.{1}f}'.format(value, self.float_places))
    elif self.as_type is six.text_type:
        if isinstance(value, bool):
            # Store bools as 1/0 instead of True/False.
            value = six.text_type(int(bool(value)))
        elif isinstance(value, bytes):
            value = value.decode('utf-8', 'ignore')
        else:
            value = six.text_type(value)
    else:
        value = self.as_type(value)

    if self.suffix:
        value += self.suffix

    return value
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_list(self, mutagen_file):
    """Get a list of all values for the field using this style.
    """
    # Decode every stored raw value.
    return list(map(self.deserialize, self.fetch(mutagen_file)))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def set_list(self, mutagen_file, values):
    """Set all values for the field using this style.

    `values` should be an iterable.
    """
    # Encode every value before handing the whole list to store().
    self.store(mutagen_file, list(map(self.serialize, values)))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def deserialize(self, apic_frame):
    """Convert an ID3 APIC frame into an `Image`.

    Carries the raw image bytes, the textual description, and the
    picture type index over from the frame.
    """
    return Image(data=apic_frame.data, desc=apic_frame.desc,
                 type=apic_frame.type)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def serialize(self, image):
    """Return an APIC frame populated with data from ``image``.
    """
    assert isinstance(image, Image)
    frame = mutagen.id3.Frames[self.key]()
    frame.data = image.data
    frame.mime = image.mime_type
    frame.desc = image.desc or u''
    frame.type = image.type_index

    # For compatibility with OS X/iTunes prefer latin-1 if possible.
    # See issue #899
    try:
        frame.desc.encode("latin-1")
    except UnicodeEncodeError:
        frame.encoding = mutagen.id3.Encoding.UTF16
    else:
        frame.encoding = mutagen.id3.Encoding.LATIN1

    return frame
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def serialize(self, image):
    """Turn a Image into a base64 encoded FLAC picture block.
    """
    pic = mutagen.flac.Picture()
    pic.data = image.data
    pic.type = image.type_index
    pic.mime = image.mime_type
    pic.desc = image.desc or u''

    # base64.b64encode returns bytes on both Python 2 and 3, while
    # Mutagen expects a Unicode string -- hence the trailing decode.
    encoded = base64.b64encode(pic.write())
    return encoded.decode('ascii')
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def store(self, mutagen_file, pictures):
    """``pictures`` is a list of mutagen.flac.Picture instances.
    """
    # Replace the existing picture set wholesale.
    mutagen_file.clear_pictures()
    for picture in pictures:
        mutagen_file.add_picture(picture)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def serialize(self, image):
    """Turn a Image into a mutagen.flac.Picture.
    """
    picture = mutagen.flac.Picture()
    picture.type = image.type_index
    picture.mime = image.mime_type
    picture.desc = image.desc or u''
    picture.data = image.data
    return picture
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def delete(self, mutagen_file):
    """Remove all images from the file.
    """
    for tag in self.TAG_NAMES.values():
        try:
            del mutagen_file[tag]
        except KeyError:
            # Tag not present -- nothing to remove.
            pass
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def styles(self, mutagen_file):
    """Yields the list of storage styles of this field that can handle
    the MediaFile's format.
    """
    # The format is identified by the Mutagen class name of the file.
    fmt = mutagen_file.__class__.__name__
    for candidate in self._styles:
        if fmt in candidate.formats:
            yield candidate
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _none_value(self):
    """Get an appropriate "null" value for this field's type. This is
    used internally when setting the field to None.
    """
    # Falls through to None for any type not listed here, matching the
    # original if/elif chain with no final else.
    defaults = {
        int: 0,
        float: 0.0,
        bool: False,
        six.text_type: u'',
    }
    return defaults.get(self.out_type)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _get_date_tuple(self, mediafile):
    """Get a 3-item sequence representing the date consisting of a
    year, month, and day number. Each number is either an integer or
    None.
    """
    # Get the underlying data and split on hyphens and slashes.
    datestring = super(DateField, self).__get__(mediafile, None)
    if isinstance(datestring, six.string_types):
        # Strip a trailing time-of-day part (e.g. "2001-01-01T12:00").
        datestring = re.sub(r'[Tt ].*$', '', six.text_type(datestring))
        items = re.split('[-/]', six.text_type(datestring))
    else:
        # Non-string (e.g. missing) value: treat as empty date.
        items = []

    # Ensure that we have exactly 3 components, possibly by
    # truncating or padding.
    items = items[:3]
    if len(items) < 3:
        items += [None] * (3 - len(items))

    # Use year field if year is missing.
    if not items[0] and hasattr(self, '_year_field'):
        items[0] = self._year_field.__get__(mediafile)

    # Convert each component to an integer if possible.
    items_ = []
    for item in items:
        try:
            items_.append(int(item))
        except (TypeError, ValueError):
            # Missing or non-numeric component stays None.
            items_.append(None)
    return items_
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _set_date_tuple(self, mediafile, year, month=None, day=None):
    """Set the value of the field given a year, month, and day number.
    Each number can be an integer or None to indicate an unset
    component.
    """
    if year is None:
        # No year means no date at all: clear the field entirely.
        self.__delete__(mediafile)
        return

    # Build "YYYY", "YYYY-MM" or "YYYY-MM-DD" depending on what is set.
    date = [u'{0:04d}'.format(int(year))]
    if month:
        date.append(u'{0:02d}'.format(int(month)))
    if month and day:
        # A day is only meaningful when a month is present.
        date.append(u'{0:02d}'.format(int(day)))
    date = map(six.text_type, date)
    super(DateField, self).__set__(mediafile, u'-'.join(date))

    if hasattr(self, '_year_field'):
        # Keep the plain year field in sync with the full date string.
        self._year_field.__set__(mediafile, year)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def save(self):
    """Write the object's tags back to the file.

    May throw `UnreadableFileError`.
    """
    # Possibly save the tags to ID3v2.3.
    kwargs = {}
    if self.id3v23:
        id3 = self.mgfile
        if hasattr(id3, 'tags'):
            # In case this is an MP3 object, not an ID3 object.
            id3 = id3.tags
        id3.update_to_v23()
        # Tell Mutagen to write ID3v2.3 instead of the default v2.4.
        kwargs['v2_version'] = 3
    # Delegate to Mutagen with uniform error handling.
    mutagen_call('save', self.path, self.mgfile.save, **kwargs)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _field_sort_name(cls, name):
    """Get a sort key for a field name that determines the order
    fields should be written in.

    Fields names are kept unchanged, unless they are instances of
    :class:`DateItemField`, in which case `year`, `month`, and `day`
    are replaced by `date0`, `date1`, and `date2`, respectively, to
    make them appear in that order.
    """
    if isinstance(cls.__dict__[name], DateItemField):
        for part, replacement in (('year', 'date0'),
                                  ('month', 'date1'),
                                  ('day', 'date2')):
            name = re.sub(part, replacement, name)
    return name
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def sorted_fields(cls):
    """Get the names of all writable metadata fields, sorted in the
    order that they should be written.

    This is a lexicographic order, except for instances of
    :class:`DateItemField`, which are sorted in year-month-day order.
    """
    names = sorted(cls.fields(), key=cls._field_sort_name)
    for name in names:
        yield name
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def add_field(cls, name, descriptor):
    """Add a field to store custom tags.

    :param name: the name of the property the field is accessed
        through. It must not already exist on this class.
    :param descriptor: an instance of :class:`MediaField`.

    :raises ValueError: if `descriptor` is not a `MediaField` or if
        `name` is already taken on this class.
    """
    if not isinstance(descriptor, MediaField):
        raise ValueError(
            u'{0} must be an instance of MediaField'.format(descriptor))
    # Check cls.__dict__ directly so inherited attributes don't block
    # adding a field on a subclass.
    if name in cls.__dict__:
        raise ValueError(
            u'property "{0}" already exists on MediaField'.format(name))
    setattr(cls, name, descriptor)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def update(self, dict):
    """Set all field values from a dictionary.

    For any key in `dict` that is also a field to store tags the
    method retrieves the corresponding value from `dict` and updates
    the `MediaFile`. If a key has the value `None`, the corresponding
    property is deleted from the `MediaFile`.
    """
    for field in self.sorted_fields():
        if field not in dict:
            continue
        value = dict[field]
        if value is None:
            # Explicit None clears the field entirely.
            delattr(self, field)
        else:
            setattr(self, field, value)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def rebin2x2(a):
    """ Wrapper around rebin that actually rebins 2 by 2

    Raises RuntimeError when either dimension of `a` is odd.
    """
    inshape = np.array(a.shape)
    if not (inshape % 2 == np.zeros(2)).all(): # Modulo check to see if size is even
        raise RuntimeError, "I want even image shapes !"

    # Halve each dimension (Python 2 integer division on the shape).
    return rebin(a, inshape/2)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def labelmask(self, verbose = None):
    """ Finds and labels the cosmic "islands" and returns a list of dicts containing their positions.
    This is made on purpose for visualizations a la f2n.drawstarslist, but could be useful anyway.

    Each returned dict has keys "name" (pixel count as a string), "x" and
    "y" (the centre of the island's bounding box).
    """
    if verbose == None:
        verbose = self.verbose
    if verbose:
        print "Labeling mask pixels ..."
    # We morphologicaly dilate the mask to generously connect "sparse" cosmics :
    #dilstruct = np.ones((5,5))
    dilmask = ndimage.morphology.binary_dilation(self.mask, structure=dilstruct, iterations=1, mask=None, output=None, border_value=0, origin=0, brute_force=False)
    # origin = 0 means center
    (labels, n) = ndimage.measurements.label(dilmask)
    #print "Number of cosmic ray hits : %i" % n
    #tofits(labels, "labels.fits", verbose = False)
    slicecouplelist = ndimage.measurements.find_objects(labels)
    # Now we have a huge list of couples of numpy slice objects giving a frame around each object
    # For plotting purposes, we want to transform this into the center of each object.
    if len(slicecouplelist) != n:
        # This never happened, but you never know ...
        raise RuntimeError, "Mega error in labelmask !"
    # Bounding-box centre of each labeled island.
    centers = [[(tup[0].start + tup[0].stop)/2.0, (tup[1].start + tup[1].stop)/2.0] for tup in slicecouplelist]
    # We also want to know how many pixels where affected by each cosmic ray.
    # Why ? Dunno... it's fun and available in scipy :-)
    # NOTE: sizes are computed on the *undilated* mask, per label.
    sizes = ndimage.measurements.sum(self.mask.ravel(), labels.ravel(), np.arange(1,n+1,1))
    retdictlist = [{"name":"%i" % size, "x":center[0], "y":center[1]} for (size, center) in zip(sizes, centers)]

    if verbose:
        print "Labeling done"

    return retdictlist
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def getdilatedmask(self, size=3):
    """ Returns a morphologically dilated copy of the current mask.

    size = 3 or 5 decides how to dilate; any other value returns an
    undilated copy of the mask.
    """
    if size == 3:
        dilmask = ndimage.morphology.binary_dilation(self.mask, structure=growkernel, iterations=1, mask=None, output=None, border_value=0, origin=0, brute_force=False)
    elif size == 5:
        dilmask = ndimage.morphology.binary_dilation(self.mask, structure=dilstruct, iterations=1, mask=None, output=None, border_value=0, origin=0, brute_force=False)
    else:
        # Bug fix: this branch used to assign to a misspelled name
        # ("dismask"), making the return below fail with a NameError.
        dilmask = self.mask.copy()
    return dilmask
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def getsatstars(self, verbose = None):
    """ Returns the mask of saturated stars after finding them if not yet done.
    Intended mainly for external use.

    Raises RuntimeError when no positive saturation level was configured.
    """
    if verbose == None:
        verbose = self.verbose
    if not self.satlevel > 0:
        raise RuntimeError, "Cannot determine satstars : you gave satlevel <= 0 !"
    if self.satstars == None:
        # Lazily compute and cache the saturated-star mask.
        self.findsatstars(verbose = verbose)
    return self.satstars
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def guessbackgroundlevel(self):
    """ Estimates the background level. This could be used to fill pixels in large cosmics.

    The result is cached on self.backgroundlevel after the first call.
    """
    # Fix: use identity comparison with None; "== None" invokes __eq__ and
    # can warn / behave elementwise once the level is a numpy value.
    if self.backgroundlevel is None:
        self.backgroundlevel = np.median(self.rawarray.ravel())
    return self.backgroundlevel
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def search_project_root():
    """ Search your Django project root.

    returns:
        - path:string Django project root path
    """
    # Walk up from the current directory until a Miragefile is found.
    while True:
        current = os.getcwd()
        found = (pathlib.Path("Miragefile.py").is_file()
                 or pathlib.Path("Miragefile").is_file())
        if found:
            return current
        if current == "/":
            # Reached the filesystem root without finding a Miragefile.
            raise FileNotFoundError
        os.chdir("../")
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def search_app_root():
    """ Search your Django application root

    returns:
        - (String) Django application root path
    """
    # Walk up from the current directory until an apps.py is found.
    while True:
        current = os.getcwd()
        if pathlib.Path("apps.py").is_file():
            return current
        if current == "/":
            # Bug fix: the previous check compared a Path object with the
            # string "/" (always False), so the loop spun forever at the
            # filesystem root instead of raising.
            raise FileNotFoundError
        os.chdir("../")
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def in_app() -> bool:
    """ Judge whether the current working directory is inside a Django
    application or not.

    returns:
        - (Bool) cwd is in app dir returns True
    """
    try:
        MirageEnvironment.set_import_root()
        import apps
        return os.path.isfile("apps.py")
    except ImportError:
        # No importable apps module: not inside an application.
        return False
    except Exception:
        # Bug fix: previously a bare "except:", which also swallowed
        # SystemExit and KeyboardInterrupt.
        return False
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def start(name):
    # type: (str) -> None
    """ Start working on a new hotfix.

    This will create a new branch off master called hotfix/<name>.

    Args:
        name (str):
            The name of the new feature.
    """
    # Normalise the user-provided name into a valid branch name.
    hotfix_branch = 'hotfix/' + common.to_branch_name(name)
    master = conf.get('git.master_branch', 'master')

    # Hotfixes must branch off master.
    common.assert_on_branch(master)
    common.git_checkout(hotfix_branch, create=True)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def finish():
    # type: () -> None
    """Finish a hotfix: merge the current hotfix branch into both master
    and develop, then delete the branch.

    Exits with status 1 if the working tree has uncommitted changes
    (unless running in pretend mode).
    """
    pretend = context.get('pretend', False)
    # Refuse to merge over uncommitted changes; they would otherwise get
    # mixed into the merge commits created below.
    if not pretend and (git.staged() or git.unstaged()):
        log.err(
            "You have uncommitted changes in your repo!\n"
            "You need to stash them before you merge the hotfix branch"
        )
        sys.exit(1)
    develop = conf.get('git.devel_branch', 'develop')
    master = conf.get('git.master_branch', 'master')
    # Must be on a hotfix/* branch to finish one.
    branch = git.current_branch(refresh=True)
    common.assert_branch_type('hotfix')
    # Merge hotfix into master
    common.git_checkout(master)
    common.git_pull(master)
    common.git_merge(master, branch.name)
    # Merge hotfix into develop
    common.git_checkout(develop)
    common.git_pull(develop)
    common.git_merge(develop, branch.name)
    # Cleanup
    common.git_branch_delete(branch.name)
    common.git_prune()
    common.git_checkout(master)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def run(self):
    """Execute the shell command.

    In blocking mode, waits for the process to finish and captures its
    stdout/stderr (decoded as UTF-8) plus its return code. In non-blocking
    mode, launches the command on a background thread and returns at once.

    Returns:
        ShellCommand: this instance, for chaining.
    """
    if self.block:
        self.__create_process()
        self.process.wait()
        if self._stdout is not None:
            self.output = self.process.stdout.read().decode("utf-8")
        if self._stderr is not None:
            self.error = self.process.stderr.read().decode("utf-8")
        self.return_code = self.process.returncode
    else:
        self.output = []
        self.error = []
        self.thread = threading.Thread(target=self.run_non_blocking)
        self.thread.start()
    return self
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def send(self, value):
    """Write a line of text to the command's stdin.

    Only valid for non-blocking commands that were started with a stdin
    pipe; a newline is appended to the given text.

    Args:
        value (str): the text to write on stdin.

    Raises:
        TypeError: if the command is blocking.

    Returns:
        ShellCommand: this instance, for chaining.
    """
    if self.block or self._stdin is None:
        raise TypeError(NON_BLOCKING_ERROR_MESSAGE)
    self.writer.write("{}\n".format(value))
    return self
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def poll_output(self):
    """Return the stdout lines captured since the previous poll.

    For blocking commands the full captured output is returned instead.

    Returns:
        list: newly captured lines (or the whole output when blocking).
    """
    if self.block:
        return self.output
    fresh = self.output[self.old_output_size:]
    self.old_output_size += len(fresh)
    return fresh
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def poll_error(self):
    """Return the stderr lines captured since the previous poll.

    For blocking commands the full captured error output is returned.

    Returns:
        list: newly captured stderr lines (or all of them when blocking).
    """
    if self.block:
        return self.error
    fresh = self.error[self.old_error_size:]
    self.old_error_size += len(fresh)
    return fresh
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def kill(self):
    """Forcefully terminate a non-blocking command's subprocess.

    Raises:
        TypeError: if the command is blocking.
    """
    if self.block:
        raise TypeError(NON_BLOCKING_ERROR_MESSAGE)
    try:
        self.process.kill()
    except ProcessLookupError as already_dead:
        # The process exited on its own before we could kill it.
        self.logger.debug(already_dead)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def wait_for(self, pattern, timeout=None):
    """Block until *pattern* matches a line on stdout or stderr.

    Polls the captured output in a busy loop. When *timeout* is given,
    a SIGALRM is armed to abort the wait.

    Args:
        pattern: regular expression tested (via re.match) on each new line.
        timeout (int): maximum seconds to wait; None waits indefinitely.

    Raises:
        TypeError: if the command is blocking.
        TimeoutError: when the timeout expires before any line matches.
    """
    if self.block:
        raise TypeError(NON_BLOCKING_ERROR_MESSAGE)
    waiting = True

    def on_alarm(signum, frame):  # pylint: disable=W0613
        nonlocal waiting
        if waiting:
            raise TimeoutError()

    if timeout:
        signal.signal(signal.SIGALRM, on_alarm)
        signal.alarm(timeout)
    while waiting:
        lines = self.poll_output() + self.poll_error()
        if [line for line in lines if re.match(pattern, line)]:
            waiting = False
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def is_running(self):
    """Report whether the command is still executing.

    Returns:
        bool: always False for blocking commands; otherwise True while the
        worker thread is alive or the subprocess has not exited yet.
    """
    if self.block:
        return False
    # poll() returns None while the subprocess is still alive.
    return self.thread.is_alive() or self.process.poll() is None
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def print_live_output(self):
    """Stream the command's output through the logger until it finishes.

    Blocks the caller, logging new stdout lines at INFO level and new
    stderr lines at ERROR level as they are captured.

    Raises:
        TypeError: if the command is blocking.
    """
    if self.block:
        raise TypeError(NON_BLOCKING_ERROR_MESSAGE)
    while (self.thread.is_alive()
           or self.old_output_size < len(self.output)
           or self.old_error_size < len(self.error)):
        if self._stdout is not None and len(self.output) > self.old_output_size:
            while self.old_output_size < len(self.output):
                self.logger.info(self.output[self.old_output_size])
                self.old_output_size += 1
        if self._stderr is not None and len(self.error) > self.old_error_size:
            while self.old_error_size < len(self.error):
                self.logger.error(self.error[self.old_error_size])
                self.old_error_size += 1
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def scatter(slope, zero, x1, x2, x1err=[], x2err=[]):
    """Estimate the intrinsic scatter about the line x2 = zero + slope*x1.

    Used mainly to measure scatter for the BCES best-fit.

    Args:
        slope, zero: best-fit line parameters.
        x1, x2: independent and dependent data arrays.
        x1err: unused here (kept for signature symmetry with the fitters).
        x2err: measurement uncertainties on x2 (see NOTE below).

    Returns:
        float: sqrt of the intrinsic scatter (total minus measurement term).
    """
    n = len(x1)
    # Predicted x2 from the best-fit line.
    x2pred = zero + slope * x1
    # Total (sample) variance of the residuals about the fit.
    s = sum((x2 - x2pred) ** 2) / (n - 1)
    if len(x2err) == n:
        # Mean fractional measurement variance, subtracted from the total
        # variance to leave the intrinsic scatter.
        s_obs = sum((x2err / x2) ** 2) / n
        s0 = s - s_obs
    # NOTE(review): Python 2 print statement. If len(x2err) != n, the names
    # s_obs/s0 are unbound here and this raises NameError — presumably
    # callers always pass x2err of matching length; confirm.
    print numpy.sqrt(s), numpy.sqrt(s_obs), numpy.sqrt(s0)
    return numpy.sqrt(s0)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def mcmc(x1, x2, x1err=[], x2err=[], po=(1,1,0.5), logify=True, nsteps=5000, nwalkers=100, nburn=500, output='full'):
    """Fit the linear relation x2 = a + b*x1 (a power law when logify=True)
    with emcee, accounting for measurement uncertainties and intrinsic
    scatter.

    Args:
        x1, x2: independent and dependent variables.
        x1err, x2err: optional measurement uncertainties (default to ones).
        po: initial guess for (zero point, slope, intrinsic scatter);
            results are not very sensitive to these values.
        logify (bool): work in log10 space, i.e. fit a power law.
        nsteps (int): steps taken by each MCMC walker.
        nwalkers (int): number of MCMC walkers.
        nburn (int): burn-in samples to discard from each walker.
        output: 'full' to return the flattened post-burn-in samples per
            parameter, or a list of percentiles to report per parameter.

    Returns:
        Samples array when output == 'full'; otherwise a list of percentile
        values for each parameter.
    """
    import emcee
    # Default to unit uncertainties when none are supplied.
    if len(x1err) == 0:
        x1err = numpy.ones(len(x1))
    if len(x2err) == 0:
        x2err = numpy.ones(len(x1))
    def lnlike(theta, x, y, xerr, yerr):
        # Gaussian log-likelihood with combined (measurement + intrinsic)
        # variance per point.
        # NOTE(review): 'yerr*2' looks like a typo for 'yerr**2' in the
        # variance term — confirm against the intended likelihood.
        a, b, s = theta
        model = a + b*x
        sigma = numpy.sqrt((b*xerr)**2 + yerr*2 + s**2)
        lglk = 2 * sum(numpy.log(sigma)) + \
               sum(((y-model) / sigma) ** 2) + \
               numpy.log(len(x)) * numpy.sqrt(2*numpy.pi) / 2
        return -lglk
    def lnprior(theta):
        # Flat prior; only require a non-negative intrinsic scatter.
        a, b, s = theta
        if s >= 0:
            return 0
        return -numpy.inf
    def lnprob(theta, x, y, xerr, yerr):
        lp = lnprior(theta)
        return lp + lnlike(theta, x, y, xerr, yerr)
    if logify:
        x1, x2, x1err, x2err = to_log(x1, x2, x1err, x2err)
    start = numpy.array(po)
    ndim = len(start)
    # Initialize walkers in a tight Gaussian ball around the initial guess.
    pos = [start + 1e-4*numpy.random.randn(ndim) for i in range(nwalkers)]
    sampler = emcee.EnsembleSampler(nwalkers, ndim, lnprob,
                                    args=(x1,x2,x1err,x2err))
    sampler.run_mcmc(pos, nsteps)
    # Flatten the chains, discarding the burn-in section of every walker.
    samples = numpy.array([sampler.chain[:,nburn:,i].reshape(-1) \
                           for i in xrange(ndim)])
    # Convert the scatter from dex to natural log when fitting in log space.
    if logify:
        samples[2] *= numpy.log(10)
    if output == 'full':
        return samples
    else:
        try:
            values = [[numpy.percentile(s, o) for o in output]
                      for s in samples]
            return values
        except TypeError:
            msg = 'ERROR: wrong value for argument output in mcmc().'
            msg += ' Must be "full" or list of ints.'
            # NOTE(review): Python 2 print statement and bare exit().
            print msg
            exit()
    return
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def mle(x1, x2, x1err=[], x2err=[], cerr=[], s_int=True, po=(1,0,0.1),
        verbose=False, logify=True, full_output=False):
    """Maximum-likelihood estimate of the best-fit linear relation.

    Fits x2 = a + b*x1 (in log10 space when logify=True, i.e. a power law),
    optionally including an intrinsic-scatter term s in the likelihood.

    Args:
        x1, x2: independent and dependent variables (equal length).
        x1err, x2err: optional measurement uncertainties (default to ones).
        cerr: covariance on the measurement errors (unused in this body).
        s_int (bool): include an intrinsic-scatter parameter in the fit.
        po: initial guess (a, b[, s]); truncated to two values if not s_int.
        verbose (bool): forwarded to scipy's fmin as ``disp``.
        logify (bool): fit in log10 space; the zero point is then 10**a.
        full_output (bool): forwarded to scipy's fmin.

    Returns:
        The optimize.fmin result: best-fit parameters (a, b[, s]), plus
        extra diagnostics when full_output is True.

    Raises:
        ValueError: if x1 and x2 differ in length.
    """
    from scipy import optimize
    n = len(x1)
    if len(x2) != n:
        raise ValueError('x1 and x2 must have same length')
    if len(x1err) == 0:
        x1err = numpy.ones(n)
    if len(x2err) == 0:
        x2err = numpy.ones(n)
    if logify:
        x1, x2, x1err, x2err = to_log(x1, x2, x1err, x2err)

    model = lambda a, b: a + b * x1
    if s_int:
        # Per-point width includes the intrinsic-scatter term s.
        width = lambda b, s: numpy.sqrt(b**2 * x1err**2 + x2err**2 + s**2)
        negloglike = lambda p: 2 * sum(numpy.log(width(p[1],p[2]))) + \
                     sum(((x2 - model(p[0],p[1])) / width(p[1],p[2])) ** 2) + \
                     numpy.log(n * numpy.sqrt(2*numpy.pi)) / 2
    else:
        width = lambda b: numpy.sqrt(b**2 * x1err**2 + x2err**2)
        negloglike = lambda p: sum(numpy.log(width(p[1]))) + \
                     sum(((x2 - model(p[0],p[1])) / width(p[1])) ** 2) / 2 + \
                     numpy.log(n * numpy.sqrt(2*numpy.pi)) / 2
        # Only (a, b) are free parameters without intrinsic scatter.
        po = po[:2]
    return optimize.fmin(negloglike, po, disp=verbose,
                         full_output=full_output)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def to_log(x1, x2, x1err, x2err):
    """Convert linear measurements and uncertainties to log10 space.

    Uncertainties are converted using the upper error bar:
    err_log = log10(x + err) - log10(x).

    Returns:
        tuple: (log10(x1), log10(x2), log-space x1 errors, log-space x2
        errors), all as numpy arrays.
    """
    arr1 = numpy.array(x1)
    arr2 = numpy.array(x2)
    logx1 = numpy.log10(arr1)
    logx2 = numpy.log10(arr2)
    logx1err = numpy.log10(arr1 + numpy.array(x1err)) - logx1
    logx2err = numpy.log10(arr2 + numpy.array(x2err)) - logx2
    return logx1, logx2, logx1err, logx2err
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def wrap_paths(paths):
    # type: (list[str]) -> str
    """Quote every path and join them into one space-separated string.

    Args:
        paths (list[str]): the paths to quote.

    Returns:
        str: e.g. ``'"a b" "c"'`` for ``["a b", "c"]``.

    Raises:
        ValueError: if a bare string is passed instead of a list.
    """
    if isinstance(paths, string_types):
        raise ValueError(
            "paths cannot be a string. "
            "Use array with one element instead."
        )
    quoted = ['"{}"'.format(p) for p in paths]
    return ' '.join(quoted)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def lint_cli(ctx, exclude, skip_untracked, commit_only):
    # type: (click.Context, List[str], bool, bool) -> None
    """Run pep8 and pylint on the project files.

    The linting paths and the pep8/pylint config file locations are taken
    from the ``lint.*`` config variables (``lint.paths``, ``lint.pep8_cfg``,
    ``lint.pylint_cfg``).

    Args:
        ctx: click context; when a subcommand was invoked, this group
            callback defers to it and does nothing itself.
        exclude: glob patterns of paths to skip.
        skip_untracked: do not lint files unknown to git.
        commit_only: lint only files staged for commit.
    """
    # A subcommand (e.g. "peltak lint <sub>") takes over; the group callback
    # must not also run the default lint.
    if ctx.invoked_subcommand:
        return
    # Imported lazily to keep CLI startup fast.
    from peltak.logic import lint
    lint.lint(exclude, skip_untracked, commit_only)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_ip_addresses():
    """Parse ``ifconfig`` output into a map of device name -> IPv4 address.

    Only devices whose name contains ``eth`` or ``eno`` are considered;
    the address is taken from the ``addr:<ip>`` token that follows the
    device name.

    Returns:
        dict: {device_name: ipv4_address}

    Raises:
        CommandError: if running ifconfig fails.
    """
    log = logging.getLogger(mod_logger + '.get_ip_addresses')
    try:
        result = run_command(['/sbin/ifconfig'])
    except CommandError:
        raise
    tokens = result['output'].strip().split()
    devices = {}
    current_device = None
    # Two-state token scan: first find an interface name, then the
    # "addr:<ip>" token that belongs to it.
    for token in tokens:
        if current_device is None:
            if 'eth' in token or 'eno' in token:
                current_device = token
        else:
            pieces = token.split(':', 1)
            if len(pieces) == 2 and pieces[0] == 'addr':
                ip_address = pieces[1]
                log.info('Found IP address %s on device %s', ip_address,
                         current_device)
                devices[current_device] = ip_address
                current_device = None
    return devices
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_mac_address(device_index=0):
    """Return the MAC address of eth<device_index>, or None if not found.

    Parses ``ip addr show`` output: the token following any token that
    contains 'link' is taken to be the MAC address.

    Args:
        device_index (int): ethernet device index (e.g. 0 for eth0).

    Returns:
        str or None: the MAC address when found.
    """
    log = logging.getLogger(mod_logger + '.get_mac_address')
    command = ['ip', 'addr', 'show', 'eth{d}'.format(d=device_index)]
    log.info('Attempting to find a mac address at device index: {d}'.format(d=device_index))
    try:
        result = run_command(command)
    except CommandError:
        _, ex, trace = sys.exc_info()
        log.error('There was a problem running command, unable to determine mac address: {c}\n{e}'.format(
            c=command, e=str(ex)))
        return
    mac_address = None
    take_next = False
    for token in result['output'].split():
        if take_next:
            mac_address = token
            log.info('Found mac address: {m}'.format(m=mac_address))
            break
        if 'link' in token:
            take_next = True
    if not mac_address:
        log.info('mac address not found for device: {d}'.format(d=device_index))
    return mac_address
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def chmod(path, mode, recursive=False):
    """Set file permissions, emulating the bash ``chmod`` command.

    Args:
        path (str): full path to the file or directory.
        mode (str): mode string to apply (e.g. '0755').
        recursive (bool): apply recursively when True.

    Returns:
        int: exit code of the chmod command.

    Raises:
        CommandError: on bad arguments, a missing path, or command failure.
    """
    log = logging.getLogger(mod_logger + '.chmod')
    # Validate argument types.
    if not isinstance(path, basestring):
        msg = 'path argument is not a string'
        log.error(msg)
        raise CommandError(msg)
    if not isinstance(mode, basestring):
        msg = 'mode argument is not a string'
        log.error(msg)
        raise CommandError(msg)
    # The target must exist before chmod can act on it.
    if not os.path.exists(path):
        msg = 'Item not found: {p}'.format(p=path)
        log.error(msg)
        raise CommandError(msg)
    # Assemble: chmod [-R] <mode> <path>
    command = ['chmod'] + (['-R'] if recursive else []) + [mode, path]
    try:
        result = run_command(command)
    except CommandError:
        raise
    log.info('chmod command exited with code: {c}'.format(c=result['code']))
    return result['code']
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def mkdir_p(path):
    """Create a directory tree, emulating ``mkdir -p``.

    An already-existing directory is not an error; any other failure raises.

    Args:
        path (str): directory path to create.

    Raises:
        CommandError: if path is not a string or the creation fails.
    """
    log = logging.getLogger(mod_logger + '.mkdir_p')
    if not isinstance(path, basestring):
        msg = 'path argument is not a string'
        log.error(msg)
        raise CommandError(msg)
    log.info('Attempting to create directory: %s', path)
    try:
        os.makedirs(path)
    except OSError as e:
        # EEXIST on an existing directory mirrors mkdir -p's idempotence;
        # anything else is a genuine failure.
        if not (e.errno == errno.EEXIST and os.path.isdir(path)):
            msg = 'Unable to create directory: {p}'.format(p=path)
            log.error(msg)
            raise CommandError(msg)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def source(script):
    """Emulate bash's ``source``: run the script in a shell and import the
    resulting environment variables into this process.

    Args:
        script (str): full path to the script to source.

    Returns:
        dict: the environment variables parsed from the sourced shell.

    Raises:
        CommandError: if the argument is invalid or the script fails.
    """
    log = logging.getLogger(mod_logger + '.source')
    if not isinstance(script, basestring):
        msg = 'script argument must be a string'
        log.error(msg)
        raise CommandError(msg)
    log.info('Attempting to source script: %s', script)
    # Run ". script; env" in a shell and capture the resulting environment
    # as printed by env.
    try:
        pipe = subprocess.Popen(". %s; env" % script, stdout=subprocess.PIPE, shell=True)
        data = pipe.communicate()[0]
    except ValueError:
        _, ex, trace = sys.exc_info()
        msg = 'Invalid argument:\n{e}'.format(e=str(ex))
        log.error(msg)
        # Python 2 three-argument raise (re-raises with original traceback).
        raise CommandError, msg, trace
    except OSError:
        _, ex, trace = sys.exc_info()
        msg = 'File not found: {s}\n{e}'.format(s=script, e=str(ex))
        raise CommandError, msg, trace
    except subprocess.CalledProcessError:
        _, ex, trace = sys.exc_info()
        msg = 'Script {s} returned a non-zero exit code: {c}\n{e}'.format(
            s=script, e=str(ex), c=ex.returncode)
        log.error(msg)
        raise CommandError, msg, trace
    env = {}
    log.debug('Adding environment variables from data: {d}'.format(d=data))
    # Parse each "NAME=value" line emitted by env into a dict.
    for line in data.splitlines():
        entry = line.split("=", 1)
        if len(entry) != 2:
            log.warn('This property is not in prop=value format, and will be skipped: {p}'.format(p=line))
            continue
        try:
            env[entry[0]] = entry[1]
        except IndexError:
            _, ex, trace = sys.exc_info()
            log.warn('IndexError: There was a problem setting environment variables from line: {p}\n{e}'.format(
                p=line, e=str(ex)))
            continue
        else:
            log.debug('Added environment variable {p}={v}'.format(p=entry[0], v=entry[1]))
    # Apply the sourced variables to this process's environment.
    os.environ.update(env)
    return env
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def yum_update(downloadonly=False, dest_dir='/tmp'):
    """Run ``yum -y update`` on this system.

    When *downloadonly* is True the updated packages are only downloaded
    into *dest_dir* rather than installed.

    Args:
        downloadonly (bool): download packages without installing.
        dest_dir (str): download directory used when downloadonly is True.

    Returns:
        int: exit code from the yum command.

    Raises:
        CommandError: on invalid arguments or command failure.
    """
    log = logging.getLogger(mod_logger + '.yum_update')
    # Type checks on the args
    if not isinstance(dest_dir, basestring):
        msg = 'dest_dir argument must be a string'
        log.error(msg)
        raise CommandError(msg)
    if not isinstance(downloadonly, bool):
        msg = 'downloadonly argument must be a bool'
        log.error(msg)
        raise CommandError(msg)
    # If downloadonly was True, download packages to dest_dir
    if downloadonly:
        # Create the destination directory if it does not exist
        log.info('Creating directory: %s', dest_dir)
        try:
            mkdir_p(dest_dir)
        except OSError:
            _, ex, trace = sys.exc_info()
            msg = 'Unable to create destination directory: {d}'.format(
                d=dest_dir)
            log.error(msg)
            # Python 2 three-argument raise, preserving the traceback.
            raise CommandError, msg, trace
        # Build command string with downloadonly options specified
        command = ['yum', '-y', 'update', '--downloadonly',
                   '--downloaddir={d}'.format(d=dest_dir)]
        log.info('Downloading updates from yum to %s...', dest_dir)
    else:
        # Build command string to update directly
        command = ['yum', '-y', 'update']
        log.info('Installing yum updates from RHN...')
    # Run the command
    try:
        result = run_command(command)
    except CommandError:
        raise
    log.info('Yum update completed and exit with code: {c}'.format(
        c=result['code']))
    return result['code']
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def rpm_install(install_dir):
    """Install every RPM file found in *install_dir*.

    Args:
        install_dir (str): directory containing .rpm files.

    Returns:
        int: exit code from the rpm command.

    Raises:
        CommandError: on invalid arguments, a missing directory, or
            command failure.
    """
    log = logging.getLogger(mod_logger + '.rpm_install')
    # Type checks on the args
    if not isinstance(install_dir, basestring):
        msg = 'install_dir argument must be a string'
        log.error(msg)
        raise CommandError(msg)
    # Ensure the install_dir directory exists
    if not os.path.isdir(install_dir):
        msg = 'Directory not found: {f}'.format(f=install_dir)
        log.error(msg)
        raise CommandError(msg)
    # Create the command
    # NOTE(review): the '*.rpm' glob is a single argv element; globs are
    # only expanded by a shell, so unless run_command invokes a shell this
    # is passed literally to rpm — verify run_command's behavior.
    command = ['rpm', '-iv', '--force', '{d}/*.rpm'.format(d=install_dir)]
    # Run the rpm command
    try:
        result = run_command(command)
    except CommandError:
        raise
    log.info('RPM completed and exit with code: {c}'.format(
        c=result['code']))
    return result['code']
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def sed(file_path, pattern, replace_str, g=0):
    """Python emulation of bash ``sed``: in-place regex replacement in a file.

    Args:
        file_path (str): full path of the file to edit.
        pattern (str): regex searched for on each line.
        replace_str (str): replacement text.
        g (int): passed as ``count`` to re.sub — 0 replaces every
            occurrence on a matching line, N replaces at most N.

    Raises:
        CommandError: on invalid arguments or a missing file.
    """
    log = logging.getLogger(mod_logger + '.sed')
    # Type checks on the args
    if not isinstance(file_path, basestring):
        msg = 'file_path argument must be a string'
        log.error(msg)
        raise CommandError(msg)
    if not isinstance(pattern, basestring):
        msg = 'pattern argument must be a string'
        log.error(msg)
        raise CommandError(msg)
    if not isinstance(replace_str, basestring):
        msg = 'replace_str argument must be a string'
        log.error(msg)
        raise CommandError(msg)
    # Ensure the file_path file exists
    if not os.path.isfile(file_path):
        msg = 'File not found: {f}'.format(f=file_path)
        log.error(msg)
        raise CommandError(msg)
    # Search for a matching pattern and replace matching patterns
    log.info('Updating file: %s...', file_path)
    # With inplace=True, fileinput redirects sys.stdout into the file, so
    # each write below rewrites the corresponding line of file_path.
    for line in fileinput.input(file_path, inplace=True):
        if re.search(pattern, line):
            log.info('Updating line: %s', line)
            new_line = re.sub(pattern, replace_str, line, count=g)
            log.info('Replacing with line: %s', new_line)
            sys.stdout.write(new_line)
        else:
            sys.stdout.write(line)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def zip_dir(dir_path, zip_file):
    """Create a zip archive of the directory tree rooted at *dir_path*.

    Args:
        dir_path (str): directory to archive.
        zip_file (str): path of the zip archive to create.

    Raises:
        CommandError: on invalid arguments, a missing directory, or a
            failure while writing the archive.
    """
    log = logging.getLogger(mod_logger + '.zip_dir')
    # Validate args
    if not isinstance(dir_path, basestring):
        msg = 'dir_path argument must be a string'
        log.error(msg)
        raise CommandError(msg)
    if not isinstance(zip_file, basestring):
        msg = 'zip_file argument must be a string'
        log.error(msg)
        raise CommandError(msg)
    # Ensure the dir_path file exists
    if not os.path.isdir(dir_path):
        msg = 'Directory not found: {f}'.format(f=dir_path)
        log.error(msg)
        raise CommandError(msg)
    try:
        # allowZip64 permits archives larger than 2 GiB.
        with contextlib.closing(zipfile.ZipFile(zip_file, 'w', allowZip64=True)) as zip_w:
            for root, dirs, files in os.walk(dir_path):
                for f in files:
                    log.debug('Adding file to zip: %s', f)
                    # Store entries relative to dir_path's parent so the
                    # archive keeps the top-level directory name.
                    strip = len(dir_path) - len(os.path.split(dir_path)[-1])
                    file_name = os.path.join(root, f)
                    archive_name = os.path.join(root[strip:], f)
                    zip_w.write(file_name, archive_name)
    except Exception:
        _, ex, trace = sys.exc_info()
        msg = 'Unable to create zip file: {f}\n{e}'.format(
            f=zip_file, e=str(ex))
        log.error(msg)
        # Python 2 three-argument raise, preserving the traceback.
        raise CommandError, msg, trace
    log.info('Successfully created zip file: %s', zip_file)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_ip(interface=0):
    """Return this system's IP address.

    First tries a hostname lookup; if that fails, falls back to parsing
    ``ip addr show eth<interface>`` via a shell pipeline that appends the
    address to /root/ip.

    Args:
        interface (int): ethernet interface number (e.g. 0 for eth0).

    Returns:
        str or None: the IP address if one could be determined.

    Raises:
        CommandError: if the fallback shell command fails or /root/ip is
            missing afterwards.
    """
    log = logging.getLogger(mod_logger + '.get_ip')
    log.info('Getting the IP address for this system...')
    ip_address = None
    try:
        log.info('Attempting to get IP address by hostname...')
        ip_address = socket.gethostbyname(socket.gethostname())
    except socket.error:
        log.info('Unable to get IP address for this system using hostname, '
                 'using a bash command...')
        command = 'ip addr show eth%s | grep inet | grep -v inet6 | ' \
                  'awk \'{ print $2 }\' | cut -d/ -f1 ' \
                  '>> /root/ip' % interface
        try:
            log.info('Running command: %s', command)
            subprocess.check_call(command, shell=True)
        except(OSError, subprocess.CalledProcessError):
            _, ex, trace = sys.exc_info()
            msg = 'Unable to get the IP address of this system\n{e}'.format(
                e=str(ex))
            log.error(msg)
            # Python 2 three-argument raise, preserving the traceback.
            raise CommandError, msg, trace
        else:
            ip_file = '/root/ip'
            log.info('Command executed successfully, pulling IP address from '
                     'file: %s', ip_file)
            # Read the file written by the pipeline above; the last line
            # read wins.
            if os.path.isfile(ip_file):
                with open(ip_file, 'r') as f:
                    for line in f:
                        ip_address = line.strip()
                        log.info('Found IP address from file: %s', ip_address)
            else:
                msg = 'File not found: {f}'.format(f=ip_file)
                log.error(msg)
                raise CommandError(msg)
    log.info('Returning IP address: %s', ip_address)
    return ip_address
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.