text_prompt (string, lengths 100–17.7k, nullable) | code_prompt (string, lengths 7–9.86k, nullable)
---|---|
<SYSTEM_TASK:>
Remove this FirewallRule from the API.
<END_TASK>
<USER_TASK:>
Description:
def destroy(self):
"""
Remove this FirewallRule from the API.
This instance must be associated with a server for this method to work,
which is done by instantiating via server.get_firewall_rules().
""" |
if not hasattr(self, 'server') or not self.server:
raise Exception(
"""FirewallRule not associated with server;
please use server.get_firewall_rules() to get objects
that are associated with a server.
""")
return self.server.cloud_manager.delete_firewall_rule(
self.server.uuid,
self.position
) |
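A hedged usage sketch (not part of the dataset entry): the credentials and server UUID below are hypothetical placeholders, and it assumes the upcloud_api package layout with a CloudManager.get_server() accessor. Rules are fetched via server.get_firewall_rules(), as the docstring requires, so each FirewallRule carries the .server association that destroy() checks.

from upcloud_api import CloudManager  # assumed package/module path

manager = CloudManager('api_user', 'api_password')  # hypothetical credentials
server = manager.get_server('00000000-0000-0000-0000-000000000000')  # hypothetical UUID
rules = server.get_firewall_rules()   # associates each rule with `server`
if rules:
    rules[0].destroy()                # DELETE /server/{uuid}/firewall_rule/{position}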
<SYSTEM_TASK:>
Create a new Tag. Only name is mandatory.
<END_TASK>
<USER_TASK:>
Description:
def create_tag(self, name, description=None, servers=[]):
"""
Create a new Tag. Only name is mandatory.
Returns the created Tag object.
""" |
servers = [str(server) for server in servers]
body = {'tag': Tag(name, description, servers).to_dict()}
res = self.request('POST', '/tag', body)
return Tag(cloud_manager=self, **res['tag']) |
<SYSTEM_TASK:>
Remove tags from a server.
<END_TASK>
<USER_TASK:>
Description:
def remove_tags(self, server, tags):
"""
Remove tags from a server.
- server: Server object or UUID string
- tags: list of Tag objects or strings
""" |
uuid = str(server)
tags = [str(tag) for tag in tags]
url = '/server/{0}/untag/{1}'.format(uuid, ','.join(tags))
return self.post_request(url) |
<SYSTEM_TASK:>
Helper for assigning object attributes from API responses.
<END_TASK>
<USER_TASK:>
Description:
def assignIfExists(opts, default=None, **kwargs):
"""
Helper for assigning object attributes from API responses.
""" |
for opt in opts:
if(opt in kwargs):
return kwargs[opt]
return default |
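A quick illustration of the helper above (values are illustrative only): the options are scanned in order and the first key present in kwargs wins, otherwise the default is returned.

value = assignIfExists(['title', 'hostname'], default='unnamed',
                       hostname='web-1', zone='fi-hel1')
# value == 'web-1'; with neither 'title' nor 'hostname' present, 'unnamed' is returned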
<SYSTEM_TASK:>
Reset the server object with new values given as params.
<END_TASK>
<USER_TASK:>
Description:
def _reset(self, server, **kwargs):
"""
Reset the server object with new values given as params.
- server: a dict representing the server. e.g the API response.
- kwargs: any meta fields such as cloud_manager and populated.
Note: storage_devices and ip_addresses may be given in server as dicts or
in kwargs as lists containing Storage and IPAddress objects.
""" |
if server:
# handle storage, ip_address dicts and tags if they exist
Server._handle_server_subobjs(server, kwargs.get('cloud_manager'))
for key in server:
object.__setattr__(self, key, server[key])
for key in kwargs:
object.__setattr__(self, key, kwargs[key]) |
<SYSTEM_TASK:>
Sync changes from the API to the local object.
<END_TASK>
<USER_TASK:>
Description:
def populate(self):
"""
Sync changes from the API to the local object.
Note: syncs ip_addresses and storage_devices too (/server/uuid endpoint)
""" |
server, IPAddresses, storages = self.cloud_manager.get_server_data(self.uuid)
self._reset(
server,
ip_addresses=IPAddresses,
storage_devices=storages,
populated=True
)
return self |
<SYSTEM_TASK:>
Sync local changes in server's attributes to the API.
<END_TASK>
<USER_TASK:>
Description:
def save(self):
"""
Sync local changes in server's attributes to the API.
Note: DOES NOT sync IPAddresses and storage_devices,
use add_ip, add_storage, remove_ip, remove_storage instead.
""" |
# dict comprehension that also works with 2.6
# http://stackoverflow.com/questions/21069668/alternative-to-dict-comprehension-prior-to-python-2-7
kwargs = dict(
(field, getattr(self, field))
for field in self.updateable_fields
if hasattr(self, field)
)
self.cloud_manager.modify_server(self.uuid, **kwargs)
self._reset(kwargs) |
<SYSTEM_TASK:>
Restart the server. By default, issue a soft restart with a timeout of 30s
<END_TASK>
<USER_TASK:>
Description:
def restart(self, hard=False, timeout=30, force=True):
"""
Restart the server. By default, issue a soft restart with a timeout of 30s
and a hard restart after the timeout.
After the timeout a hard restart is performed if the server has not stopped.
Note: API responds immediately (unlike in start), with state: started.
This client will, however, set state as 'maintenance' to signal that the server is neither
started nor stopped.
""" |
body = dict()
body['restart_server'] = {
'stop_type': 'hard' if hard else 'soft',
'timeout': '{0}'.format(timeout),
'timeout_action': 'destroy' if force else 'ignore'
}
path = '/server/{0}/restart'.format(self.uuid)
self.cloud_manager.post_request(path, body)
object.__setattr__(self, 'state', 'maintenance') |
<SYSTEM_TASK:>
Release the specified IP-address from the server.
<END_TASK>
<USER_TASK:>
Description:
def remove_ip(self, IPAddress):
"""
Release the specified IP-address from the server.
""" |
self.cloud_manager.release_ip(IPAddress.address)
self.ip_addresses.remove(IPAddress) |
<SYSTEM_TASK:>
Attach the given storage to the Server.
<END_TASK>
<USER_TASK:>
Description:
def add_storage(self, storage=None, type='disk', address=None):
"""
Attach the given storage to the Server.
Default address is next available.
""" |
self.cloud_manager.attach_storage(server=self.uuid,
storage=storage.uuid,
storage_type=type,
address=address)
storage.address = address
storage.type = type
self.storage_devices.append(storage) |
<SYSTEM_TASK:>
Remove Storage from a Server.
<END_TASK>
<USER_TASK:>
Description:
def remove_storage(self, storage):
"""
Remove Storage from a Server.
The Storage must be a reference to an object in
Server.storage_devices or the method will throw an Exception.
A Storage from get_storage(uuid) will not work as it is missing the 'address' property.
""" |
if not hasattr(storage, 'address'):
raise Exception(
('Storage does not have an address. '
'Access the Storage via Server.storage_devices '
'so they include an address. '
'(This is due to how the API handles Storages)')
)
self.cloud_manager.detach_storage(server=self.uuid, address=storage.address)
self.storage_devices.remove(storage) |
<SYSTEM_TASK:>
Helper function for automatically adding several FirewallRules in series.
<END_TASK>
<USER_TASK:>
Description:
def configure_firewall(self, FirewallRules):
"""
Helper function for automatically adding several FirewallRules in series.
""" |
firewall_rule_bodies = [
FirewallRule.to_dict()
for FirewallRule in FirewallRules
]
return self.cloud_manager.configure_firewall(self, firewall_rule_bodies) |
<SYSTEM_TASK:>
Prepare a JSON serializable dict from a Server instance with nested Storage instances.
<END_TASK>
<USER_TASK:>
Description:
def prepare_post_body(self):
"""
Prepare a JSON serializable dict from a Server instance with nested
Storage instances.
""" |
body = dict()
# mandatory
body['server'] = {
'hostname': self.hostname,
'zone': self.zone,
'title': self.title,
'storage_devices': {}
}
# optional fields
for optional_field in self.optional_fields:
if hasattr(self, optional_field):
body['server'][optional_field] = getattr(self, optional_field)
# set password_delivery default as 'none' to prevent API from sending
# emails (with credentials) about each created server
if not hasattr(self, 'password_delivery'):
body['server']['password_delivery'] = 'none'
# collect storage devices and create a unique title (see: Storage.title in API doc)
# for each of them
body['server']['storage_devices'] = {
'storage_device': []
}
storage_title_id = 0 # running number for unique storage titles
for storage in self.storage_devices:
if not hasattr(storage, 'os') or storage.os is None:
storage_title_id += 1
storage_body = storage.to_dict()
# setup default titles for storages unless the user has specified
# them at storage.title
if not hasattr(storage, 'title') or not storage.title:
if hasattr(storage, 'os') and storage.os:
storage_body['title'] = self.hostname + ' OS disk'
else:
storage_body['title'] = self.hostname + ' storage disk ' + str(storage_title_id)
# figure out the storage `action` parameter
# public template
if hasattr(storage, 'os') and storage.os:
storage_body['action'] = 'clone'
storage_body['storage'] = OperatingSystems.get_OS_UUID(storage.os)
# private template
elif hasattr(storage, 'uuid'):
storage_body['action'] = 'clone'
storage_body['storage'] = storage.uuid
# create a new storage
else:
storage_body['action'] = 'create'
body['server']['storage_devices']['storage_device'].append(storage_body)
if hasattr(self, 'ip_addresses') and self.ip_addresses:
body['server']['ip_addresses'] = {
'ip_address': [
ip.to_dict() for ip in self.ip_addresses
]
}
return body |
<SYSTEM_TASK:>
Prepare a JSON serializable dict for read-only purposes.
<END_TASK>
<USER_TASK:>
Description:
def to_dict(self):
"""
Prepare a JSON serializable dict for read-only purposes.
Includes storages and IP-addresses.
Use prepare_post_body for POST and .save() for PUT.
""" |
fields = dict(vars(self).items())
if self.populated:
fields['ip_addresses'] = []
fields['storage_devices'] = []
for ip in self.ip_addresses:
fields['ip_addresses'].append({
'address': ip.address,
'access': ip.access,
'family': ip.family
})
for storage in self.storage_devices:
fields['storage_devices'].append({
'address': storage.address,
'storage': storage.uuid,
'storage_size': storage.size,
'storage_title': storage.title,
'type': storage.type,
})
del fields['populated']
del fields['cloud_manager']
return fields |
<SYSTEM_TASK:>
Return the server's IP address.
<END_TASK>
<USER_TASK:>
Description:
def get_ip(self, access='public', addr_family=None, strict=None):
"""
Return the server's IP address.
Params:
- addr_family: IPv4, IPv6 or None. None prefers IPv4 but will
return IPv6 if IPv4 addr was not available.
- access: 'public' or 'private'
""" |
if addr_family not in ['IPv4', 'IPv6', None]:
raise Exception("`addr_family` must be 'IPv4', 'IPv6' or None")
if access not in ['private', 'public']:
raise Exception("`access` must be 'public' or 'private'")
if not hasattr(self, 'ip_addresses'):
self.populate()
# server can have several public or private IPs
ip_addrs = [
ip_addr for ip_addr in self.ip_addresses
if ip_addr.access == access
]
# prefer addr_family (or IPv4 if none given)
preferred_family = addr_family if addr_family else 'IPv4'
for ip_addr in ip_addrs:
if ip_addr.family == preferred_family:
return ip_addr.address
# any IP (of the right access) will do if available and addr_family is None
return ip_addrs[0].address if ip_addrs and not addr_family else None |
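A hedged usage sketch: `server` stands for a populated Server object from this client; the addresses involved are whatever the API returned for that server.

public_v4 = server.get_ip()                    # prefers public IPv4, falls back to any public address
private_ip = server.get_ip(access='private')   # first private address, IPv4 preferred
public_v6 = server.get_ip(addr_family='IPv6')  # None unless a public IPv6 address exists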
<SYSTEM_TASK:>
Blocking wait until target_state reached. update_interval is in seconds.
<END_TASK>
<USER_TASK:>
Description:
def _wait_for_state_change(self, target_states, update_interval=10):
"""
Blocking wait until target_state reached. update_interval is in seconds.
Warning: state change must begin before calling this method.
""" |
while self.state not in target_states:
if self.state == 'error':
raise Exception('server is in error state')
# poll the server state every update_interval seconds
sleep(update_interval)
self.populate() |
<SYSTEM_TASK:>
Destroy a server and its storages. Stops the server before destroying.
<END_TASK>
<USER_TASK:>
Description:
def stop_and_destroy(self, sync=True):
"""
Destroy a server and its storages. Stops the server before destroying.
Syncs the server state from the API, use sync=False to disable.
""" |
def _self_destruct():
"""destroy the server and all storages attached to it."""
# try_it_n_times util is used as a convenience because
# Servers and Storages can fluctuate between "maintenance" and their
# original state due to several different reasons especially when
# destroying infrastructure.
# first destroy server
try_it_n_times(operation=self.destroy,
expected_error_codes=['SERVER_STATE_ILLEGAL'],
custom_error='destroying server failed')
# storages may be deleted instantly after server DELETE
for storage in self.storage_devices:
try_it_n_times(operation=storage.destroy,
expected_error_codes=['STORAGE_STATE_ILLEGAL'],
custom_error='destroying storage failed')
if sync:
self.populate()
# server is either starting or stopping (or error)
if self.state in ['maintenance', 'error']:
self._wait_for_state_change(['stopped', 'started'])
if self.state == 'started':
try_it_n_times(operation=self.stop,
expected_error_codes=['SERVER_STATE_ILLEGAL'],
custom_error='stopping server failed')
self._wait_for_state_change(['stopped'])
if self.state == 'stopped':
_self_destruct()
else:
raise Exception('unknown server state: ' + self.state) |
<SYSTEM_TASK:>
Revert the state to the version stored on disc.
<END_TASK>
<USER_TASK:>
Description:
def revert(self):
"""Revert the state to the version stored on disc.""" |
if self.filepath:
if path.isfile(self.filepath):
serialised_file = open(self.filepath, "r")
try:
self.state = json.load(serialised_file)
except ValueError:
print("No JSON information could be read from the persistence file - could be empty: %s" % self.filepath)
self.state = {}
finally:
serialised_file.close()
else:
print("The persistence file has not yet been created or does not exist, so the state cannot be read from it yet.")
else:
print("Filepath to the persistence file is not set. State cannot be read.")
return False |
<SYSTEM_TASK:>
Synchronise and update the stored state to the in-memory state.
<END_TASK>
<USER_TASK:>
Description:
def sync(self):
"""Synchronise and update the stored state to the in-memory state.""" |
if self.filepath:
serialised_file = open(self.filepath, "w")
json.dump(self.state, serialised_file)
serialised_file.close()
else:
print("Filepath to the persistence file is not set. State cannot be synced to disc.") |
<SYSTEM_TASK:>
Also try to create the bucket.
<END_TASK>
<USER_TASK:>
Description:
def _require_bucket(self, bucket_name):
""" Also try to create the bucket. """ |
if not self.exists(bucket_name) and not self.claim_bucket(bucket_name):
raise OFSException("Invalid bucket: %s" % bucket_name)
return self._get_bucket(bucket_name) |
<SYSTEM_TASK:>
Will fail if the bucket or label don't exist
<END_TASK>
<USER_TASK:>
Description:
def del_stream(self, bucket, label):
""" Will fail if the bucket or label don't exist """ |
bucket = self._require_bucket(bucket)
key = self._require_key(bucket, label)
key.delete() |
<SYSTEM_TASK:>
Return the URL for the given resource ID.
<END_TASK>
<USER_TASK:>
Description:
def get_url_for_id(client_site_url, apikey, resource_id):
"""Return the URL for the given resource ID.
Contacts the client site's API to get the URL for the ID and returns it.
:raises CouldNotGetURLError: if getting the URL fails for any reason
""" |
# TODO: Handle invalid responses from the client site.
url = client_site_url + u"deadoralive/get_url_for_resource_id"
params = {"resource_id": resource_id}
response = requests.get(url, headers=dict(Authorization=apikey),
params=params)
if not response.ok:
raise CouldNotGetURLError(
u"Couldn't get URL for resource {id}: {code} {reason}".format(
id=resource_id, code=response.status_code,
reason=response.reason))
return response.json() |
<SYSTEM_TASK:>
Check whether the given URL is dead or alive.
<END_TASK>
<USER_TASK:>
Description:
def check_url(url):
"""Check whether the given URL is dead or alive.
Returns a dict with four keys:
"url": The URL that was checked (string)
"alive": Whether the URL was working, True or False
"status": The HTTP status code of the response from the URL,
e.g. 200, 401, 500 (int)
"reason": The reason for the success or failure of the check,
e.g. "OK", "Unauthorized", "Internal Server Error" (string)
The "status" may be None if we did not get a valid HTTP response,
e.g. in the event of a timeout, DNS failure or invalid HTTP response.
The "reason" will always be a string, but may be a requests library
exception string rather than an HTTP reason string if we did not get a valid
HTTP response.
""" |
result = {"url": url}
try:
response = requests.get(url)
result["status"] = response.status_code
result["reason"] = response.reason
response.raise_for_status() # Raise if status_code is not OK.
result["alive"] = True
except AttributeError as err:
if err.message == "'NoneType' object has no attribute 'encode'":
# requests seems to throw these for some invalid URLs.
result["alive"] = False
result["reason"] = "Invalid URL"
result["status"] = None
else:
raise
except requests.exceptions.RequestException as err:
result["alive"] = False
if "reason" not in result:
result["reason"] = str(err)
if "status" not in result:
# This can happen if the response is invalid HTTP, if we get a DNS
# failure, or a timeout, etc.
result["status"] = None
# We should always have these four fields in the result.
assert "url" in result
assert result.get("alive") in (True, False)
assert "status" in result
assert "reason" in result
return result |
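A minimal usage sketch of check_url(); the URL is illustrative only.

result = check_url("https://example.com/")
print(result["alive"], result["status"], result["reason"])
# e.g. True 200 OK for a live page; a timeout or DNS failure gives alive=False and status=None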
<SYSTEM_TASK:>
Post the given link check result to the client site.
<END_TASK>
<USER_TASK:>
Description:
def upsert_result(client_site_url, apikey, resource_id, result):
"""Post the given link check result to the client site.""" |
# TODO: Handle exceptions and unexpected results.
url = client_site_url + u"deadoralive/upsert"
params = result.copy()
params["resource_id"] = resource_id
requests.post(url, headers=dict(Authorization=apikey), params=params) |
<SYSTEM_TASK:>
Get links from the client site, check them, and post the results back.
<END_TASK>
<USER_TASK:>
Description:
def get_check_and_report(client_site_url, apikey, get_resource_ids_to_check,
get_url_for_id, check_url, upsert_result):
"""Get links from the client site, check them, and post the results back.
Get resource IDs from the client site, get the URL for each resource ID from
the client site, check each URL, and post the results back to the client
site.
This function can be called repeatedly to keep on getting more links from
the client site and checking them.
The functions that this function calls to carry out the various tasks are
taken as parameters to this function for testing purposes - it makes it
easy for tests to pass in mock functions. It also decouples the code nicely.
:param client_site_url: the base URL of the client site
:type client_site_url: string
:param apikey: the API key to use when making requests to the client site
:type apikey: string or None
:param get_resource_ids_to_check: The function to call to get the list of
resource IDs to be checked from the client site. See
get_resource_ids_to_check() above for the interface that this function
should implement.
:type get_resource_ids_to_check: callable
:param get_url_for_id: The function to call to get the URL for a given
resource ID from the client site. See get_url_for_id() above for the
interface that this function should implement.
:type get_url_for_id: callable
:param check_url: The function to call to check whether a URL is dead or
alive. See check_url() above for the interface that this function
should implement.
:type check_url: callable
:param upsert_result: The function to call to post a link check result to
the client site. See upsert_result() above for the interface that this
function should implement.
:type upsert_result: callable
""" |
logger = _get_logger()
resource_ids = get_resource_ids_to_check(client_site_url, apikey)
for resource_id in resource_ids:
try:
url = get_url_for_id(client_site_url, apikey, resource_id)
except CouldNotGetURLError:
logger.info(u"This link checker was not authorized to access "
"resource {0}, skipping.".format(resource_id))
continue
result = check_url(url)
status = result["status"]
reason = result["reason"]
if result["alive"]:
logger.info(u"Checking URL {0} of resource {1} succeeded with "
"status {2}:".format(url, resource_id, status))
else:
logger.info(u"Checking URL {0} of resource {1} failed with error "
"{2}:".format(url, resource_id, reason))
upsert_result(client_site_url, apikey, resource_id=resource_id,
result=result) |
<SYSTEM_TASK:>
Remove a member from the archive.
<END_TASK>
<USER_TASK:>
Description:
def remove(self, member):
"""Remove a member from the archive.""" |
# Make sure we have an info object
if isinstance(member, ZipInfo):
# 'member' is already an info object
zinfo = member
else:
# Get info object for name
zinfo = self.getinfo(member)
# compute the location of the file data in the local file header,
# by adding the lengths of the records before it
zlen = len(zinfo.FileHeader()) + zinfo.compress_size
fileidx = self.filelist.index(zinfo)
fileofs = sum(
[len(self.filelist[f].FileHeader()) + self.filelist[f].compress_size
for f in xrange(0, fileidx)]
)
self.fp.seek(fileofs + zlen)
after = self.fp.read()
self.fp.seek(fileofs)
self.fp.write(after)
self.fp.seek(-zlen, 2)
self.fp.truncate()
self._didModify = True
self.filelist.remove(zinfo)
del self.NameToInfo[member] |
<SYSTEM_TASK:>
this borrows too much from the internals of ofs
<END_TASK>
<USER_TASK:>
Description:
def make_label(self, path):
"""
this borrows too much from the internals of ofs
maybe expose different parts of the api?
""" |
from datetime import datetime
from StringIO import StringIO
path = path.lstrip("/")
bucket, label = path.split("/", 1)
bucket = self.ofs._require_bucket(bucket)
key = self.ofs._get_key(bucket, label)
if key is None:
key = bucket.new_key(label)
self.ofs._update_key_metadata(key, { '_creation_time': str(datetime.utcnow()) })
key.set_contents_from_file(StringIO(''))
key.close() |
<SYSTEM_TASK:>
stub. this really needs to be a call to the remote
<END_TASK>
<USER_TASK:>
Description:
def get_proxy_config(self, headers, path):
"""
stub. this really needs to be a call to the remote
restful interface to get the appropriate host and
headers to use for this upload
""" |
self.ofs.conn.add_aws_auth_header(headers, 'PUT', path)
from pprint import pprint
pprint(headers)
host = self.ofs.conn.server_name()
return host, headers |
<SYSTEM_TASK:>
Return a FirewallRule object based on server uuid and rule position.
<END_TASK>
<USER_TASK:>
Description:
def get_firewall_rule(self, server_uuid, firewall_rule_position, server_instance=None):
"""
Return a FirewallRule object based on server uuid and rule position.
""" |
url = '/server/{0}/firewall_rule/{1}'.format(server_uuid, firewall_rule_position)
res = self.get_request(url)
return FirewallRule(**res['firewall_rule']) |
<SYSTEM_TASK:>
Return all FirewallRule objects based on a server instance or uuid.
<END_TASK>
<USER_TASK:>
Description:
def get_firewall_rules(self, server):
"""
Return all FirewallRule objects based on a server instance or uuid.
""" |
server_uuid, server_instance = uuid_and_instance(server)
url = '/server/{0}/firewall_rule'.format(server_uuid)
res = self.get_request(url)
return [
FirewallRule(server=server_instance, **firewall_rule)
for firewall_rule in res['firewall_rules']['firewall_rule']
] |
<SYSTEM_TASK:>
Create a new firewall rule for a given server uuid.
<END_TASK>
<USER_TASK:>
Description:
def create_firewall_rule(self, server, firewall_rule_body):
"""
Create a new firewall rule for a given server uuid.
The rule can be given as a dict or with FirewallRule.prepare_post_body().
Returns a FirewallRule object.
""" |
server_uuid, server_instance = uuid_and_instance(server)
url = '/server/{0}/firewall_rule'.format(server_uuid)
body = {'firewall_rule': firewall_rule_body}
res = self.post_request(url, body)
return FirewallRule(server=server_instance, **res['firewall_rule']) |
<SYSTEM_TASK:>
Delete a firewall rule based on a server uuid and rule position.
<END_TASK>
<USER_TASK:>
Description:
def delete_firewall_rule(self, server_uuid, firewall_rule_position):
"""
Delete a firewall rule based on a server uuid and rule position.
""" |
url = '/server/{0}/firewall_rule/{1}'.format(server_uuid, firewall_rule_position)
return self.request('DELETE', url) |
<SYSTEM_TASK:>
Helper for calling create_firewall_rule in series for a list of firewall_rule_bodies.
<END_TASK>
<USER_TASK:>
Description:
def configure_firewall(self, server, firewall_rule_bodies):
"""
Helper for calling create_firewall_rule in series for a list of firewall_rule_bodies.
""" |
server_uuid, server_instance = uuid_and_instance(server)
return [
self.create_firewall_rule(server_uuid, rule)
for rule in firewall_rule_bodies
] |
<SYSTEM_TASK:>
POSTs a raw SMTP message to the Sinkhole API
<END_TASK>
<USER_TASK:>
Description:
def post(self, data):
"""
POSTs a raw SMTP message to the Sinkhole API
:param data: raw content to be submitted [STRING]
:return: { list of predictions }
""" |
uri = '{}/sinkhole'.format(self.client.remote)
self.logger.debug(uri)
if PYVERSION == 2:
try:
data = data.decode('utf-8')
except Exception:
data = data.decode('latin-1')
data = {
'message': data
}
body = self.client.post(uri, data)
return body |
<SYSTEM_TASK:>
Perform a request with a given body to a given endpoint in UpCloud's API.
<END_TASK>
<USER_TASK:>
Description:
def request(self, method, endpoint, body=None, timeout=-1):
"""
Perform a request with a given body to a given endpoint in UpCloud's API.
Handles errors with __error_middleware.
""" |
if method not in set(['GET', 'POST', 'PUT', 'DELETE']):
raise Exception('Invalid/Forbidden HTTP method')
url = '/' + self.api_v + endpoint
headers = {
'Authorization': self.token,
'Content-Type': 'application/json'
}
if body:
json_body_or_None = json.dumps(body)
else:
json_body_or_None = None
call_timeout = timeout if timeout != -1 else self.timeout
APIcall = getattr(requests, method.lower())
res = APIcall('https://api.upcloud.com' + url,
data=json_body_or_None,
headers=headers,
timeout=call_timeout)
if res.text:
res_json = res.json()
else:
res_json = {}
return self.__error_middleware(res, res_json) |
<SYSTEM_TASK:>
Perform a POST request to a given endpoint in UpCloud's API.
<END_TASK>
<USER_TASK:>
Description:
def post_request(self, endpoint, body=None, timeout=-1):
"""
Perform a POST request to a given endpoint in UpCloud's API.
""" |
return self.request('POST', endpoint, body, timeout) |
<SYSTEM_TASK:>
Middleware that raises an exception when HTTP statuscode is an error code.
<END_TASK>
<USER_TASK:>
Description:
def __error_middleware(self, res, res_json):
"""
Middleware that raises an exception when HTTP statuscode is an error code.
""" |
if(res.status_code in [400, 401, 402, 403, 404, 405, 406, 409]):
err_dict = res_json.get('error', {})
raise UpCloudAPIError(error_code=err_dict.get('error_code'),
error_message=err_dict.get('error_message'))
return res_json |
<SYSTEM_TASK:>
Performs a search against the predict endpoint
<END_TASK>
<USER_TASK:>
Description:
def get(self, q, limit=None):
"""
Performs a search against the predict endpoint
:param q: query to be searched for [STRING]
:return: { score: [0|1] }
""" |
uri = '{}/predict?q={}'.format(self.client.remote, q)
self.logger.debug(uri)
body = self.client.get(uri)
return body['score'] |
<SYSTEM_TASK:>
Create IPAddress objects from API response data.
<END_TASK>
<USER_TASK:>
Description:
def _create_ip_address_objs(ip_addresses, cloud_manager):
"""
Create IPAddress objects from API response data.
Also associates CloudManager with the objects.
""" |
# ip-addresses might be provided as a flat array or as a following dict:
# {'ip_addresses': {'ip_address': [...]}} || {'ip_address': [...]}
if 'ip_addresses' in ip_addresses:
ip_addresses = ip_addresses['ip_addresses']
if 'ip_address' in ip_addresses:
ip_addresses = ip_addresses['ip_address']
return [
IPAddress(cloud_manager=cloud_manager, **ip_addr)
for ip_addr in ip_addresses
] |
<SYSTEM_TASK:>
Reset the object's attributes.
<END_TASK>
<USER_TASK:>
Description:
def _reset(self, **kwargs):
"""
Reset the object's attributes.
Accepts servers as either unflattened or flattened UUID strings or Server objects.
""" |
super(Tag, self)._reset(**kwargs)
# backup name for changing it (look: Tag.save)
self._api_name = self.name
# flatten { servers: { server: [] } }
if 'server' in self.servers:
self.servers = kwargs['servers']['server']
# convert UUIDs into server objects
if self.servers and isinstance(self.servers[0], six.string_types):
self.servers = [Server(uuid=server, populated=False) for server in self.servers] |
<SYSTEM_TASK:>
HTTP GET function
<END_TASK>
<USER_TASK:>
Description:
def _get(self, uri, params={}):
"""
HTTP GET function
:param uri: REST endpoint
:param params: optional HTTP params to pass to the endpoint
:return: list of results (usually a list of dicts)
Example:
ret = cli.get('/search', params={ 'q': 'example.org' })
""" |
if not uri.startswith(self.remote):
uri = '{}{}'.format(self.remote, uri)
return self._make_request(uri, params) |
<SYSTEM_TASK:>
HTTP POST function
<END_TASK>
<USER_TASK:>
Description:
def _post(self, uri, data):
"""
HTTP POST function
:param uri: REST endpoint to POST to
:param data: list of dicts to be passed to the endpoint
:return: list of dicts, usually will be a list of objects or id's
Example:
ret = cli.post('/indicators', { 'indicator': 'example.com' })
""" |
if not uri.startswith(self.remote):
uri = '{}/{}'.format(self.remote, uri)
self.logger.debug(uri)
return self._make_request(uri, data=data) |
<SYSTEM_TASK:>
modify_server allows updating the server's updateable_fields.
<END_TASK>
<USER_TASK:>
Description:
def modify_server(self, UUID, **kwargs):
"""
modify_server allows updating the server's updateable_fields.
Note: Server's IP-addresses and Storages are managed by their own add/remove methods.
""" |
body = dict()
body['server'] = {}
for arg in kwargs:
if arg not in Server.updateable_fields:
raise Exception('{0} is not an updateable field'.format(arg))
body['server'][arg] = kwargs[arg]
res = self.request('PUT', '/server/{0}'.format(UUID), body)
server = res['server']
# Populate subobjects
IPAddresses = IPAddress._create_ip_address_objs(server.pop('ip_addresses'),
cloud_manager=self)
storages = Storage._create_storage_objs(server.pop('storage_devices'),
cloud_manager=self)
return Server(
server,
ip_addresses=IPAddresses,
storage_devices=storages,
populated=True,
cloud_manager=self
) |
<SYSTEM_TASK:>
Converts file in IDX format provided by file-like input into numpy.ndarray
<END_TASK>
<USER_TASK:>
Description:
def _internal_convert(inp):
"""
Converts file in IDX format provided by file-like input into numpy.ndarray
and returns it.
""" |
# Read the "magic number" - 4 bytes.
try:
mn = struct.unpack('>BBBB', inp.read(4))
except struct.error:
raise FormatError(struct.error)
# First two bytes are always zero, check it.
if mn[0] != 0 or mn[1] != 0:
msg = ("Incorrect first two bytes of the magic number: " +
"0x{0:02X} 0x{1:02X}".format(mn[0], mn[1]))
raise FormatError(msg)
# 3rd byte is the data type code.
dtype_code = mn[2]
if dtype_code not in _DATA_TYPES_IDX:
msg = "Incorrect data type code: 0x{0:02X}".format(dtype_code)
raise FormatError(msg)
# 4th byte is the number of dimensions.
dims = int(mn[3])
# See possible data types description.
dtype, dtype_s, el_size = _DATA_TYPES_IDX[dtype_code]
# 4-byte integer for length of each dimension.
try:
dims_sizes = struct.unpack('>' + 'I' * dims, inp.read(4 * dims))
except struct.error as e:
raise FormatError('Dims sizes: {0}'.format(e))
# Full length of data.
full_length = reduce(operator.mul, dims_sizes, 1)
# Create a numpy array from the data
try:
result_array = numpy.frombuffer(
inp.read(full_length * el_size),
dtype=numpy.dtype(dtype)
).reshape(dims_sizes)
except ValueError as e:
raise FormatError('Error creating numpy array: {0}'.format(e))
# Check for superfluous data.
if len(inp.read(1)) > 0:
raise FormatError('Superfluous data detected.')
return result_array |
<SYSTEM_TASK:>
Writes the contents of the numpy.ndarray ndarr to bytes in IDX format and
<END_TASK>
<USER_TASK:>
Description:
def convert_to_string(ndarr):
"""
Writes the contents of the numpy.ndarray ndarr to bytes in IDX format and
returns it.
""" |
with contextlib.closing(BytesIO()) as bytesio:
_internal_write(bytesio, ndarr)
return bytesio.getvalue() |
<SYSTEM_TASK:>
Creates a new Feed object
<END_TASK>
<USER_TASK:>
Description:
def new(self, user, name, description=None):
"""
Creates a new Feed object
:param user: feed username
:param name: feed name
:param description: feed description
:return: dict
""" |
uri = self.client.remote + '/users/{0}/feeds'.format(user)
data = {
'feed': {
'name': name,
'description': description
}
}
resp = self.client.post(uri, data)
return resp |
<SYSTEM_TASK:>
Removes a feed
<END_TASK>
<USER_TASK:>
Description:
def delete(self, user, name):
"""
Removes a feed
:param user: feed username
:param name: feed name
:return: true/false
""" |
uri = self.client.remote + '/users/{}/feeds/{}'.format(user, name)
resp = self.client.session.delete(uri)
return resp.status_code |
<SYSTEM_TASK:>
Returns a list of Feeds from the API
<END_TASK>
<USER_TASK:>
Description:
def index(self, user):
"""
Returns a list of Feeds from the API
:param user: feed username
:return: list
Example:
ret = feed.index('csirtgadgets')
""" |
uri = self.client.remote + '/users/{0}/feeds'.format(user)
return self.client.get(uri) |
<SYSTEM_TASK:>
Returns a specific Feed from the API
<END_TASK>
<USER_TASK:>
Description:
def show(self, user, name, limit=None, lasttime=None):
"""
Returns a specific Feed from the API
:param user: feed username
:param name: feed name
:param limit: limit the results
:param lasttime: only show >= lasttime
:return: dict
Example:
ret = feed.show('csirtgadgets', 'port-scanners', limit=5)
""" |
uri = self.client.remote + '/users/{0}/feeds/{1}'.format(user, name)
return self.client.get(uri, params={'limit': limit, 'lasttime': lasttime}) |
<SYSTEM_TASK:>
Create a row for the schedule table.
<END_TASK>
<USER_TASK:>
Description:
def make_schedule_row(schedule_day, slot, seen_items):
"""Create a row for the schedule table.""" |
row = ScheduleRow(schedule_day, slot)
skip = {}
expanding = {}
all_items = list(slot.scheduleitem_set
.select_related('talk', 'page', 'venue')
.all())
for item in all_items:
if item in seen_items:
# Inc rowspan
seen_items[item]['rowspan'] += 1
# Note that we need to skip this during colspan checks
skip[item.venue] = seen_items[item]
continue
scheditem = {'item': item, 'rowspan': 1, 'colspan': 1}
row.items[item.venue] = scheditem
seen_items[item] = scheditem
if item.expand:
expanding[item.venue] = []
empty = []
expanding_right = None
skipping = 0
skip_item = None
for venue in schedule_day.venues:
if venue in skip:
# We need to skip all the venues this item spans over
skipping = 1
skip_item = skip[venue]
continue
if venue in expanding:
item = row.items[venue]
for empty_venue in empty:
row.items.pop(empty_venue)
item['colspan'] += 1
empty = []
expanding_right = item
elif venue in row.items:
empty = []
expanding_right = None
elif expanding_right:
expanding_right['colspan'] += 1
elif skipping > 0 and skipping < skip_item['colspan']:
skipping += 1
else:
skipping = 0
empty.append(venue)
row.items[venue] = {'item': None, 'rowspan': 1, 'colspan': 1}
return row |
<SYSTEM_TASK:>
Helper function which creates an ordered list of schedule days
<END_TASK>
<USER_TASK:>
Description:
def generate_schedule(today=None):
"""Helper function which creates an ordered list of schedule days""" |
# We create a list of slots and schedule items
schedule_days = {}
seen_items = {}
for slot in Slot.objects.all().order_by('end_time', 'start_time', 'day'):
day = slot.get_day()
if today and day != today:
# Restrict ourselves to only today
continue
schedule_day = schedule_days.get(day)
if schedule_day is None:
schedule_day = schedule_days[day] = ScheduleDay(day)
row = make_schedule_row(schedule_day, slot, seen_items)
schedule_day.rows.append(row)
return sorted(schedule_days.values(), key=lambda x: x.day.date) |
<SYSTEM_TASK:>
Create an iCal file from the schedule
<END_TASK>
<USER_TASK:>
Description:
def get(self, request):
"""Create a iCal file from the schedule""" |
# Heavily inspired by https://djangosnippets.org/snippets/2223/ and
# the icalendar documentation
calendar = Calendar()
site = get_current_site(request)
calendar.add('prodid', '-//%s Schedule//%s//' % (site.name, site.domain))
calendar.add('version', '2.0')
# Since we don't need to format anything here, we can just use a list
# of schedule items
for item in ScheduleItem.objects.all():
sched_event = Event()
sched_event.add('dtstamp', item.last_updated)
sched_event.add('summary', item.get_title())
sched_event.add('location', item.venue.name)
sched_event.add('dtstart', item.get_start_datetime())
sched_event.add('duration', datetime.timedelta(minutes=item.get_duration_minutes()))
sched_event.add('class', 'PUBLIC')
sched_event.add('uid', '%s@%s' % (item.pk, site.domain))
calendar.add_component(sched_event)
response = HttpResponse(calendar.to_ical(), content_type="text/calendar")
response['Content-Disposition'] = 'attachment; filename=schedule.ics'
return response |
<SYSTEM_TASK:>
Override django-bakery to skip pages marked exclude_from_static
<END_TASK>
<USER_TASK:>
Description:
def build_object(self, obj):
"""Override django-bakery to skip pages marked exclude_from_static""" |
if not obj.exclude_from_static:
super(ShowPage, self).build_object(obj) |
<SYSTEM_TASK:>
Override django-bakery to skip talks that raise 403
<END_TASK>
<USER_TASK:>
Description:
def build_object(self, obj):
"""Override django-bakery to skip talks that raise 403""" |
try:
super(TalkView, self).build_object(obj)
except PermissionDenied:
# We cleanup the directory created
self.unbuild_object(obj) |
<SYSTEM_TASK:>
Override delete to only withdraw
<END_TASK>
<USER_TASK:>
Description:
def delete(self, request, *args, **kwargs):
"""Override delete to only withdraw""" |
talk = self.get_object()
talk.status = WITHDRAWN
talk.save()
revisions.set_user(self.request.user)
revisions.set_comment("Talk Withdrawn")
return HttpResponseRedirect(self.success_url) |
<SYSTEM_TASK:>
A decorator that applies an ordering to the QuerySet returned by a
<END_TASK>
<USER_TASK:>
Description:
def order_results_by(*fields):
"""A decorator that applies an ordering to the QuerySet returned by a
function.
""" |
def decorator(f):
@functools.wraps(f)
def wrapper(*args, **kw):
result = f(*args, **kw)
return result.order_by(*fields)
return wrapper
return decorator |
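A hedged sketch of the decorator in use: `Talk` and the 'A' status value are stand-ins for a model and filter in this project, not confirmed names. The decorator simply appends .order_by(...) to whatever QuerySet the wrapped function returns.

@order_results_by('talk_id')
def accepted_talks():
    return Talk.objects.filter(status='A')   # hypothetical model and status value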
<SYSTEM_TASK:>
A decorator for caching the result of a function.
<END_TASK>
<USER_TASK:>
Description:
def cache_result(cache_key, timeout):
"""A decorator for caching the result of a function.""" |
def decorator(f):
cache_name = settings.WAFER_CACHE
@functools.wraps(f)
def wrapper(*args, **kw):
cache = caches[cache_name]
result = cache.get(cache_key)
if result is None:
result = f(*args, **kw)
cache.set(cache_key, result, timeout)
return result
def invalidate():
cache = caches[cache_name]
cache.delete(cache_key)
wrapper.invalidate = invalidate
return wrapper
return decorator |
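An illustrative sketch only: the cache key and timeout are made up, and generate_menu() stands for any expensive call. The wrapper stores its result in the WAFER_CACHE backend and exposes .invalidate() to drop it.

@cache_result('site-menu', 60 * 60)
def build_menu():
    return generate_menu()   # assumed expensive call defined elsewhere

build_menu.invalidate()      # next call recomputes and re-caches the menu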
<SYSTEM_TASK:>
We save all the schedule items associated with this slot, so
<END_TASK>
<USER_TASK:>
Description:
def update_schedule_items(*args, **kw):
"""We save all the schedule items associated with this slot, so
the last_update time is updated to reflect any changes to the
timing of the slots""" |
slot = kw.pop('instance', None)
if not slot:
return
for item in slot.scheduleitem_set.all():
item.save(update_fields=['last_updated'])
# We also need to update the next slot, in case we changed its
# times as well
next_slot = slot.slot_set.all()
if next_slot.count():
# From the way we structure the slot tree, we know that
# there's only 1 next slot that could have changed.
for item in next_slot[0].scheduleitem_set.all():
item.save(update_fields=['last_updated']) |
<SYSTEM_TASK:>
Create the difference between the current revision and a previous version
<END_TASK>
<USER_TASK:>
Description:
def make_diff(current, revision):
"""Create the difference between the current revision and a previous version""" |
the_diff = []
dmp = diff_match_patch()
for field in (set(current.field_dict.keys()) | set(revision.field_dict.keys())):
# These exclusions really should be configurable
if field == 'id' or field.endswith('_rendered'):
continue
# KeyError's may happen if the database structure changes
# between the creation of revisions. This isn't ideal,
# but should not be a fatal error.
# Log this?
missing_field = False
try:
cur_val = current.field_dict[field] or ""
except KeyError:
cur_val = "No such field in latest version\n"
missing_field = True
try:
old_val = revision.field_dict[field] or ""
except KeyError:
old_val = "No such field in old version\n"
missing_field = True
if missing_field:
# Ensure that the complete texts are marked as changed
# so new entries containing any of the marker words
# don't show up as differences
diffs = [(dmp.DIFF_DELETE, old_val), (dmp.DIFF_INSERT, cur_val)]
patch = dmp.diff_prettyHtml(diffs)
elif isinstance(cur_val, Markup):
# we roll our own diff here, so we can compare of the raw
# markdown, rather than the rendered result.
if cur_val.raw == old_val.raw:
continue
diffs = dmp.diff_main(old_val.raw, cur_val.raw)
patch = dmp.diff_prettyHtml(diffs)
elif cur_val == old_val:
continue
else:
# Compare the actual field values
diffs = dmp.diff_main(force_text(old_val), force_text(cur_val))
patch = dmp.diff_prettyHtml(diffs)
the_diff.append((field, patch))
the_diff.sort()
return the_diff |
<SYSTEM_TASK:>
Actually compare two versions.
<END_TASK>
<USER_TASK:>
Description:
def compare_view(self, request, object_id, version_id, extra_context=None):
"""Actually compare two versions.""" |
opts = self.model._meta
object_id = unquote(object_id)
# get_for_object's ordering means this is always the latest revision.
# The revision we want to compare to
current = Version.objects.get_for_object_reference(self.model, object_id)[0]
revision = Version.objects.get_for_object_reference(self.model, object_id).filter(id=version_id)[0]
the_diff = make_diff(current, revision)
context = {
"title": _("Comparing current %(model)s with revision created %(date)s") % {
'model': current,
'date' : get_date(revision),
},
"opts": opts,
"compare_list_url": reverse("%s:%s_%s_comparelist" % (self.admin_site.name, opts.app_label, opts.model_name),
args=(quote(object_id),)),
"diff_list": the_diff,
}
extra_context = extra_context or {}
context.update(extra_context)
return render(request, self.compare_template or self._get_template_list("compare.html"),
context) |
<SYSTEM_TASK:>
Allow selecting versions to compare.
<END_TASK>
<USER_TASK:>
Description:
def comparelist_view(self, request, object_id, extra_context=None):
"""Allow selecting versions to compare.""" |
opts = self.model._meta
object_id = unquote(object_id)
current = get_object_or_404(self.model, pk=object_id)
# As done by reversion's history_view
action_list = [
{
"revision": version.revision,
"url": reverse("%s:%s_%s_compare" % (self.admin_site.name, opts.app_label, opts.model_name), args=(quote(version.object_id), version.id)),
} for version in self._reversion_order_version_queryset(Version.objects.get_for_object_reference(
self.model,
object_id).select_related("revision__user"))]
context = {"action_list": action_list,
"opts": opts,
"object_id": quote(object_id),
"original": current,
}
extra_context = extra_context or {}
context.update(extra_context)
return render(request, self.compare_list_template or self._get_template_list("compare_list.html"),
context) |
<SYSTEM_TASK:>
Given a directory name, return the Page representing it in the menu
<END_TASK>
<USER_TASK:>
Description:
def get_parent(self, directory):
"""
Given a directory name, return the Page representing it in the menu
hierarchy.
""" |
assert settings.PAGE_DIR.startswith('/')
assert settings.PAGE_DIR.endswith('/')
parents = directory[len(settings.PAGE_DIR):]
page = None
if parents:
for slug in parents.split('/'):
page = Page.objects.get(parent=page, slug=slug)
return page |
<SYSTEM_TASK:>
Authorizes Coursera's OAuth2 client for using coursera.org API servers for
<END_TASK>
<USER_TASK:>
Description:
def authorize(args):
"""
Authorizes Coursera's OAuth2 client for using coursera.org API servers for
a specific application
""" |
oauth2_instance = oauth2.build_oauth2(args.app, args)
oauth2_instance.build_authorizer()
logging.info('Application "%s" authorized!', args.app) |
<SYSTEM_TASK:>
Checks courseraoauth2client's connectivity to the coursera.org API servers
<END_TASK>
<USER_TASK:>
Description:
def check_auth(args):
"""
Checks courseraoauth2client's connectivity to the coursera.org API servers
for a specific application
""" |
oauth2_instance = oauth2.build_oauth2(args.app, args)
auth = oauth2_instance.build_authorizer()
my_profile_url = (
'https://api.coursera.org/api/externalBasicProfiles.v1?'
'q=me&fields=name'
)
r = requests.get(my_profile_url, auth=auth)
if r.status_code != 200:
logging.error('Received response code %s from the basic profile API.',
r.status_code)
logging.debug('Response body:\n%s', r.text)
sys.exit(1)
try:
external_id = r.json()['elements'][0]['id']
except:
logging.error(
'Could not parse the external id out of the response body %s',
r.text)
external_id = None
try:
name = r.json()['elements'][0]['name']
except:
logging.error(
'Could not parse the name out of the response body %s',
r.text)
name = None
if not args.quiet > 0:
print('Name: %s' % name)
print('External ID: %s' % external_id)
if name is None or external_id is None:
sys.exit(1) |
<SYSTEM_TASK:>
Warps the length scale with a piecewise quintic "bucket" shape.
<END_TASK>
<USER_TASK:>
Description:
def quintic_bucket_warp(x, n, l1, l2, l3, x0, w1, w2, w3):
"""Warps the length scale with a piecewise quintic "bucket" shape.
Parameters
----------
x : float or array-like of float
Locations to evaluate length scale at.
n : non-negative int
Derivative order to evaluate. Only first derivatives are supported.
l1 : positive float
Length scale to the left of the bucket.
l2 : positive float
Length scale in the bucket.
l3 : positive float
Length scale to the right of the bucket.
x0 : float
Location of the center of the bucket.
w1 : positive float
Width of the left side quintic section.
w2 : positive float
Width of the bucket.
w3 : positive float
Width of the right side quintic section.
""" |
x1 = x0 - w2 / 2.0 - w1 / 2.0
x2 = x0 + w2 / 2.0 + w3 / 2.0
x_shift_1 = 2.0 * (x - x1) / w1
x_shift_3 = 2.0 * (x - x2) / w3
if n == 0:
return (
l1 * (x <= (x1 - w1 / 2.0)) + (
0.5 * (l2 - l1) * (
3.0 / 8.0 * x_shift_1**5 -
5.0 / 4.0 * x_shift_1**3 +
15.0 / 8.0 * x_shift_1
) + (l1 + l2) / 2.0
) * ((x > (x1 - w1 / 2.0)) & (x < (x1 + w1 / 2.0))) +
l2 * ((x >= (x1 + w1 / 2.0)) & (x <= x2 - w3 / 2.0)) + (
0.5 * (l3 - l2) * (
3.0 / 8.0 * x_shift_3**5 -
5.0 / 4.0 * x_shift_3**3 +
15.0 / 8.0 * x_shift_3
) + (l2 + l3) / 2.0
) * ((x > (x2 - w3 / 2.0)) & (x < (x2 + w3 / 2.0))) +
l3 * (x >= (x2 + w3 / 2.0))
)
elif n == 1:
return (
(
0.5 * (l2 - l1) * (
5.0 * 3.0 / 8.0 * x_shift_1**4 -
3.0 * 5.0 / 4.0 * x_shift_1**2 +
15.0 / 8.0
) / w1
) * ((x > (x1 - w1 / 2.0)) & (x < (x1 + w1 / 2.0))) + (
0.5 * (l3 - l2) * (
5.0 * 3.0 / 8.0 * x_shift_3**4 -
3.0 * 5.0 / 4.0 * x_shift_3**2 +
15.0 / 8.0
) / w3
) * ((x > (x2 - w3 / 2.0)) & (x < (x2 + w3 / 2.0)))
)
else:
raise NotImplementedError("Only up to first derivatives are supported!") |
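An illustrative evaluation with made-up parameters: a base length scale of 1.0 dipping to 0.2 inside a bucket of width 2.0 centred at x0 = 0.0, with quintic ramps of width 1.0 on either side; n=1 returns the derivative of the same profile.

import numpy as np

x = np.linspace(-4.0, 4.0, 201)
l = quintic_bucket_warp(x, 0, 1.0, 0.2, 1.0, 0.0, 1.0, 2.0, 1.0)
dl_dx = quintic_bucket_warp(x, 1, 1.0, 0.2, 1.0, 0.0, 1.0, 2.0, 1.0)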
<SYSTEM_TASK:>
Create a user, if the provided `user` is None, from the parameters.
<END_TASK>
<USER_TASK:>
Description:
def sso(user, desired_username, name, email, profile_fields=None):
"""
Create a user, if the provided `user` is None, from the parameters.
Then log the user in, and return it.
""" |
if not user:
if not settings.REGISTRATION_OPEN:
raise SSOError('Account registration is closed')
user = _create_desired_user(desired_username)
_configure_user(user, name, email, profile_fields)
if not user.is_active:
raise SSOError('Account disabled')
# login() expects the logging in backend to be set on the user.
# We are bypassing login, so fake it.
user.backend = settings.AUTHENTICATION_BACKENDS[0]
return user |
<SYSTEM_TASK:>
Post a debit of 'amount' and a credit of -amount against this account and credit_account respectively.
<END_TASK>
<USER_TASK:>
Description:
def debit(self, amount, credit_account, description, debit_memo="", credit_memo="", datetime=None):
""" Post a debit of 'amount' and a credit of -amount against this account and credit_account respectively.
note amount must be non-negative.
""" |
assert amount >= 0
return self.post(amount, credit_account, description, self_memo=debit_memo, other_memo=credit_memo, datetime=datetime) |
<SYSTEM_TASK:>
Post a credit of 'amount' and a debit of -amount against this account and credit_account respectively.
<END_TASK>
<USER_TASK:>
Description:
def credit(self, amount, debit_account, description, debit_memo="", credit_memo="", datetime=None):
""" Post a credit of 'amount' and a debit of -amount against this account and credit_account respectively.
note amount must be non-negative.
""" |
assert amount >= 0
return self.post(-amount, debit_account, description, self_memo=credit_memo, other_memo=debit_memo, datetime=datetime) |
<SYSTEM_TASK:>
Post a transaction of 'amount' against this account and the negative amount against 'other_account'.
<END_TASK>
<USER_TASK:>
Description:
def post(self, amount, other_account, description, self_memo="", other_memo="", datetime=None):
""" Post a transaction of 'amount' against this account and the negative amount against 'other_account'.
This will show as a debit or credit against this account when amount > 0 or amount < 0 respectively.
""" |
#Note: debits are always positive, credits are always negative. They should be negated before displaying
#(expense and liability?) accounts
tx = self._new_transaction()
if datetime:
tx.t_stamp = datetime
#else now()
tx.description = description
tx.save()
a1 = self._make_ae(self._DEBIT_IN_DB() * amount, self_memo, tx)
a1.save()
a2 = other_account._make_ae(-self._DEBIT_IN_DB() * amount, other_memo, tx)
a2.save()
return (a1, a2) |
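A hedged double-entry sketch: `checking` and `revenue` stand for two Account instances of this model (hypothetical names). debit() and credit() both delegate to post(), which writes two balancing entries under a single transaction.

from decimal import Decimal

entries = checking.debit(Decimal("100.00"), revenue,
                         description="invoice paid",   # illustrative text
                         debit_memo="cash in", credit_memo="sales")
# entries is the (debit_entry, credit_entry) pair returned by post()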
<SYSTEM_TASK:>
Returns a Totals object containing the sum of all debits, credits
<END_TASK>
<USER_TASK:>
Description:
def totals(self, start=None, end=None):
"""Returns a Totals object containing the sum of all debits, credits
and net change over the period of time from start to end.
'start' is inclusive, 'end' is exclusive
""" |
qs = self._entries_range(start=start, end=end)
qs_positive = qs.filter(amount__gt=Decimal("0.00")).all().aggregate(Sum('amount'))
qs_negative = qs.filter(amount__lt=Decimal("0.00")).all().aggregate(Sum('amount'))
#Is there a cleaner way of saying this? Should the sum of 0 things be None?
positives = qs_positive['amount__sum'] if qs_positive['amount__sum'] is not None else 0
negatives = -qs_negative['amount__sum'] if qs_negative['amount__sum'] is not None else 0
if self._DEBIT_IN_DB() > 0:
debits = positives
credits = negatives
else:
debits = negatives
credits = positives
net = debits-credits
if self._positive_credit():
net = -net
return self.Totals(credits, debits, net) |
<SYSTEM_TASK:>
Returns a list of entries for this account.
<END_TASK>
<USER_TASK:>
Description:
def ledger(self, start=None, end=None):
"""Returns a list of entries for this account.
Ledger returns a sequence of LedgerEntry's matching the criteria
in chronological order. The returned sequence can be boolean-tested
(ie. test that nothing was returned).
If 'start' is given, only entries on or after that datetime are
returned. 'start' must be given with a timezone.
If 'end' is given, only entries before that datetime are
returned. 'end' must be given with a timezone.
""" |
DEBIT_IN_DB = self._DEBIT_IN_DB()
flip = 1
if self._positive_credit():
flip *= -1
qs = self._entries_range(start=start, end=end)
qs = qs.order_by("transaction__t_stamp", "transaction__tid")
balance = Decimal("0.00")
if start:
balance = self.balance(start)
if not qs:
return []
#helper is a hack so the caller can test for no entries.
def helper(balance_in):
balance = balance_in
for e in qs.all():
amount = e.amount * DEBIT_IN_DB
o_balance = balance
balance += flip * amount
yield LedgerEntry(amount, e, o_balance, balance)
return helper(balance) |
<SYSTEM_TASK:>
Find any slots that overlap
<END_TASK>
<USER_TASK:>
Description:
def find_overlapping_slots(all_slots):
"""Find any slots that overlap""" |
overlaps = set([])
for slot in all_slots:
# Because slots are ordered, we can be more efficient than this
# N^2 loop, but this is simple and, since the number of slots
# should be low, this should be "fast enough"
start = slot.get_start_time()
end = slot.end_time
for other_slot in all_slots:
if other_slot.pk == slot.pk:
continue
if other_slot.get_day() != slot.get_day():
# different days, can't overlap
continue
# Overlap if the start_time or end_time is bounded by our times
# start_time <= other.start_time < end_time
# or
# start_time < other.end_time <= end_time
other_start = other_slot.get_start_time()
other_end = other_slot.end_time
if start <= other_start and other_start < end:
overlaps.add(slot)
overlaps.add(other_slot)
elif start < other_end and other_end <= end:
overlaps.add(slot)
overlaps.add(other_slot)
return overlaps |
<SYSTEM_TASK:>
Find any items that have slots that aren't contiguous
<END_TASK>
<USER_TASK:>
Description:
def find_non_contiguous(all_items):
"""Find any items that have slots that aren't contiguous""" |
non_contiguous = []
for item in all_items:
if item.slots.count() < 2:
# No point in checking
continue
last_slot = None
for slot in item.slots.all().order_by('end_time'):
if last_slot:
if last_slot.end_time != slot.get_start_time():
non_contiguous.append(item)
break
last_slot = slot
return non_contiguous |
<SYSTEM_TASK:>
Find venues assigned slots that aren't on the allowed list
<END_TASK>
<USER_TASK:>
Description:
def find_invalid_venues(all_items):
"""Find venues assigned slots that aren't on the allowed list
of days.""" |
venues = {}
for item in all_items:
valid = False
item_days = list(item.venue.days.all())
for slot in item.slots.all():
for day in item_days:
if day == slot.get_day():
valid = True
break
if not valid:
venues.setdefault(item.venue, [])
venues[item.venue].append(item)
return venues.items() |
<SYSTEM_TASK:>
Helper routine to easily test if the schedule is valid
<END_TASK>
<USER_TASK:>
Description:
def check_schedule():
"""Helper routine to easily test if the schedule is valid""" |
all_items = prefetch_schedule_items()
for validator, _type, _msg in SCHEDULE_ITEM_VALIDATORS:
if validator(all_items):
return False
all_slots = prefetch_slots()
for validator, _type, _msg in SLOT_VALIDATORS:
if validator(all_slots):
return False
return True |
<SYSTEM_TASK:>
Helper routine to report issues with the schedule
<END_TASK>
<USER_TASK:>
Description:
def validate_schedule():
"""Helper routine to report issues with the schedule""" |
all_items = prefetch_schedule_items()
errors = []
for validator, _type, msg in SCHEDULE_ITEM_VALIDATORS:
if validator(all_items):
errors.append(msg)
all_slots = prefetch_slots()
for validator, _type, msg in SLOT_VALIDATORS:
if validator(all_slots):
errors.append(msg)
return errors |
<SYSTEM_TASK:>
Change the form depending on whether we're adding or
<END_TASK>
<USER_TASK:>
Description:
def get_form(self, request, obj=None, **kwargs):
"""Change the form depending on whether we're adding or
editing the slot.""" |
if obj is None:
# Adding a new Slot
kwargs['form'] = SlotAdminAddForm
return super(SlotAdmin, self).get_form(request, obj, **kwargs) |
<SYSTEM_TASK:>
Return the menus from the cache or generate them if needed.
<END_TASK>
<USER_TASK:>
Description:
def get_cached_menus():
"""Return the menus from the cache or generate them if needed.""" |
items = cache.get(CACHE_KEY)
if items is None:
menu = generate_menu()
cache.set(CACHE_KEY, menu.items)
else:
menu = Menu(items)
return menu |
<SYSTEM_TASK:>
If argument is not a string, return it.
<END_TASK>
<USER_TASK:>
Description:
def maybe_obj(str_or_obj):
"""If argument is not a string, return it.
Otherwise import the dotted name and return that.
""" |
if not isinstance(str_or_obj, six.string_types):
return str_or_obj
parts = str_or_obj.split(".")
mod, modname = None, None
for p in parts:
modname = p if modname is None else "%s.%s" % (modname, p)
try:
mod = __import__(modname)
except ImportError:
if mod is None:
raise
break
obj = mod
for p in parts[1:]:
obj = getattr(obj, p)
return obj |
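A short illustration: maybe_obj() only does work when handed a string; anything else passes through untouched.

join_func = maybe_obj("os.path.join")   # imports os.path and returns the join function
same_list = maybe_obj([1, 2, 3])        # non-strings are returned as-is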
<SYSTEM_TASK:>
Some types get serialized to JSON, as strings.
<END_TASK>
<USER_TASK:>
Description:
def deserialize_by_field(value, field):
"""
Some types get serialized to JSON, as strings.
If we know what they are supposed to be, we can deserialize them
""" |
if isinstance(field, forms.DateTimeField):
value = parse_datetime(value)
elif isinstance(field, forms.DateField):
value = parse_date(value)
elif isinstance(field, forms.TimeField):
value = parse_time(value)
return value |
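A small sketch of the helper above (the date string is illustrative): a DateField turns the serialized string back into a date, while fields of other types pass the value through unchanged.

from django import forms

value = deserialize_by_field("2024-05-01", forms.DateField())
# value is datetime.date(2024, 5, 1)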
<SYSTEM_TASK:>
Returns the corresponding url from the sponsor's images
<END_TASK>
<USER_TASK:>
Description:
def sponsor_image_url(sponsor, name):
"""Returns the corresponding url from the sponsors images""" |
if sponsor.files.filter(name=name).exists():
# We avoid worrying about multiple matches by always
# returning the first one.
return sponsor.files.filter(name=name).first().item.url
return '' |
<SYSTEM_TASK:>
returns the corresponding url from the tagged image list.
<END_TASK>
<USER_TASK:>
Description:
def sponsor_tagged_image(sponsor, tag):
"""returns the corresponding url from the tagged image list.""" |
if sponsor.files.filter(tag_name=tag).exists():
return sponsor.files.filter(tag_name=tag).first().tagged_file.item.url
return '' |
<SYSTEM_TASK:>
Check to see if the currently logged in user belongs to a specific
<END_TASK>
<USER_TASK:>
Description:
def ifusergroup(parser, token):
""" Check to see if the currently logged in user belongs to a specific
group. Requires the Django authentication contrib app and middleware.
Usage: {% ifusergroup Admins %} ... {% endifusergroup %}, or
{% ifusergroup Admins Clients Sellers %} ... {% else %} ... {% endifusergroup %}
""" |
    tokensp = token.split_contents()
    groups = tokensp[1:]
    if not groups:
        raise template.TemplateSyntaxError("Tag 'ifusergroup' requires at least 1 argument.")
nodelist_true = parser.parse(('else', 'endifusergroup'))
token = parser.next_token()
if token.contents == 'else':
        nodelist_false = parser.parse(('endifusergroup',))
parser.delete_first_token()
else:
nodelist_false = NodeList()
return GroupCheckNode(groups, nodelist_true, nodelist_false) |
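GroupCheckNode is returned above but not shown in this excerpt; a minimal sketch of such a node, assuming the standard Django template Node API and that the request user is exposed in the template context, could look like this.

class GroupCheckNode(template.Node):
    """Render nodelist_true when the user belongs to any of the named groups."""

    def __init__(self, groups, nodelist_true, nodelist_false):
        self.groups = groups
        self.nodelist_true = nodelist_true
        self.nodelist_false = nodelist_false

    def render(self, context):
        user = context.get('user')
        # On older Django versions is_authenticated is a method, not a property.
        if user and user.is_authenticated and \
                user.groups.filter(name__in=self.groups).exists():
            return self.nodelist_true.render(context)
        return self.nodelist_false.render(context)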
<SYSTEM_TASK:>
Called if all forms are valid. Creates a Recipe instance along with associated Ingredients and Instructions and then redirects to a success page.
<END_TASK>
<USER_TASK:>
Description:
def form_valid(self, form, forms):
"""
Called if all forms are valid. Creates a Recipe instance along with associated Ingredients and Instructions and then redirects to a success page.
""" |
if self.object:
form.save()
for (formobj, linkerfield) in forms:
if form != formobj:
formobj.save()
else:
self.object = form.save()
for (formobj, linkerfield) in forms:
if form != formobj:
setattr(formobj.instance, linkerfield, self.object)
formobj.save()
return HttpResponseRedirect(self.get_success_url()) |
<SYSTEM_TASK:>
Called if a form is invalid. Re-renders the context data with the data-filled forms and errors.
<END_TASK>
<USER_TASK:>
Description:
def form_invalid(self, form, forms, open_tabs, position_form_default):
"""
Called if a form is invalid. Re-renders the context data with the data-filled forms and errors.
""" |
        return self.render_to_response(self.get_context_data(
            form=form, forms=forms, open_tabs=open_tabs,
            position_form_default=position_form_default)) |
<SYSTEM_TASK:>
Make a plot of a mean curve with uncertainty envelopes.
<END_TASK>
<USER_TASK:>
Description:
def univariate_envelope_plot(x, mean, std, ax=None, base_alpha=0.375, envelopes=[1, 3], lb=None, ub=None, expansion=10, **kwargs):
"""Make a plot of a mean curve with uncertainty envelopes.
""" |
if ax is None:
f = plt.figure()
ax = f.add_subplot(1, 1, 1)
elif ax == 'gca':
ax = plt.gca()
mean = scipy.asarray(mean, dtype=float).copy()
std = scipy.asarray(std, dtype=float).copy()
# Truncate the data so matplotlib doesn't die:
if lb is not None and ub is not None and expansion != 1.0:
expansion *= ub - lb
ub = ub + expansion
lb = lb - expansion
if ub is not None:
mean[mean > ub] = ub
if lb is not None:
mean[mean < lb] = lb
l = ax.plot(x, mean, **kwargs)
color = plt.getp(l[0], 'color')
e = []
for i in envelopes:
lower = mean - i * std
upper = mean + i * std
if ub is not None:
lower[lower > ub] = ub
upper[upper > ub] = ub
if lb is not None:
lower[lower < lb] = lb
upper[upper < lb] = lb
e.append(ax.fill_between(x, lower, upper, facecolor=color, alpha=base_alpha / i))
return (l, e) |
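A usage sketch with synthetic data, assuming numpy is available alongside the matplotlib and scipy imports the function already relies on:

import numpy as np

x = np.linspace(0, 1, 200)
mean = np.sin(2 * np.pi * x)
std = 0.1 + 0.05 * x
lines, bands = univariate_envelope_plot(
    x, mean, std, envelopes=[1, 3], lb=-1.5, ub=1.5, label='posterior mean')
plt.legend()
plt.show()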
<SYSTEM_TASK:>
Gains token from secure backend service.
<END_TASK>
<USER_TASK:>
Description:
def fetch_token(self):
"""Gains token from secure backend service.
:return: Token formatted for Cocaine protocol header.
""" |
grant_type = 'client_credentials'
channel = yield self._tvm.ticket_full(
self._client_id, self._client_secret, grant_type, {})
ticket = yield channel.rx.get()
raise gen.Return(self._make_token(ticket)) |
<SYSTEM_TASK:>
Extracting information from an albacore summary file.
<END_TASK>
<USER_TASK:>
Description:
def process_summary(summaryfile, **kwargs):
"""Extracting information from an albacore summary file.
Only reads which have a >0 length are returned.
The fields below may or may not exist, depending on the type of sequencing performed.
Fields 1-14 are for 1D sequencing.
Fields 1-23 for 2D sequencing.
Fields 24-27, 2-5, 22-23 for 1D^2 (1D2) sequencing
Fields 28-38 for barcoded workflows
1 filename
2 read_id
3 run_id
4 channel
5 start_time
6 duration
7 num_events
8 template_start
9 num_events_template
10 template_duration
11 num_called_template
12 sequence_length_template
13 mean_qscore_template
14 strand_score_template
15 complement_start
16 num_events_complement
17 complement_duration
18 num_called_complement
19 sequence_length_complement
20 mean_qscore_complement
21 strand_score_complement
22 sequence_length_2d
23 mean_qscore_2d
24 filename1
25 filename2
26 read_id1
27 read_id2
28 barcode_arrangement
29 barcode_score
30 barcode_full_arrangement
31 front_score
32 rear_score
33 front_begin_index
34 front_foundseq_length
35 rear_end_index
36 rear_foundseq_length
37 kit
38 variant
""" |
logging.info("Nanoget: Collecting metrics from summary file {} for {} sequencing".format(
summaryfile, kwargs["readtype"]))
ut.check_existance(summaryfile)
if kwargs["readtype"] == "1D":
cols = ["read_id", "run_id", "channel", "start_time", "duration",
"sequence_length_template", "mean_qscore_template"]
elif kwargs["readtype"] in ["2D", "1D2"]:
cols = ["read_id", "run_id", "channel", "start_time", "duration",
"sequence_length_2d", "mean_qscore_2d"]
if kwargs["barcoded"]:
cols.append("barcode_arrangement")
logging.info("Nanoget: Extracting metrics per barcode.")
try:
datadf = pd.read_csv(
filepath_or_buffer=summaryfile,
sep="\t",
usecols=cols,
)
except ValueError:
logging.error("Nanoget: did not find expected columns in summary file {}:\n {}".format(
summaryfile, ', '.join(cols)))
sys.exit("ERROR: expected columns in summary file {} not found:\n {}".format(
summaryfile, ', '.join(cols)))
if kwargs["barcoded"]:
datadf.columns = ["readIDs", "runIDs", "channelIDs", "time", "duration",
"lengths", "quals", "barcode"]
else:
datadf.columns = ["readIDs", "runIDs", "channelIDs", "time", "duration", "lengths", "quals"]
logging.info("Nanoget: Finished collecting statistics from summary file {}".format(summaryfile))
return ut.reduce_memory_usage(datadf.loc[datadf["lengths"] != 0].copy()) |
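A usage sketch; the file name is illustrative and the keyword arguments mirror those consumed above.

df = process_summary("sequencing_summary.txt", readtype="1D", barcoded=False)
# Columns after renaming: readIDs, runIDs, channelIDs, time, duration, lengths, quals
print(df["lengths"].median(), df["quals"].mean())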
<SYSTEM_TASK:>
Check if bam file is valid.
<END_TASK>
<USER_TASK:>
Description:
def check_bam(bam, samtype="bam"):
"""Check if bam file is valid.
Bam file should:
- exists
- has an index (create if necessary)
- is sorted by coordinate
- has at least one mapped read
""" |
ut.check_existance(bam)
samfile = pysam.AlignmentFile(bam, "rb")
if not samfile.has_index():
pysam.index(bam)
samfile = pysam.AlignmentFile(bam, "rb") # Need to reload the samfile after creating index
logging.info("Nanoget: No index for bam file could be found, created index.")
    if samfile.header['HD']['SO'] != 'coordinate':
        logging.error("Nanoget: Bam file {} not sorted by coordinate!".format(bam))
        sys.exit("Please use a bam file sorted by coordinate.")
if samtype == "bam":
logging.info("Nanoget: Bam file {} contains {} mapped and {} unmapped reads.".format(
bam, samfile.mapped, samfile.unmapped))
if samfile.mapped == 0:
logging.error("Nanoget: Bam file {} does not contain aligned reads.".format(bam))
sys.exit("FATAL: not a single read was mapped in bam file {}".format(bam))
return samfile |
<SYSTEM_TASK:>
Extracting metrics from unaligned bam format
<END_TASK>
<USER_TASK:>
Description:
def process_ubam(bam, **kwargs):
"""Extracting metrics from unaligned bam format
Extracting lengths
""" |
logging.info("Nanoget: Starting to collect statistics from ubam file {}.".format(bam))
samfile = pysam.AlignmentFile(bam, "rb", check_sq=False)
if not samfile.has_index():
pysam.index(bam)
# Need to reload the samfile after creating index
samfile = pysam.AlignmentFile(bam, "rb")
logging.info("Nanoget: No index for bam file could be found, created index.")
datadf = pd.DataFrame(
data=[(read.query_name, nanomath.ave_qual(read.query_qualities), read.query_length)
for read in samfile.fetch(until_eof=True)],
columns=["readIDs", "quals", "lengths"]) \
.dropna(axis='columns', how='all') \
.dropna(axis='index', how='any')
logging.info("Nanoget: ubam {} contains {} reads.".format(
bam, datadf["lengths"].size))
return ut.reduce_memory_usage(datadf) |
<SYSTEM_TASK:>
Combines metrics from bam after extraction.
<END_TASK>
<USER_TASK:>
Description:
def process_bam(bam, **kwargs):
"""Combines metrics from bam after extraction.
Processing function: calls pool of worker functions
to extract from a bam file the following metrics:
-lengths
-aligned lengths
-qualities
-aligned qualities
-mapping qualities
-edit distances to the reference genome scaled by read length
Returned in a pandas DataFrame
""" |
logging.info("Nanoget: Starting to collect statistics from bam file {}.".format(bam))
samfile = check_bam(bam)
chromosomes = samfile.references
params = zip([bam] * len(chromosomes), chromosomes)
with cfutures.ProcessPoolExecutor() as executor:
datadf = pd.DataFrame(
data=[res for sublist in executor.map(extract_from_bam, params) for res in sublist],
columns=["readIDs", "quals", "aligned_quals", "lengths",
"aligned_lengths", "mapQ", "percentIdentity"]) \
.dropna(axis='columns', how='all') \
.dropna(axis='index', how='any')
logging.info("Nanoget: bam {} contains {} primary alignments.".format(
bam, datadf["lengths"].size))
return ut.reduce_memory_usage(datadf) |
<SYSTEM_TASK:>
Extracts metrics from bam.
<END_TASK>
<USER_TASK:>
Description:
def extract_from_bam(params):
"""Extracts metrics from bam.
Worker function per chromosome
loop over a bam file and create list with tuples containing metrics:
-qualities
-aligned qualities
-lengths
-aligned lengths
-mapping qualities
-edit distances to the reference genome scaled by read length
""" |
bam, chromosome = params
samfile = pysam.AlignmentFile(bam, "rb")
return [
(read.query_name,
nanomath.ave_qual(read.query_qualities),
nanomath.ave_qual(read.query_alignment_qualities),
read.query_length,
read.query_alignment_length,
read.mapping_quality,
get_pID(read))
for read in samfile.fetch(reference=chromosome, multiple_iterators=True)
if not read.is_secondary] |
<SYSTEM_TASK:>
Return the percent identity of a read.
<END_TASK>
<USER_TASK:>
Description:
def get_pID(read):
"""Return the percent identity of a read.
based on the NM tag if present,
if not calculate from MD tag and CIGAR string
read.query_alignment_length can be zero in the case of ultra long reads aligned with minimap2 -L
""" |
try:
return 100 * (1 - read.get_tag("NM") / read.query_alignment_length)
except KeyError:
try:
return 100 * (1 - (parse_MD(read.get_tag("MD")) + parse_CIGAR(read.cigartuples))
/ read.query_alignment_length)
except KeyError:
return None
except ZeroDivisionError:
return None |
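parse_MD and parse_CIGAR are called above but defined elsewhere in the library; the sketch below is one plausible reading of what they compute (mismatched bases from the MD tag plus inserted and deleted bases from the CIGAR), not the library's exact implementation.

import re

def parse_MD(md_tag):
    # Count mismatched reference bases; deletion runs ("^ACGT") are stripped
    # first so they are not double-counted with the CIGAR deletions.
    return len(re.findall(r"[A-Za-z]", re.sub(r"\^[A-Za-z]+", "", md_tag)))

def parse_CIGAR(cigartuples):
    # Sum the lengths of insertions (op 1) and deletions (op 2).
    return sum(length for op, length in cigartuples if op in (1, 2))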
<SYSTEM_TASK:>
Return handles from compressed files according to extension.
<END_TASK>
<USER_TASK:>
Description:
def handle_compressed_input(inputfq, file_type="fastq"):
"""Return handles from compressed files according to extension.
Check for which fastq input is presented and open a handle accordingly
Can read from compressed files (gz, bz2, bgz) or uncompressed
Relies on file extensions to recognize compression
""" |
ut.check_existance(inputfq)
if inputfq.endswith(('.gz', 'bgz')):
import gzip
logging.info("Nanoget: Decompressing gzipped {} {}".format(file_type, inputfq))
return gzip.open(inputfq, 'rt')
elif inputfq.endswith('.bz2'):
import bz2
logging.info("Nanoget: Decompressing bz2 compressed {} {}".format(file_type, inputfq))
return bz2.open(inputfq, 'rt')
elif inputfq.endswith(('.fastq', '.fq', 'fasta', '.fa', '.fas')):
return open(inputfq, 'r')
else:
logging.error("INPUT ERROR: Unrecognized file extension {}".format(inputfq))
sys.exit('INPUT ERROR:\nUnrecognized file extension in {}\n'
'Supported are gz, bz2, bgz, fastq, fq, fasta, fa and fas'.format(inputfq)) |
<SYSTEM_TASK:>
Combine metrics extracted from a fasta file.
<END_TASK>
<USER_TASK:>
Description:
def process_fasta(fasta, **kwargs):
"""Combine metrics extracted from a fasta file.""" |
logging.info("Nanoget: Starting to collect statistics from a fasta file.")
inputfasta = handle_compressed_input(fasta, file_type="fasta")
return ut.reduce_memory_usage(pd.DataFrame(
data=[len(rec) for rec in SeqIO.parse(inputfasta, "fasta")],
columns=["lengths"]
).dropna()) |
<SYSTEM_TASK:>
Combine metrics extracted from a fastq file.
<END_TASK>
<USER_TASK:>
Description:
def process_fastq_plain(fastq, **kwargs):
"""Combine metrics extracted from a fastq file.""" |
logging.info("Nanoget: Starting to collect statistics from plain fastq file.")
inputfastq = handle_compressed_input(fastq)
return ut.reduce_memory_usage(pd.DataFrame(
data=[res for res in extract_from_fastq(inputfastq) if res],
columns=["quals", "lengths"]
).dropna()) |
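extract_from_fastq is used above but not shown in this excerpt; a minimal sketch consistent with the (quals, lengths) columns it must feed, assuming the Bio.SeqIO and nanomath imports used elsewhere in this module:

def extract_from_fastq(fq):
    # Yield one (average quality, read length) tuple per fastq record.
    for rec in SeqIO.parse(fq, "fastq"):
        yield nanomath.ave_qual(rec.letter_annotations["phred_quality"]), len(rec)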
<SYSTEM_TASK:>
Generator for returning metrics extracted from fastq.
<END_TASK>
<USER_TASK:>
Description:
def stream_fastq_full(fastq, threads):
"""Generator for returning metrics extracted from fastq.
Extract from a fastq file:
-readname
-average and median quality
    -read_length
""" |
logging.info("Nanoget: Starting to collect full metrics from plain fastq file.")
inputfastq = handle_compressed_input(fastq)
with cfutures.ProcessPoolExecutor(max_workers=threads) as executor:
for results in executor.map(extract_all_from_fastq, SeqIO.parse(inputfastq, "fastq")):
yield results
logging.info("Nanoget: Finished collecting statistics from plain fastq file.") |
<SYSTEM_TASK:>
Extract metrics from a richer fastq file.
<END_TASK>
<USER_TASK:>
Description:
def process_fastq_rich(fastq, **kwargs):
"""Extract metrics from a richer fastq file.
Extract information from fastq files generated by albacore or MinKNOW,
containing richer information in the header (key-value pairs)
read=<int> [72]
ch=<int> [159]
start_time=<timestamp> [2016-07-15T14:23:22Z] # UTC ISO 8601 ISO 3339 timestamp
Z indicates UTC time, T is the delimiter between date expression and time expression
dateutil.parser.parse("2016-07-15T14:23:22Z") imported as dparse
-> datetime.datetime(2016, 7, 15, 14, 23, 22, tzinfo=tzutc())
""" |
logging.info("Nanoget: Starting to collect statistics from rich fastq file.")
inputfastq = handle_compressed_input(fastq)
res = []
for record in SeqIO.parse(inputfastq, "fastq"):
try:
read_info = info_to_dict(record.description)
res.append(
(nanomath.ave_qual(record.letter_annotations["phred_quality"]),
len(record),
read_info["ch"],
read_info["start_time"],
read_info["runid"]))
except KeyError:
logging.error("Nanoget: keyerror when processing record {}".format(record.description))
sys.exit("Unexpected fastq identifier:\n{}\n\n \
missing one or more of expected fields 'ch', 'start_time' or 'runid'".format(
record.description))
df = pd.DataFrame(
data=res,
columns=["quals", "lengths", "channelIDs", "timestamp", "runIDs"]).dropna()
df["channelIDs"] = df["channelIDs"].astype("int64")
return ut.reduce_memory_usage(df) |
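info_to_dict is called above but not shown; a minimal sketch that splits the whitespace-separated key=value pairs of a rich fastq description into a dict:

def info_to_dict(description):
    # "read=72 ch=159 start_time=2016-07-15T14:23:22Z runid=abc..." -> dict
    return dict(field.split('=', 1)
                for field in description.split(' ') if '=' in field)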
<SYSTEM_TASK:>
Minimal fastq metrics extractor.
<END_TASK>
<USER_TASK:>
Description:
def fq_minimal(fq):
"""Minimal fastq metrics extractor.
    Quickly parse a fasta/fastq file - but makes assumptions about the file format
There will be dragons if unexpected format is used
Expects a fastq_rich format, but extracts only timestamp and length
""" |
try:
while True:
time = next(fq)[1:].split(" ")[4][11:-1]
length = len(next(fq))
next(fq)
next(fq)
yield time, length
except StopIteration:
yield None |
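A usage sketch driving the generator over an open handle; the file name is illustrative, and the final None signals that the handle is exhausted.

with open("reads_rich.fastq") as fq:  # hypothetical rich-format fastq
    for entry in fq_minimal(fq):
        if entry is None:
            break
        timestamp, length = entry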
<SYSTEM_TASK:>
Returns Piece subclass given index of piece.
<END_TASK>
<USER_TASK:>
Description:
def _get_piece(string, index):
"""
Returns Piece subclass given index of piece.
    :type string: str
    :type index: int
    :raise: ValueError
""" |
piece = string[index].strip()
piece = piece.upper()
piece_dict = {'R': Rook,
'P': Pawn,
'B': Bishop,
'N': Knight,
'Q': Queen,
'K': King}
try:
return piece_dict[piece]
except KeyError:
raise ValueError("Piece {} is invalid".format(piece)) |
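A usage sketch, assuming the same module namespace where the piece classes are imported: the character at the given index selects the piece class.

piece_cls = _get_piece("e4Nf3", 2)  # 'N' maps to the Knight class
assert piece_cls is Knight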