text_prompt
stringlengths 100
17.7k
⌀ | code_prompt
stringlengths 7
9.86k
⌀ |
---|---|
<SYSTEM_TASK:>
return a location key from the locationMap
<END_TASK>
<USER_TASK:>
Description:
def _findLocation(self, reference_name, start, end):
    """
    Return a location key from the locationMap, or None when no entry
    exists for the given (reference_name, start, end) triple.
    """
    try:
        # TODO - sequence_annotations does not have build?
        # The map is keyed build -> reference name -> start -> end.
        return self._locationMap['hg19'][reference_name][start][end]
    except KeyError:
        # Narrowed from a bare except: only a missing key at any level
        # means "location unknown"; other errors should propagate.
        return None
<SYSTEM_TASK:>
Appends the specified protocolElement to the value list for this
<END_TASK>
<USER_TASK:>
Description:
def addValue(self, protocolElement):
    """
    Appends the specified protocolElement to the value list for this
    response, keeping the element count and running byte total in sync.
    """
    self._numElements += 1
    self._bufferSize += protocolElement.ByteSize()
    # Look up the repeated field by its configured name and copy the
    # element into a freshly added slot.
    valueList = getattr(self._protoObject, self._valueListName)
    newElement = valueList.add()
    newElement.CopyFrom(protocolElement)
<SYSTEM_TASK:>
Returns a string version of the SearchResponse that has
<END_TASK>
<USER_TASK:>
Description:
def getSerializedResponse(self):
    """
    Returns the JSON string form of the SearchResponse that has been
    built by this SearchResponseBuilder.
    """
    # Stamp the page token onto the response just before serializing.
    self._protoObject.next_page_token = pb.string(self._nextPageToken)
    return protocol.toJson(self._protoObject)
<SYSTEM_TASK:>
Populates this Ontology using values in the specified DB row.
<END_TASK>
<USER_TASK:>
Description:
def populateFromRow(self, ontologyRecord):
    """
    Populates this Ontology using values in the specified DB row.

    :param ontologyRecord: a database row exposing ``id`` and
        ``dataurl`` attributes.
    """
    self._id = ontologyRecord.id
    self._dataUrl = ontologyRecord.dataurl
    # Immediately load the ontology contents from the stored data URL.
    self._readFile()
<SYSTEM_TASK:>
Returns a GA4GH OntologyTerm object by name.
<END_TASK>
<USER_TASK:>
Description:
def getGaTermByName(self, name):
    """
    Returns a GA4GH OntologyTerm object by name.
    :param name: name of the ontology term, ex. "gene".
    :return: GA4GH OntologyTerm object.
    """
    # TODO what is the correct value when we have no mapping??
    # TODO add logging for missed term translation.
    # TODO what is the correct behaviour here when we have multiple
    # IDs matching a given name?
    termIds = self.getTermIds(name)
    # Take the first mapped ID when one exists, otherwise fall back
    # to the empty string.
    termId = termIds[0] if termIds else ""
    term = protocol.OntologyTerm()
    term.term = name
    term.term_id = termId
    return term
<SYSTEM_TASK:>
Converts the specified error code into the corresponding class object.
<END_TASK>
<USER_TASK:>
Description:
def getExceptionClass(errorCode):
    """
    Converts the specified error code into the corresponding class object.
    Raises a KeyError if the errorCode is not found.
    """
    # Build a code -> class map from every BaseServerException subclass
    # defined in this module, then index it directly.
    members = inspect.getmembers(sys.modules[__name__], inspect.isclass)
    classMap = {
        klass.getErrorCode(): klass
        for _, klass in members
        if issubclass(klass, BaseServerException)
    }
    return classMap[errorCode]
<SYSTEM_TASK:>
Converts this exception into the GA4GH protocol type so that
<END_TASK>
<USER_TASK:>
Description:
def toProtocolElement(self):
    """
    Converts this exception into the GA4GH protocol type so that
    it can be communicated back to the client.
    """
    gaException = protocol.GAException()
    # Copy the code and human-readable message from this exception.
    gaException.error_code = self.getErrorCode()
    gaException.message = self.getMessage()
    return gaException
<SYSTEM_TASK:>
Initialize new reference and perform checks.
<END_TASK>
<USER_TASK:>
Description:
def _init_goterm_ref(self, rec_curr, name, lnum):
    """Initialize new reference and perform checks."""
    # A still-open previous record means the file is malformed.
    if rec_curr is not None:
        self._die(
            "PREVIOUS {REC} WAS NOT TERMINATED AS EXPECTED".format(REC=name),
            lnum)
    return GOTerm()
<SYSTEM_TASK:>
Initialize new typedef and perform checks.
<END_TASK>
<USER_TASK:>
Description:
def _init_typedef(self, typedef_curr, name, lnum):
    """Initialize new typedef and perform checks."""
    # A still-open previous typedef means the file is malformed.
    if typedef_curr is not None:
        self._die(
            "PREVIOUS {REC} WAS NOT TERMINATED AS EXPECTED".format(REC=name),
            lnum)
    return TypeDef()
<SYSTEM_TASK:>
Adds a term's nested attributes.
<END_TASK>
<USER_TASK:>
Description:
def _add_nested(self, rec, name, value):
    """Adds a term's nested attributes.

    ``value`` is expected to look like
    ``"typedef_name target_id ! optional comment"``; the comment is
    stripped and the remainder must split into exactly two fields.
    """
    # Remove comments and split term into typedef / target term.
    (typedef, target_term) = value.split('!')[0].rstrip().split(' ')
    # Save the nested term under rec.<name>[typedef].
    getattr(rec, name)[typedef].append(target_term)
<SYSTEM_TASK:>
Raise an Exception if file read is unexpected.
<END_TASK>
<USER_TASK:>
Description:
def _die(self, msg, lnum):
    """Abort parsing by raising an Exception describing the problem
    and where in the OBO file it occurred."""
    message = "**FATAL {FILE}({LNUM}): {MSG}\n".format(
        FILE=self.obo_file, LNUM=lnum, MSG=msg)
    raise Exception(message)
<SYSTEM_TASK:>
Write hierarchy for a GO Term record.
<END_TASK>
<USER_TASK:>
Description:
def write_hier_rec(self, gos_printed, out=sys.stdout,
        len_dash=1, max_depth=None, num_child=None, short_prt=False,
        include_only=None, go_marks=None,
        depth=1, dp="-"):
    """Write hierarchy for a GO Term record.

    Recursively prints this term and its children, one line per term,
    with dashes indicating depth. ``gos_printed`` is the set of GO ids
    already printed (mutated in place when short_prt is set).
    """
    # Added by DV Klopfenstein
    GO_id = self.id
    # Shortens hierarchy report by only printing the hierarchy
    # for the sub-set of user-specified GO terms which are connected.
    if include_only is not None and GO_id not in include_only:
        return
    # nrp: "no re-print" -- this term was already printed in detail.
    nrp = short_prt and GO_id in gos_printed
    if go_marks is not None:
        out.write('{} '.format('>' if GO_id in go_marks else ' '))
    if len_dash is not None:
        # Default character indicating hierarchy level is '-'.
        # '=' is used to indicate a hierarchical path printed in detail previously.
        letter = '-' if not nrp or not self.children else '='
        dp = ''.join([letter]*depth)
        out.write('{DASHES:{N}} '.format(DASHES=dp, N=len_dash))
    if num_child is not None:
        out.write('{N:>5} '.format(N=len(self.get_all_children())))
    out.write('{GO}\tL-{L:>02}\tD-{D:>02}\t{desc}\n'.format(
        GO=self.id, L=self.level, D=self.depth, desc=self.name))
    # Track GOs previously printed only if needed
    if short_prt:
        gos_printed.add(GO_id)
    # Do not print hierarchy below this turn if it has already been printed
    if nrp:
        return
    depth += 1
    if max_depth is not None and depth > max_depth:
        return
    # Recurse into each child at the next depth level.
    for p in self.children:
        p.write_hier_rec(gos_printed, out, len_dash, max_depth, num_child, short_prt,
            include_only, go_marks,
            depth, dp)
<SYSTEM_TASK:>
Returns all possible paths to the root node
<END_TASK>
<USER_TASK:>
Description:
def paths_to_top(self, term):
    """ Returns all possible paths to the root node
    Each path includes the term given. The order of the path is
    top -> bottom, i.e. it starts with the root and ends with the
    given term (inclusively).
    Parameters:
    -----------
    - term:
        the id of the GO term, where the paths begin (i.e. the
        accession 'GO:0003682')
    Returns:
    --------
    - a list of lists of GO Terms
    """
    # error handling consistent with original authors
    # (prints a warning and returns None rather than raising).
    if term not in self:
        print("Term %s not found!" % term, file=sys.stderr)
        return

    def _paths_to_top_recursive(rec):
        # A level-0 record is a root: the path consists of itself.
        if rec.level == 0:
            return [[rec]]
        paths = []
        # Extend every path to each parent with this record appended,
        # so paths are built root-first.
        for parent in rec.parents:
            top_paths = _paths_to_top_recursive(parent)
            for top_path in top_paths:
                top_path.append(rec)
                paths.append(top_path)
        return paths

    go_term = self[term]
    return _paths_to_top_recursive(go_term)
<SYSTEM_TASK:>
draw AMIGO style network, lineage containing one query record.
<END_TASK>
<USER_TASK:>
Description:
def make_graph_pydot(self, recs, nodecolor,
                     edgecolor, dpi,
                     draw_parents=True, draw_children=True):
    """draw AMIGO style network, lineage containing one query record.

    :param recs: the query GO term records whose lineage is drawn.
    :param nodecolor: outline color for every node.
    :param edgecolor: color for every is_a edge.
    :param dpi: output resolution passed to pydot.
    :return: a pydot.Dot directed graph.
    """
    import pydot
    G = pydot.Dot(graph_type='digraph', dpi="{}".format(dpi))  # Directed Graph
    edgeset = set()
    usr_ids = [rec.id for rec in recs]
    # Collect the edges for each query record's ancestry/descendants.
    for rec in recs:
        if draw_parents:
            edgeset.update(rec.get_all_parent_edges())
        if draw_children:
            edgeset.update(rec.get_all_child_edges())
    lw = self._label_wrap
    # Every id that appears at either end of an edge becomes a node.
    rec_id_set = set([rec_id for endpts in edgeset for rec_id in endpts])
    nodes = {str(ID):pydot.Node(
        lw(ID).replace("GO:",""),  # Node name
        shape="box",
        style="rounded, filled",
        # Highlight query terms in plum:
        fillcolor="beige" if ID not in usr_ids else "plum",
        color=nodecolor)
        for ID in rec_id_set}
    # add nodes explicitly via add_node
    for rec_id, node in nodes.items():
        G.add_node(node)
    for src, target in edgeset:
        # default layout in graphviz is top->bottom, so we invert
        # the direction and plot using dir="back"
        G.add_edge(pydot.Edge(nodes[target], nodes[src],
                              shape="normal",
                              color=edgecolor,
                              label="is_a",
                              dir="back"))
    return G
<SYSTEM_TASK:>
Unpacks sqlite rows as returned by fetchall
<END_TASK>
<USER_TASK:>
Description:
def sqliteRowsToDicts(sqliteRows):
    """
    Unpacks sqlite rows as returned by fetchall
    into an array of simple dicts.
    :param sqliteRows: array of rows returned from fetchall DB call
    :return: list of dicts, keyed by the column names.
    """
    # A list comprehension (rather than map) guarantees the documented
    # "array" return type: on Python 3 map() returns a lazy iterator
    # that can only be consumed once.
    return [dict(zip(row.keys(), row)) for row in sqliteRows]
<SYSTEM_TASK:>
Returns rows of a sql fetch query on demand
<END_TASK>
<USER_TASK:>
Description:
def iterativeFetch(query, batchSize=default_batch_size):
    """
    Generator yielding the rows of a sql fetch query on demand,
    pulling them from the cursor in batches of batchSize and
    converting each batch to plain dicts.
    """
    while True:
        batch = query.fetchmany(batchSize)
        if not batch:
            return
        for rowDict in sqliteRowsToDicts(batch):
            yield rowDict
<SYSTEM_TASK:>
Parses the specified pageToken and returns a list of the specified
<END_TASK>
<USER_TASK:>
Description:
def _parsePageToken(pageToken, numValues):
"""
Parses the specified pageToken and returns a list of the specified
number of values. Page tokens are assumed to consist of a fixed
number of integers seperated by colons. If the page token does
not conform to this specification, raise a InvalidPageToken
exception.
""" |
tokens = pageToken.split(":")
if len(tokens) != numValues:
msg = "Invalid number of values in page token"
raise exceptions.BadPageTokenException(msg)
try:
values = map(int, tokens)
except ValueError:
msg = "Malformed integers in page token"
raise exceptions.BadPageTokenException(msg)
return values |
<SYSTEM_TASK:>
Attempts to parse the specified key in the specified argument
<END_TASK>
<USER_TASK:>
Description:
def _parseIntegerArgument(args, key, defaultValue):
    """
    Attempts to parse the specified key in the specified argument
    dictionary into an integer. If the argument cannot be parsed,
    raises a BadRequestIntegerException. If the key is not present,
    return the specified default value.
    """
    ret = defaultValue
    try:
        if key in args:
            try:
                ret = int(args[key])
            except ValueError:
                raise exceptions.BadRequestIntegerException(key, args[key])
    except TypeError:
        # The outer TypeError covers both a non-mapping `args`
        # (membership test fails) and a value whose type int() cannot
        # convert; it is surfaced with the offending key and args.
        raise Exception((key, args))
    return ret
<SYSTEM_TASK:>
Returns true when an annotation should be included.
<END_TASK>
<USER_TASK:>
Description:
def filterVariantAnnotation(self, vann):
    """
    Returns true when an annotation should be included.
    """
    # TODO reintroduce feature ID search
    if not self._effects:
        # No effect filter requested: every annotation passes.
        return True
    if not vann.transcript_effects:
        # An effect filter is set but there is nothing to match.
        return False
    # Evaluate every transcript effect (no short-circuit) so each one
    # is passed through filterEffect exactly as before.
    included = False
    for transcriptEffect in vann.transcript_effects:
        if self.filterEffect(transcriptEffect):
            included = True
    return included
<SYSTEM_TASK:>
Returns true when any of the transcript effects
<END_TASK>
<USER_TASK:>
Description:
def filterEffect(self, teff):
    """
    Returns true when any of the transcript effects
    are present in the request.
    """
    matched = False
    # Deliberately evaluate every effect (no short-circuit) so that
    # _matchAnyEffects is called the same number of times as before.
    for effect in teff.effects:
        if self._matchAnyEffects(effect):
            matched = True
    return matched
<SYSTEM_TASK:>
Tests whether a requested effect and an effect
<END_TASK>
<USER_TASK:>
Description:
def _checkIdEquality(self, requestedEffect, effect):
    """
    Tests whether a requested effect and an effect
    present in an annotation are equal, comparing by term_id.
    Only meaningful when the requested effect carries an id.
    """
    if not self._idPresent(requestedEffect):
        return False
    return effect.term_id == requestedEffect.term_id
<SYSTEM_TASK:>
Call this method before importing a ga4gh module in the scripts dir.
<END_TASK>
<USER_TASK:>
Description:
def ga4ghImportGlue():
    """
    Call this method before importing a ga4gh module in the scripts dir.
    Otherwise, you will be using the installed package instead of
    the development package.
    Assumes a certain directory structure.
    """
    # The development package root is two directory levels above
    # this file; append it so it is searched after installed packages.
    thisFile = os.path.abspath(__file__)
    packageRoot = os.path.dirname(os.path.dirname(thisFile))
    sys.path.append(packageRoot)
<SYSTEM_TASK:>
Update the priority of the file handle. The element is first
<END_TASK>
<USER_TASK:>
Description:
def _update(self, dataFile, handle):
    """
    Promote the given file handle to most-recently-used: the entry is
    removed from the deque and re-added at the left.
    """
    entry = (dataFile, handle)
    self._cache.remove(entry)
    self._add(dataFile, handle)
<SYSTEM_TASK:>
Remove the least recently used file handle from the cache.
<END_TASK>
<USER_TASK:>
Description:
def _removeLru(self):
    """
    Remove the least recently used file handle from the cache.
    Pops the rightmost deque entry, closes its handle, and returns
    the name of the evicted file.
    """
    dataFile, handle = self._cache.pop()
    handle.close()
    return dataFile
<SYSTEM_TASK:>
Returns handle associated to the filename. If the file is
<END_TASK>
<USER_TASK:>
Description:
def getFileHandle(self, dataFile, openMethod):
    """
    Returns handle associated to the filename. If the file is
    already opened, update its priority in the cache and return
    its handle. Otherwise, open the file using openMethod, store
    it in the cache and return the corresponding handle.

    :raises exceptions.FileOpenFailedException: when openMethod
        raises ValueError for the given file.
    """
    if dataFile in self._memoTable:
        # Cache hit: bump this handle to most-recently-used.
        handle = self._memoTable[dataFile]
        self._update(dataFile, handle)
        return handle
    else:
        try:
            handle = openMethod(dataFile)
        except ValueError:
            raise exceptions.FileOpenFailedException(dataFile)
        self._memoTable[dataFile] = handle
        self._add(dataFile, handle)
        if len(self._memoTable) > self._maxCacheSize:
            # NOTE: dataFile is deliberately rebound here to the name
            # of the *evicted* file so its memo entry can be dropped;
            # the handle returned below was saved above.
            dataFile = self._removeLru()
            del self._memoTable[dataFile]
        return handle
<SYSTEM_TASK:>
Join an array of ids into a compound id string
<END_TASK>
<USER_TASK:>
Description:
def join(cls, splits):
    """
    Join an array of ids into a compound id string: a JSON-style
    list of double-quoted segments, e.g. ["a","b"].
    """
    # str.join places the separators directly, replacing the previous
    # manual trailing-comma bookkeeping with the standard idiom.
    quoted = ('"{}"'.format(split) for split in splits)
    return '[{}]'.format(','.join(quoted))
<SYSTEM_TASK:>
Parses the specified compoundId string and returns an instance
<END_TASK>
<USER_TASK:>
Description:
def parse(cls, compoundIdStr):
    """
    Parses the specified compoundId string and returns an instance
    of this CompoundId class.
    :raises: An ObjectWithIdNotFoundException if parsing fails. This is
    because this method is a client-facing method, and if a malformed
    identifier (under our internal rules) is provided, the response should
    be that the identifier does not exist.
    """
    # basestring: this module targets Python 2.
    if not isinstance(compoundIdStr, basestring):
        raise exceptions.BadIdentifierException(compoundIdStr)
    try:
        deobfuscated = cls.deobfuscate(compoundIdStr)
    except TypeError:
        # When a string that cannot be converted to base64 is passed
        # as an argument, b64decode raises a TypeError. We must treat
        # this as an ID not found error.
        raise exceptions.ObjectWithIdNotFoundException(compoundIdStr)
    try:
        encodedSplits = cls.split(deobfuscated)
        splits = [cls.decode(split) for split in encodedSplits]
    except (UnicodeDecodeError, ValueError):
        # Sometimes base64 decoding succeeds but we're left with
        # unicode gibberish. This is also and IdNotFound.
        raise exceptions.ObjectWithIdNotFoundException(compoundIdStr)
    # pull the differentiator out of the splits before instantiating
    # the class, if the differentiator exists
    fieldsLength = len(cls.fields)
    if cls.differentiator is not None:
        # The differentiator occupies one of the declared fields but
        # is not passed to the constructor, so remove it and shorten
        # the expected field count accordingly.
        differentiatorIndex = cls.fields.index(
            cls.differentiatorFieldName)
        if differentiatorIndex < len(splits):
            del splits[differentiatorIndex]
        else:
            raise exceptions.ObjectWithIdNotFoundException(
                compoundIdStr)
        fieldsLength -= 1
    # Any mismatch between provided and expected field counts is
    # reported as "not found" rather than "malformed" (see docstring).
    if len(splits) != fieldsLength:
        raise exceptions.ObjectWithIdNotFoundException(compoundIdStr)
    return cls(None, *splits)
<SYSTEM_TASK:>
Mildly obfuscates the specified ID string in an easily reversible
<END_TASK>
<USER_TASK:>
Description:
def obfuscate(cls, idStr):
    """
    Mildly obfuscates the specified ID string in an easily reversible
    fashion. This is not intended for security purposes, but rather to
    dissuade users from depending on our internal ID structures.

    :param idStr: the ID string to obfuscate.
    :return: the URL-safe base64 text of the UTF-8 encoded id, with
        '=' padding removed, as a Python 2 ``unicode`` object.
    """
    return unicode(base64.urlsafe_b64encode(
        idStr.encode('utf-8')).replace(b'=', b''))
<SYSTEM_TASK:>
Sets the attributes of a message during serialization.
<END_TASK>
<USER_TASK:>
Description:
def serializeAttributes(self, msg):
    """
    Sets the attributes of a message during serialization and
    returns the message.
    """
    # Copy every local attribute into the message's attribute map.
    for key, value in self.getAttributes().items():
        protocol.setAttribute(msg.attributes.attr[key].values, value)
    return msg
<SYSTEM_TASK:>
Scans the specified directory for files with the specified globbing
<END_TASK>
<USER_TASK:>
Description:
def _scanDataFiles(self, dataDir, patterns):
    """
    Scans the specified directory for files with the specified globbing
    pattern and calls self._addDataFile for each. Raises an
    EmptyDirException if no data files are found.
    """
    matched = []
    for pattern in patterns:
        matched.extend(glob.glob(os.path.join(dataDir, pattern)))
    for filename in matched:
        self._addDataFile(filename)
    if not matched:
        raise exceptions.EmptyDirException(dataDir, patterns)
<SYSTEM_TASK:>
Attempts to get a list of peers from a file specified in configuration.
<END_TASK>
<USER_TASK:>
Description:
def getInitialPeerList(filePath, logger=None):
    """
    Attempts to get a list of peers from a file specified in configuration.
    This file has one URL per line and can contain newlines and comments.
        # Main ga4gh node
        http://1kgenomes.ga4gh.org
        # Local intranet peer
        https://192.168.1.1
    The server will attempt to add URLs in this file to its registry at
    startup and will log a warning if the file isn't found.
    """
    with open(filePath) as textFile:
        lines = textFile.readlines()
    if len(lines) == 0:
        if logger:
            logger.warn("Couldn't load the initial "
                        "peer list. Try adding a "
                        "file named 'initial_peers.txt' "
                        "to {}".format(os.getcwd()))
    # Remove lines that start with a hash or are empty.
    # Lines are stripped first: readlines() keeps the trailing '\n',
    # so the previous `x != ""` comparison never dropped blank lines
    # and URLs were returned with the newline attached.
    peers = []
    for line in lines:
        stripped = line.strip()
        if stripped and not stripped.startswith("#"):
            peers.append(stripped)
    return peers
<SYSTEM_TASK:>
Takes the datarepository, a url, and an optional logger and attempts
<END_TASK>
<USER_TASK:>
Description:
def insertInitialPeer(dataRepository, url, logger=None):
    """
    Takes the datarepository, a url, and an optional logger and attempts
    to add the peer into the repository. Duplicate and malformed URLs
    are logged at debug level and otherwise ignored.
    """
    insertPeer = dataRepository.insertPeer
    try:
        peer = datamodel.peers.Peer(url)
        insertPeer(peer)
    except exceptions.RepoManagerException as exc:
        if logger:
            logger.debug(
                "Peer already in registry {} {}".format(peer.getUrl(), exc))
    except exceptions.BadUrlException as exc:
        if logger:
            # Both placeholders must be supplied to format(); the
            # previous code called format(url) alone, which raised
            # IndexError instead of logging the malformed URL.
            logger.debug("A URL in the initial "
                         "peer list {} was malformed. {}".format(url, exc))
<SYSTEM_TASK:>
Attempts to return whether a given URL string is valid by checking
<END_TASK>
<USER_TASK:>
Description:
def isUrl(urlString):
    """
    Attempts to return whether a given URL string is valid by checking
    for the presence of the URL scheme and netloc using the urlparse
    module, and then using a regex.
    From http://stackoverflow.com/questions/7160737/
    :return: True or False (previously this could return a Match
        object or None, leaking the implementation to callers).
    """
    parsed = urlparse.urlparse(urlString)
    urlparseValid = parsed.netloc != '' and parsed.scheme != ''
    regex = re.compile(
        r'^(?:http|ftp)s?://'  # http:// or https://
        r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)'
        r'+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|'  # domain...
        r'localhost|'  # localhost...
        r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})'  # ...or ip
        r'(?::\d+)?'  # optional port
        r'(?:/?|[/?]\S+)$', re.IGNORECASE)
    # bool() normalizes the Match-object-or-None result of match().
    return bool(regex.match(urlString)) and urlparseValid
<SYSTEM_TASK:>
Attempt to safely set the URL by string.
<END_TASK>
<USER_TASK:>
Description:
def setUrl(self, url):
    """
    Attempt to safely set the URL by string; raises BadUrlException
    when validation fails. Returns self for chaining.
    """
    if not isUrl(url):
        raise exceptions.BadUrlException(url)
    self._url = url
    return self
<SYSTEM_TASK:>
Sets the attributes dictionary from a JSON string.
<END_TASK>
<USER_TASK:>
Description:
def setAttributesJson(self, attributesJson):
    """
    Sets the attributes dictionary from a JSON string.
    Returns self for chaining.
    :raises exceptions.InvalidJsonException: when the string cannot
        be parsed as JSON.
    """
    try:
        self._attributes = json.loads(attributesJson)
    except (TypeError, ValueError):
        # Narrowed from a bare except: json raises ValueError (or its
        # subclass JSONDecodeError) for malformed text and TypeError
        # for non-string input; the bare except also swallowed
        # unrelated errors such as KeyboardInterrupt.
        raise exceptions.InvalidJsonException(attributesJson)
    return self
<SYSTEM_TASK:>
This method accepts a model record and sets class variables.
<END_TASK>
<USER_TASK:>
Description:
def populateFromRow(self, peerRecord):
    """
    This method accepts a model record and sets class variables,
    returning self for chaining.
    """
    # setUrl returns self, so these run in the original order.
    self.setUrl(peerRecord.url)
    self.setAttributesJson(peerRecord.attributes)
    return self
<SYSTEM_TASK:>
Returns a generator over the objects in the specified list using
<END_TASK>
<USER_TASK:>
Description:
def _protocolListGenerator(self, request, objectList):
    """
    Returns a generator over the objects in the specified list using
    _protocolObjectGenerator to generate page tokens.
    """
    # Bound __getitem__ serves as the index->object accessor.
    getObjectByIndex = objectList.__getitem__
    return self._protocolObjectGenerator(
        request, len(objectList), getObjectByIndex)
<SYSTEM_TASK:>
Returns a generator over the objects in the specified list using
<END_TASK>
<USER_TASK:>
Description:
def _objectListGenerator(self, request, objectList):
    """
    Returns a generator over the objects in the specified list using
    _topLevelObjectGenerator to generate page tokens.
    """
    # Bound __getitem__ serves as the index->object accessor.
    getObjectByIndex = objectList.__getitem__
    return self._topLevelObjectGenerator(
        request, len(objectList), getObjectByIndex)
<SYSTEM_TASK:>
Runs a get request by converting the specified datamodel
<END_TASK>
<USER_TASK:>
Description:
def runGetRequest(self, obj):
    """
    Runs a get request by converting the specified datamodel
    object into its protocol representation and serializing it
    to a JSON string.
    """
    return protocol.toJson(obj.toProtocolElement())
<SYSTEM_TASK:>
Runs a listReferenceBases request for the specified ID and
<END_TASK>
<USER_TASK:>
Description:
def runListReferenceBases(self, requestJson):
    """
    Runs a listReferenceBases request for the specified ID and
    request arguments.

    :param requestJson: JSON string for a ListReferenceBasesRequest;
        an empty/falsy value is treated as a default request.
    :return: JSON string of the ListReferenceBasesResponse.
    """
    # In the case when an empty post request is made to the endpoint
    # we instantiate an empty ListReferenceBasesRequest.
    if not requestJson:
        request = protocol.ListReferenceBasesRequest()
    else:
        try:
            request = protocol.fromJson(
                requestJson,
                protocol.ListReferenceBasesRequest)
        except protocol.json_format.ParseError:
            raise exceptions.InvalidJsonException(requestJson)
    compoundId = datamodel.ReferenceCompoundId.parse(request.reference_id)
    referenceSet = self.getDataRepository().getReferenceSet(
        compoundId.reference_set_id)
    reference = referenceSet.getReference(request.reference_id)
    start = request.start
    end = request.end
    if end == 0:  # assume meant "get all"
        end = reference.getLength()
    # A page token, when present, overrides the start coordinate:
    # it holds the offset at which the previous page ended.
    if request.page_token:
        pageTokenStr = request.page_token
        start = paging._parsePageToken(pageTokenStr, 1)[0]
    chunkSize = self._maxResponseLength
    nextPageToken = None
    # Truncate this page to chunkSize bases and emit a token for the
    # remainder when the requested range is longer than one chunk.
    if start + chunkSize < end:
        end = start + chunkSize
        nextPageToken = str(start + chunkSize)
    sequence = reference.getBases(start, end)
    # build response
    response = protocol.ListReferenceBasesResponse()
    response.offset = start
    response.sequence = sequence
    if nextPageToken:
        response.next_page_token = nextPageToken
    return protocol.toJson(response)
<SYSTEM_TASK:>
Returns a callset with the given id
<END_TASK>
<USER_TASK:>
Description:
def runGetCallSet(self, id_):
    """
    Returns the callset with the given id, serialized as JSON.
    """
    compoundId = datamodel.CallSetCompoundId.parse(id_)
    repo = self.getDataRepository()
    variantSet = repo.getDataset(
        compoundId.dataset_id).getVariantSet(compoundId.variant_set_id)
    return self.runGetRequest(variantSet.getCallSet(id_))
<SYSTEM_TASK:>
Returns information about the service including protocol version.
<END_TASK>
<USER_TASK:>
Description:
def runGetInfo(self, request):
    """
    Returns information about the service including protocol version.
    """
    response = protocol.GetInfoResponse(
        protocol_version=protocol.version)
    return protocol.toJson(response)
<SYSTEM_TASK:>
Takes a flask request from the frontend and attempts to parse
<END_TASK>
<USER_TASK:>
Description:
def runAddAnnouncement(self, flaskrequest):
    """
    Takes a flask request from the frontend and attempts to parse
    into an AnnouncePeerRequest. If successful, it will log the
    announcement to the `announcement` table with some other metadata
    gathered from the request.
    """
    announcement = {}
    # We want to parse the request ourselves to collect a little more
    # data about it.
    try:
        requestData = protocol.fromJson(
            flaskrequest.get_data(), protocol.AnnouncePeerRequest)
        announcement['hostname'] = flaskrequest.host_url
        announcement['remote_addr'] = flaskrequest.remote_addr
        announcement['user_agent'] = flaskrequest.headers.get('User-Agent')
    except AttributeError:
        # Sometimes in testing we will send protocol requests instead
        # of flask requests and so the hostname and user agent won't
        # be present.
        try:
            requestData = protocol.fromJson(
                flaskrequest, protocol.AnnouncePeerRequest)
        except Exception as e:
            raise exceptions.InvalidJsonException(e)
    except Exception as e:
        raise exceptions.InvalidJsonException(e)
    # Validate the url before accepting the announcement
    peer = datamodel.peers.Peer(requestData.peer.url)
    peer.setAttributesJson(protocol.toJson(
        requestData.peer.attributes))
    announcement['url'] = peer.getUrl()
    announcement['attributes'] = peer.getAttributes()
    try:
        self.getDataRepository().insertAnnouncement(announcement)
    except Exception:
        # Narrowed from a bare except: still converts any storage
        # failure into a BadRequestException, but no longer swallows
        # SystemExit/KeyboardInterrupt.
        raise exceptions.BadRequestException(announcement['url'])
    return protocol.toJson(
        protocol.AnnouncePeerResponse(success=True))
<SYSTEM_TASK:>
Takes a ListPeersRequest and returns a ListPeersResponse using
<END_TASK>
<USER_TASK:>
Description:
def runListPeers(self, request):
    """
    Takes a ListPeersRequest and returns a ListPeersResponse using
    a page_token and page_size if provided.
    """
    # Delegate to the generic search machinery with peer-specific
    # request/response types and generator.
    return self.runSearchRequest(
        request,
        protocol.ListPeersRequest,
        protocol.ListPeersResponse,
        self.peersGenerator)
<SYSTEM_TASK:>
Returns a variant with the given id
<END_TASK>
<USER_TASK:>
Description:
def runGetVariant(self, id_):
    """
    Returns the variant with the given id, serialized as JSON.
    """
    compoundId = datamodel.VariantCompoundId.parse(id_)
    repo = self.getDataRepository()
    variantSet = repo.getDataset(
        compoundId.dataset_id).getVariantSet(compoundId.variant_set_id)
    # TODO variant is a special case here, as it's returning a
    # protocol element rather than a datamodel object. We should
    # fix this for consistency.
    return protocol.toJson(variantSet.getVariant(compoundId))
<SYSTEM_TASK:>
Runs a getBiosample request for the specified ID.
<END_TASK>
<USER_TASK:>
Description:
def runGetBiosample(self, id_):
    """
    Runs a getBiosample request for the specified ID.
    """
    compoundId = datamodel.BiosampleCompoundId.parse(id_)
    dataset = self.getDataRepository().getDataset(compoundId.dataset_id)
    return self.runGetRequest(dataset.getBiosample(id_))
<SYSTEM_TASK:>
Runs a getIndividual request for the specified ID.
<END_TASK>
<USER_TASK:>
Description:
def runGetIndividual(self, id_):
    """
    Runs a getIndividual request for the specified ID.
    """
    # NOTE(review): this parses the ID with BiosampleCompoundId even
    # though it fetches an individual -- confirm whether an
    # IndividualCompoundId should be used here instead.
    compoundId = datamodel.BiosampleCompoundId.parse(id_)
    dataset = self.getDataRepository().getDataset(compoundId.dataset_id)
    individual = dataset.getIndividual(id_)
    return self.runGetRequest(individual)
<SYSTEM_TASK:>
Returns JSON string of the feature object corresponding to
<END_TASK>
<USER_TASK:>
Description:
def runGetFeature(self, id_):
    """
    Returns JSON string of the feature object corresponding to
    the feature compoundID passed in.
    """
    compoundId = datamodel.FeatureCompoundId.parse(id_)
    repo = self.getDataRepository()
    featureSet = repo.getDataset(
        compoundId.dataset_id).getFeatureSet(compoundId.feature_set_id)
    # Features are returned as protocol elements, so serialize directly.
    return protocol.toJson(featureSet.getFeature(compoundId))
<SYSTEM_TASK:>
Returns a read group with the given id_
<END_TASK>
<USER_TASK:>
Description:
def runGetReadGroup(self, id_):
    """
    Returns the read group with the given id_, serialized as JSON.
    """
    compoundId = datamodel.ReadGroupCompoundId.parse(id_)
    repo = self.getDataRepository()
    readGroupSet = repo.getDataset(
        compoundId.dataset_id).getReadGroupSet(
            compoundId.read_group_set_id)
    return self.runGetRequest(readGroupSet.getReadGroup(id_))
<SYSTEM_TASK:>
Runs a getReference request for the specified ID.
<END_TASK>
<USER_TASK:>
Description:
def runGetReference(self, id_):
    """
    Runs a getReference request for the specified ID.
    """
    compoundId = datamodel.ReferenceCompoundId.parse(id_)
    referenceSet = self.getDataRepository().getReferenceSet(
        compoundId.reference_set_id)
    return self.runGetRequest(referenceSet.getReference(id_))
<SYSTEM_TASK:>
Runs a getReferenceSet request for the specified ID.
<END_TASK>
<USER_TASK:>
Description:
def runGetReferenceSet(self, id_):
    """
    Runs a getReferenceSet request for the specified ID.
    """
    return self.runGetRequest(
        self.getDataRepository().getReferenceSet(id_))
<SYSTEM_TASK:>
Runs a getFeatureSet request for the specified ID.
<END_TASK>
<USER_TASK:>
Description:
def runGetFeatureSet(self, id_):
    """
    Runs a getFeatureSet request for the specified ID.
    """
    compoundId = datamodel.FeatureSetCompoundId.parse(id_)
    dataset = self.getDataRepository().getDataset(compoundId.dataset_id)
    return self.runGetRequest(dataset.getFeatureSet(id_))
<SYSTEM_TASK:>
Runs a getContinuousSet request for the specified ID.
<END_TASK>
<USER_TASK:>
Description:
def runGetContinuousSet(self, id_):
    """
    Runs a getContinuousSet request for the specified ID.
    """
    compoundId = datamodel.ContinuousSetCompoundId.parse(id_)
    dataset = self.getDataRepository().getDataset(compoundId.dataset_id)
    return self.runGetRequest(dataset.getContinuousSet(id_))
<SYSTEM_TASK:>
Runs a getDataset request for the specified ID.
<END_TASK>
<USER_TASK:>
Description:
def runGetDataset(self, id_):
    """
    Runs a getDataset request for the specified ID.
    """
    return self.runGetRequest(self.getDataRepository().getDataset(id_))
<SYSTEM_TASK:>
Runs a getRnaQuantification request for the specified ID.
<END_TASK>
<USER_TASK:>
Description:
def runGetRnaQuantification(self, id_):
    """
    Runs a getRnaQuantification request for the specified ID.
    """
    compoundId = datamodel.RnaQuantificationCompoundId.parse(id_)
    repo = self.getDataRepository()
    quantificationSet = repo.getDataset(
        compoundId.dataset_id).getRnaQuantificationSet(
            compoundId.rna_quantification_set_id)
    return self.runGetRequest(
        quantificationSet.getRnaQuantification(id_))
<SYSTEM_TASK:>
Runs a getRnaQuantificationSet request for the specified ID.
<END_TASK>
<USER_TASK:>
Description:
def runGetRnaQuantificationSet(self, id_):
    """
    Runs a getRnaQuantificationSet request for the specified ID.
    """
    compoundId = datamodel.RnaQuantificationSetCompoundId.parse(id_)
    dataset = self.getDataRepository().getDataset(compoundId.dataset_id)
    return self.runGetRequest(dataset.getRnaQuantificationSet(id_))
<SYSTEM_TASK:>
Runs a getExpressionLevel request for the specified ID.
<END_TASK>
<USER_TASK:>
Description:
def runGetExpressionLevel(self, id_):
    """
    Runs a getExpressionLevel request for the specified ID.
    """
    compoundId = datamodel.ExpressionLevelCompoundId.parse(id_)
    # Walk the hierarchy: dataset -> quantification set ->
    # quantification -> expression level.
    repo = self.getDataRepository()
    quantificationSet = repo.getDataset(
        compoundId.dataset_id).getRnaQuantificationSet(
            compoundId.rna_quantification_set_id)
    quantification = quantificationSet.getRnaQuantification(
        compoundId.rna_quantification_id)
    return self.runGetRequest(
        quantification.getExpressionLevel(compoundId))
<SYSTEM_TASK:>
Runs the specified SearchBiosamplesRequest.
<END_TASK>
<USER_TASK:>
Description:
def runSearchBiosamples(self, request):
    """
    Runs the specified SearchBiosamplesRequest.
    """
    return self.runSearchRequest(
        request,
        protocol.SearchBiosamplesRequest,
        protocol.SearchBiosamplesResponse,
        self.biosamplesGenerator)
<SYSTEM_TASK:>
Runs the specified SearchVariantAnnotationsRequest.
<END_TASK>
<USER_TASK:>
Description:
def runSearchVariantAnnotations(self, request):
    """
    Runs the specified SearchVariantAnnotationsRequest.
    """
    return self.runSearchRequest(
        request,
        protocol.SearchVariantAnnotationsRequest,
        protocol.SearchVariantAnnotationsResponse,
        self.variantAnnotationsGenerator)
<SYSTEM_TASK:>
Returns a SearchFeaturesResponse for the specified
<END_TASK>
<USER_TASK:>
Description:
def runSearchFeatures(self, request):
    """
    Returns a SearchFeaturesResponse for the specified
    SearchFeaturesRequest object.

    :param request: JSON string representing searchFeaturesRequest
    :return: JSON string representing searchFeatureResponse
    """
    requestClass = protocol.SearchFeaturesRequest
    responseClass = protocol.SearchFeaturesResponse
    return self.runSearchRequest(
        request, requestClass, responseClass, self.featuresGenerator)
<SYSTEM_TASK:>
Returns a SearchContinuousResponse for the specified
<END_TASK>
<USER_TASK:>
Description:
def runSearchContinuous(self, request):
    """
    Returns a SearchContinuousResponse for the specified
    SearchContinuousRequest object.

    :param request: JSON string representing searchContinuousRequest
    :return: JSON string representing searchContinuousResponse
    """
    requestClass = protocol.SearchContinuousRequest
    responseClass = protocol.SearchContinuousResponse
    return self.runSearchRequest(
        request, requestClass, responseClass, self.continuousGenerator)
<SYSTEM_TASK:>
Populates the instance variables of this Dataset from the
<END_TASK>
<USER_TASK:>
Description:
def populateFromRow(self, dataset):
    """
    Loads this Dataset's description and attributes from the given
    database record.
    """
    self.setAttributesJson(dataset.attributes)
    self._description = dataset.description
<SYSTEM_TASK:>
Adds the specified variantSet to this dataset.
<END_TASK>
<USER_TASK:>
Description:
def addVariantSet(self, variantSet):
    """
    Registers variantSet under its id and local name in this dataset's
    lookup structures.
    """
    key = variantSet.getId()
    self._variantSetIds.append(key)
    self._variantSetIdMap[key] = variantSet
    self._variantSetNameMap[variantSet.getLocalId()] = variantSet
<SYSTEM_TASK:>
Adds the specified biosample to this dataset.
<END_TASK>
<USER_TASK:>
Description:
def addBiosample(self, biosample):
    """
    Registers biosample under its id and name in this dataset's
    lookup structures.
    """
    key = biosample.getId()
    self._biosampleIds.append(key)
    self._biosampleIdMap[key] = biosample
    self._biosampleNameMap[biosample.getName()] = biosample
<SYSTEM_TASK:>
Adds the specified individual to this dataset.
<END_TASK>
<USER_TASK:>
Description:
def addIndividual(self, individual):
    """
    Registers individual under its id and name in this dataset's
    lookup structures.
    """
    key = individual.getId()
    self._individualIds.append(key)
    self._individualIdMap[key] = individual
    self._individualNameMap[individual.getName()] = individual
<SYSTEM_TASK:>
Adds the specified continuousSet to this dataset.
<END_TASK>
<USER_TASK:>
Description:
def addContinuousSet(self, continuousSet):
    """
    Registers continuousSet under its id and local name in this
    dataset's lookup structures.
    """
    key = continuousSet.getId()
    self._continuousSetIds.append(key)
    self._continuousSetIdMap[key] = continuousSet
    self._continuousSetNameMap[continuousSet.getLocalId()] = continuousSet
<SYSTEM_TASK:>
Adds the specified readGroupSet to this dataset.
<END_TASK>
<USER_TASK:>
Description:
def addReadGroupSet(self, readGroupSet):
    """
    Registers readGroupSet under its id and local name in this
    dataset's lookup structures.
    """
    key = readGroupSet.getId()
    self._readGroupSetIds.append(key)
    self._readGroupSetIdMap[key] = readGroupSet
    self._readGroupSetNameMap[readGroupSet.getLocalId()] = readGroupSet
<SYSTEM_TASK:>
Adds the specified rnaQuantification set to this dataset.
<END_TASK>
<USER_TASK:>
Description:
def addRnaQuantificationSet(self, rnaQuantSet):
    """
    Registers rnaQuantSet under its id and local name in this
    dataset's lookup structures.
    """
    key = rnaQuantSet.getId()
    self._rnaQuantificationSetIds.append(key)
    self._rnaQuantificationSetIdMap[key] = rnaQuantSet
    self._rnaQuantificationSetNameMap[rnaQuantSet.getLocalId()] = \
        rnaQuantSet
<SYSTEM_TASK:>
Returns the VariantSet with the specified name, or raises a
<END_TASK>
<USER_TASK:>
Description:
def getVariantSet(self, id_):
    """
    Returns the VariantSet registered under id_, or raises a
    VariantSetNotFoundException when the id is unknown.
    """
    if id_ in self._variantSetIdMap:
        return self._variantSetIdMap[id_]
    raise exceptions.VariantSetNotFoundException(id_)
<SYSTEM_TASK:>
Returns a VariantSet with the specified name, or raises a
<END_TASK>
<USER_TASK:>
Description:
def getVariantSetByName(self, name):
    """
    Returns the VariantSet registered under name, or raises a
    VariantSetNameNotFoundException when the name is unknown.
    """
    if name in self._variantSetNameMap:
        return self._variantSetNameMap[name]
    raise exceptions.VariantSetNameNotFoundException(name)
<SYSTEM_TASK:>
Adds the specified g2p association set to this backend.
<END_TASK>
<USER_TASK:>
Description:
def addPhenotypeAssociationSet(self, phenotypeAssociationSet):
    """
    Registers the g2p association set under its id and local name in
    this backend's lookup structures.
    """
    key = phenotypeAssociationSet.getId()
    self._phenotypeAssociationSetIds.append(key)
    self._phenotypeAssociationSetIdMap[key] = phenotypeAssociationSet
    localName = phenotypeAssociationSet.getLocalId()
    self._phenotypeAssociationSetNameMap[localName] = \
        phenotypeAssociationSet
<SYSTEM_TASK:>
Returns the FeatureSet with the specified id, or raises a
<END_TASK>
<USER_TASK:>
Description:
def getFeatureSet(self, id_):
    """
    Returns the FeatureSet registered under id_, or raises a
    FeatureSetNotFoundException when the id is unknown.
    """
    if id_ in self._featureSetIdMap:
        return self._featureSetIdMap[id_]
    raise exceptions.FeatureSetNotFoundException(id_)
<SYSTEM_TASK:>
Returns the FeatureSet with the specified name, or raises
<END_TASK>
<USER_TASK:>
Description:
def getFeatureSetByName(self, name):
    """
    Returns the FeatureSet registered under name, or raises a
    FeatureSetNameNotFoundException when the name is unknown.
    """
    if name in self._featureSetNameMap:
        return self._featureSetNameMap[name]
    raise exceptions.FeatureSetNameNotFoundException(name)
<SYSTEM_TASK:>
Returns the ContinuousSet with the specified id, or raises a
<END_TASK>
<USER_TASK:>
Description:
def getContinuousSet(self, id_):
    """
    Returns the ContinuousSet registered under id_, or raises a
    ContinuousSetNotFoundException when the id is unknown.
    """
    if id_ in self._continuousSetIdMap:
        return self._continuousSetIdMap[id_]
    raise exceptions.ContinuousSetNotFoundException(id_)
<SYSTEM_TASK:>
Returns the ContinuousSet with the specified name, or raises
<END_TASK>
<USER_TASK:>
Description:
def getContinuousSetByName(self, name):
    """
    Returns the ContinuousSet registered under name, or raises a
    ContinuousSetNameNotFoundException when the name is unknown.
    """
    if name in self._continuousSetNameMap:
        return self._continuousSetNameMap[name]
    raise exceptions.ContinuousSetNameNotFoundException(name)
<SYSTEM_TASK:>
Returns a Biosample with the specified name, or raises a
<END_TASK>
<USER_TASK:>
Description:
def getBiosampleByName(self, name):
    """
    Returns the Biosample registered under name, or raises a
    BiosampleNameNotFoundException when the name is unknown.
    """
    if name in self._biosampleNameMap:
        return self._biosampleNameMap[name]
    raise exceptions.BiosampleNameNotFoundException(name)
<SYSTEM_TASK:>
Returns the Biosample with the specified id, or raises
<END_TASK>
<USER_TASK:>
Description:
def getBiosample(self, id_):
    """
    Returns the Biosample registered under id_, or raises a
    BiosampleNotFoundException when the id is unknown.
    """
    if id_ in self._biosampleIdMap:
        return self._biosampleIdMap[id_]
    raise exceptions.BiosampleNotFoundException(id_)
<SYSTEM_TASK:>
Returns an individual with the specified name, or raises a
<END_TASK>
<USER_TASK:>
Description:
def getIndividualByName(self, name):
    """
    Returns the Individual registered under name, or raises an
    IndividualNameNotFoundException when the name is unknown.
    """
    if name in self._individualNameMap:
        return self._individualNameMap[name]
    raise exceptions.IndividualNameNotFoundException(name)
<SYSTEM_TASK:>
Returns the Individual with the specified id, or raises
<END_TASK>
<USER_TASK:>
Description:
def getIndividual(self, id_):
    """
    Returns the Individual registered under id_, or raises an
    IndividualNotFoundException when the id is unknown.
    """
    if id_ in self._individualIdMap:
        return self._individualIdMap[id_]
    raise exceptions.IndividualNotFoundException(id_)
<SYSTEM_TASK:>
Returns a ReadGroupSet with the specified name, or raises a
<END_TASK>
<USER_TASK:>
Description:
def getReadGroupSetByName(self, name):
    """
    Returns the ReadGroupSet registered under name, or raises a
    ReadGroupSetNameNotFoundException when the name is unknown.
    """
    if name in self._readGroupSetNameMap:
        return self._readGroupSetNameMap[name]
    raise exceptions.ReadGroupSetNameNotFoundException(name)
<SYSTEM_TASK:>
Returns the ReadGroupSet with the specified name, or raises
<END_TASK>
<USER_TASK:>
Description:
def getReadGroupSet(self, id_):
    """
    Returns the ReadGroupSet with the specified id, or raises
    a ReadGroupSetNotFoundException otherwise.
    """ |
    if id_ not in self._readGroupSetIdMap:
        # NOTE(review): this raises ReadGroupNotFoundException even though
        # the lookup is for a read group *set* and the docstring promises
        # ReadGroupSetNotFoundException -- confirm which is intended before
        # changing, as callers may catch the current type.
        raise exceptions.ReadGroupNotFoundException(id_)
    return self._readGroupSetIdMap[id_]
<SYSTEM_TASK:>
Returns the RnaQuantification set with the specified name, or raises
<END_TASK>
<USER_TASK:>
Description:
def getRnaQuantificationSetByName(self, name):
    """
    Returns the RnaQuantificationSet registered under name, or raises a
    RnaQuantificationSetNameNotFoundException when the name is unknown.
    """
    if name in self._rnaQuantificationSetNameMap:
        return self._rnaQuantificationSetNameMap[name]
    raise exceptions.RnaQuantificationSetNameNotFoundException(name)
<SYSTEM_TASK:>
Returns the RnaQuantification set with the specified name, or raises
<END_TASK>
<USER_TASK:>
Description:
def getRnaQuantificationSet(self, id_):
    """
    Returns the RnaQuantificationSet registered under id_, or raises a
    RnaQuantificationSetNotFoundException when the id is unknown.
    """
    if id_ in self._rnaQuantificationSetIdMap:
        return self._rnaQuantificationSetIdMap[id_]
    raise exceptions.RnaQuantificationSetNotFoundException(id_)
<SYSTEM_TASK:>
Adds the specified ReadGroup to this ReadGroupSet.
<END_TASK>
<USER_TASK:>
Description:
def addReadGroup(self, readGroup):
    """
    Registers readGroup under its id in this ReadGroupSet's lookup
    structures.
    """
    key = readGroup.getId()
    self._readGroupIds.append(key)
    self._readGroupIdMap[key] = readGroup
<SYSTEM_TASK:>
Returns the ReadGroup with the specified id if it exists in this
<END_TASK>
<USER_TASK:>
Description:
def getReadGroup(self, id_):
    """
    Returns the ReadGroup registered under id_ in this ReadGroupSet,
    or raises a ReadGroupNotFoundException when the id is unknown.
    """
    if id_ in self._readGroupIdMap:
        return self._readGroupIdMap[id_]
    raise exceptions.ReadGroupNotFoundException(id_)
<SYSTEM_TASK:>
Returns the GA4GH protocol representation of this ReadGroupSet.
<END_TASK>
<USER_TASK:>
Description:
def toProtocolElement(self):
    """
    Builds and returns the protocol.ReadGroupSet message describing
    this read group set, including all of its read groups and stats.
    """
    gaReadGroupSet = protocol.ReadGroupSet()
    gaReadGroupSet.id = self.getId()
    gaReadGroupSet.name = self.getLocalId()
    gaReadGroupSet.dataset_id = self.getParentContainer().getId()
    gaReadGroupSet.read_groups.extend(
        readGroup.toProtocolElement()
        for readGroup in self.getReadGroups())
    gaReadGroupSet.stats.CopyFrom(self.getStats())
    self.serializeAttributes(gaReadGroupSet)
    return gaReadGroupSet
<SYSTEM_TASK:>
Returns a string ID suitable for use in the specified GA
<END_TASK>
<USER_TASK:>
Description:
def getReadAlignmentId(self, gaAlignment):
    """
    Returns the string ID used for the given GA ReadAlignment object
    within this ReadGroupSet.
    """
    alignmentId = datamodel.ReadAlignmentCompoundId(
        self.getCompoundId(), gaAlignment.fragment_name)
    return str(alignmentId)
<SYSTEM_TASK:>
Returns the GA4GH protocol representation of this read group set's
<END_TASK>
<USER_TASK:>
Description:
def getStats(self):
    """
    Returns a protocol.ReadStats message holding this read group set's
    aligned and unaligned read counts.
    """
    readStats = protocol.ReadStats()
    readStats.unaligned_read_count = self._numUnalignedReads
    readStats.aligned_read_count = self._numAlignedReads
    return readStats
<SYSTEM_TASK:>
Populates the instance variables of this ReadGroupSet from the
<END_TASK>
<USER_TASK:>
Description:
def populateFromRow(self, readGroupSetRecord):
    """
    Loads this ReadGroupSet's fields (data URL, index file, programs
    and read stats) from the given database record.
    """
    self._dataUrl = readGroupSetRecord.dataurl
    self._indexFile = readGroupSetRecord.indexfile
    # Each program is stored as a JSON object; round-trip through
    # json.dumps so protocol.fromJson can parse it.
    self._programs = [
        protocol.fromJson(json.dumps(entry), protocol.Program)
        for entry in json.loads(readGroupSetRecord.programs)]
    readStats = protocol.fromJson(
        readGroupSetRecord.stats, protocol.ReadStats)
    self._numAlignedReads = readStats.aligned_read_count
    self._numUnalignedReads = readStats.unaligned_read_count
<SYSTEM_TASK:>
Populates the instance variables of this ReadGroupSet from the
<END_TASK>
<USER_TASK:>
Description:
def populateFromFile(self, dataUrl, indexFile=None):
    """
    Populates the instance variables of this ReadGroupSet from the
    specified dataUrl and indexFile. If indexFile is not specified
    the conventional BAM index name (dataUrl + ".bai") is assumed.

    Raises MultipleReferenceSetsInReadGroupSet when the BAM header's
    @SQ records name more than one reference set.
    """
    self._dataUrl = dataUrl
    self._indexFile = indexFile
    if indexFile is None:
        # Conventional index naming: the index sits alongside the BAM.
        self._indexFile = dataUrl + ".bai"
    samFile = self.getFileHandle(self._dataUrl)
    self._setHeaderFields(samFile)
    if 'RG' not in samFile.header or len(samFile.header['RG']) == 0:
        # No read groups declared in the header: expose a single
        # default read group covering the whole file.
        readGroup = HtslibReadGroup(self, self.defaultReadGroupName)
        self.addReadGroup(readGroup)
    else:
        for readGroupHeader in samFile.header['RG']:
            readGroup = HtslibReadGroup(self, readGroupHeader['ID'])
            readGroup.populateFromHeader(readGroupHeader)
            self.addReadGroup(readGroup)
    self._bamHeaderReferenceSetName = None
    for referenceInfo in samFile.header['SQ']:
        if 'AS' not in referenceInfo:
            infoDict = parseMalformedBamHeader(referenceInfo)
        else:
            infoDict = referenceInfo
        name = infoDict.get('AS', references.DEFAULT_REFERENCESET_NAME)
        if self._bamHeaderReferenceSetName is None:
            self._bamHeaderReferenceSetName = name
        elif self._bamHeaderReferenceSetName != name:
            # BUG FIX: this previously passed the undefined attribute
            # self._bamFileReferenceName, which would have raised
            # AttributeError instead of the intended exception. Report
            # the previously-seen reference set name instead.
            raise exceptions.MultipleReferenceSetsInReadGroupSet(
                self._dataUrl, name, self._bamHeaderReferenceSetName)
    self._numAlignedReads = samFile.mapped
    self._numUnalignedReads = samFile.unmapped
<SYSTEM_TASK:>
Returns the GA4GH protocol representation of this ReadGroup.
<END_TASK>
<USER_TASK:>
Description:
def toProtocolElement(self):
    """
    Returns the GA4GH protocol representation of this ReadGroup.
    """ |
    # TODO this is very incomplete, but we don't have the
    # implementation to fill out the rest of the fields currently
    readGroup = protocol.ReadGroup()
    readGroup.id = self.getId()
    readGroup.created = self._creationTime
    readGroup.updated = self._updateTime
    # The dataset is two containers up: ReadGroup -> ReadGroupSet -> Dataset.
    dataset = self.getParentContainer().getParentContainer()
    readGroup.dataset_id = dataset.getId()
    readGroup.name = self.getLocalId()
    # pb.int/pb.string presumably coerce missing (None) values to the
    # protobuf default -- verify in the pb helper module.
    readGroup.predicted_insert_size = pb.int(self.getPredictedInsertSize())
    referenceSet = self._parentContainer.getReferenceSet()
    readGroup.sample_name = pb.string(self.getSampleName())
    readGroup.biosample_id = pb.string(self.getBiosampleId())
    # A read group set may have no associated reference set; only set
    # the id when one is present.
    if referenceSet is not None:
        readGroup.reference_set_id = referenceSet.getId()
    readGroup.stats.CopyFrom(self.getStats())
    readGroup.programs.extend(self.getPrograms())
    readGroup.description = pb.string(self.getDescription())
    readGroup.experiment.CopyFrom(self.getExperiment())
    self.serializeAttributes(readGroup)
    return readGroup
<SYSTEM_TASK:>
Returns the GA4GH protocol representation of this read group's
<END_TASK>
<USER_TASK:>
Description:
def getStats(self):
    """
    Returns a protocol.ReadStats message holding this read group's
    aligned and unaligned read counts.
    """
    # TODO base_count requires iterating through all reads
    readStats = protocol.ReadStats()
    readStats.unaligned_read_count = self.getNumUnalignedReads()
    readStats.aligned_read_count = self.getNumAlignedReads()
    return readStats
<SYSTEM_TASK:>
Returns the GA4GH protocol representation of this read group's
<END_TASK>
<USER_TASK:>
Description:
def getExperiment(self):
    """
    Returns a protocol.Experiment message describing this read group's
    sequencing experiment.
    """
    gaExperiment = protocol.Experiment()
    gaExperiment.id = self.getExperimentId()
    gaExperiment.message_create_time = self._iso8601
    gaExperiment.message_update_time = self._iso8601
    gaExperiment.description = pb.string(self.getExperimentDescription())
    gaExperiment.instrument_model = pb.string(self.getInstrumentModel())
    gaExperiment.library = pb.string(self.getLibrary())
    gaExperiment.platform_unit = pb.string(self.getPlatformUnit())
    gaExperiment.run_time = pb.string(self.getRunTime())
    gaExperiment.sequencing_center = pb.string(self.getSequencingCenter())
    return gaExperiment
<SYSTEM_TASK:>
Populate the instance variables using the specified SAM header.
<END_TASK>
<USER_TASK:>
Description:
def populateFromHeader(self, readGroupHeader):
    """
    Fills this read group's fields from a SAM @RG header mapping;
    absent tags become None (PI is left unset when absent).
    """
    get = readGroupHeader.get
    self._sampleName = get('SM', None)
    self._description = get('DS', None)
    self._instrumentModel = get('PL', None)
    self._sequencingCenter = get('CN', None)
    self._experimentDescription = get('DS', None)
    self._library = get('LB', None)
    self._platformUnit = get('PU', None)
    self._runTime = get('DT', None)
    if 'PI' in readGroupHeader:
        self._predictedInsertSize = int(readGroupHeader['PI'])
<SYSTEM_TASK:>
Populate the instance variables using the specified DB row.
<END_TASK>
<USER_TASK:>
Description:
def populateFromRow(self, readGroupRecord):
    """
    Loads this ReadGroup's fields from the given database record,
    including its serialized ReadStats and Experiment messages.
    """
    self._sampleName = readGroupRecord.samplename
    self._biosampleId = readGroupRecord.biosampleid
    self._description = readGroupRecord.description
    self._predictedInsertSize = readGroupRecord.predictedinsertsize
    readStats = protocol.fromJson(
        readGroupRecord.stats, protocol.ReadStats)
    self._numAlignedReads = readStats.aligned_read_count
    self._numUnalignedReads = readStats.unaligned_read_count
    gaExperiment = protocol.fromJson(
        readGroupRecord.experiment, protocol.Experiment)
    self._instrumentModel = gaExperiment.instrument_model
    self._sequencingCenter = gaExperiment.sequencing_center
    self._experimentDescription = gaExperiment.description
    self._library = gaExperiment.library
    self._platformUnit = gaExperiment.platform_unit
    self._runTime = gaExperiment.run_time
<SYSTEM_TASK:>
Returns the filename of the specified path without its extensions.
<END_TASK>
<USER_TASK:>
Description:
def getNameFromPath(filePath):
    """
    Returns the base name of filePath stripped of all extensions
    (e.g. "/data/chr1.fa.gz" -> "chr1"); this is how we derive the
    default name for a given object.

    Raises ValueError for an empty path.
    """
    if not filePath:
        raise ValueError("Cannot have empty path for name")
    baseName = os.path.basename(os.path.normpath(filePath))
    # os.path.splitext only strips the final extension, so split on the
    # first dot instead to handle names like foo.fa.gz.
    name = baseName.partition(".")[0]
    assert name != ""
    return name
<SYSTEM_TASK:>
Exits the repo manager with error status.
<END_TASK>
<USER_TASK:>
Description:
def repoExitError(message):
    """
    Terminates the repo manager with error status, printing a wrapped
    "<program>: error: <message>" line.
    """
    text = "{}: error: {}".format(sys.argv[0], message)
    wrapper = textwrap.TextWrapper(
        break_long_words=False, break_on_hyphens=False)
    sys.exit(wrapper.fill(text))
<SYSTEM_TASK:>
Runs the specified function that updates the repo with the specified
<END_TASK>
<USER_TASK:>
Description:
def _updateRepo(self, func, *args, **kwargs):
    """
    Runs the specified function that updates the repo with the specified
    arguments. This method ensures that all updates are transactional,
    so that if any part of the update fails no changes are made to the
    repo.

    :param func: repo-mutating callable to invoke
    :param args: positional arguments forwarded to func
    :param kwargs: keyword arguments forwarded to func
    """ |
    # TODO how do we make this properly transactional?
    self._repo.open(datarepo.MODE_WRITE)
    try:
        func(*args, **kwargs)
        # Commit only when func completes without raising.
        self._repo.commit()
    finally:
        # Always release the write handle, even when the update fails.
        self._repo.close()
<SYSTEM_TASK:>
Adds a new Ontology to this repo.
<END_TASK>
<USER_TASK:>
Description:
def addOntology(self):
    """
    Creates an Ontology from the configured file path and inserts it
    into this repo, defaulting the name to the file's base name.
    """
    self._openRepo()
    filePath = self._getFilePath(
        self._args.filePath, self._args.relativePath)
    name = self._args.name
    if name is None:
        name = getNameFromPath(filePath)
    newOntology = ontologies.Ontology(name)
    newOntology.populateFromFile(filePath)
    self._updateRepo(self._repo.insertOntology, newOntology)
<SYSTEM_TASK:>
Adds a new dataset into this repo.
<END_TASK>
<USER_TASK:>
Description:
def addDataset(self):
    """
    Creates a new Dataset from the command-line arguments and inserts
    it into this repo.
    """
    self._openRepo()
    newDataset = datasets.Dataset(self._args.datasetName)
    newDataset.setDescription(self._args.description)
    newDataset.setAttributes(json.loads(self._args.attributes))
    self._updateRepo(self._repo.insertDataset, newDataset)
Subsets and Splits
No saved queries yet
Save your SQL queries to embed, download, and access them later. Queries will appear here once saved.