text_prompt
stringlengths 100
17.7k
⌀ | code_prompt
stringlengths 7
9.86k
⌀ |
---|---|
<SYSTEM_TASK:>
Fetch feature by featureID.
<END_TASK>
<USER_TASK:>
Description:
def getFeatureById(self, featureId):
    """
    Fetch a single feature record by its featureID.

    :param featureId: the FeatureID as found in GFF3 records
    :return: dictionary representing a feature object,
        or None if no match is found.
    """
    cursor = self._dbconn.execute(
        "SELECT * FROM FEATURE WHERE id = ?", (featureId,))
    row = cursor.fetchone()
    if row is None:
        return None
    return sqlite_backend.sqliteRowToDict(row)
<SYSTEM_TASK:>
Returns the representation of this FeatureSet as the corresponding
<END_TASK>
<USER_TASK:>
Description:
def toProtocolElement(self):
    """
    Returns the representation of this FeatureSet as the corresponding
    ProtocolElement.
    """
    ret = protocol.FeatureSet()
    ret.id = self.getId()
    ret.dataset_id = self.getParentContainer().getId()
    ret.reference_set_id = pb.string(self._referenceSet.getId())
    ret.name = self._name
    ret.source_uri = self._sourceUri
    # Copy every attribute value list into the protobuf attribute map.
    for key, value in self.getAttributes().items():
        ret.attributes.attr[key].values.extend(protocol.encodeValue(value))
    return ret
<SYSTEM_TASK:>
Returns server-style compound ID for an internal featureId.
<END_TASK>
<USER_TASK:>
Description:
def getCompoundIdForFeatureId(self, featureId):
    """
    Returns server-style compound ID for an internal featureId.

    :param long featureId: id of feature in database
    :return: string representing ID for the specified GA4GH protocol
        Feature object in this FeatureSet; empty string for a missing
        or empty featureId.
    """
    # Guard clause: no usable featureId gives an empty compound ID.
    if featureId is None or featureId == "":
        return ""
    return str(datamodel.FeatureCompoundId(
        self.getCompoundId(), str(featureId)))
<SYSTEM_TASK:>
Fetches a simulated feature by ID.
<END_TASK>
<USER_TASK:>
Description:
def getFeature(self, compoundId):
    """
    Fetches a simulated feature by ID.

    :param compoundId: any non-null string
    :return: A simulated feature with id set to the same value as the
        passed-in compoundId.
    :raises: exceptions.ObjectWithIdNotFoundException if None is passed
        in for the compoundId.
    """
    if compoundId is None:
        raise exceptions.ObjectWithIdNotFoundException(compoundId)
    # Seed a private RNG so the same feature is produced every call.
    rng = random.Random()
    rng.seed(self._randomSeed)
    feature = self._generateSimulatedFeature(rng)
    feature.id = str(compoundId)
    feature.parent_id = ""  # TODO: Test with nonempty parentIDs?
    return feature
<SYSTEM_TASK:>
Returns a set number of simulated features.
<END_TASK>
<USER_TASK:>
Description:
def getFeatures(self, referenceName=None, start=None, end=None,
                startIndex=None, maxResults=None,
                featureTypes=None, parentId=None,
                name=None, geneSymbol=None, numFeatures=10):
    """
    Returns a set number of simulated features.

    :param referenceName: name of reference to "search" on
    :param start: start coordinate of query
    :param end: end coordinate of query
    :param startIndex: None or int
    :param maxResults: None or int
    :param featureTypes: optional list of ontology terms to limit query
    :param parentId: optional parentId to limit query.
    :param name: the name of the feature
    :param geneSymbol: the symbol for the gene the features are on
    :param numFeatures: number of features to generate in the return.
        10 is a reasonable (if arbitrary) default.
    :return: Yields feature list
    """
    rng = random.Random()
    rng.seed(self._randomSeed)
    for featureId in range(numFeatures):
        feature = self._generateSimulatedFeature(rng)
        feature.id = self.getCompoundIdForFeatureId(featureId)
        # The feature "matches" when it overlaps the query interval on
        # the right reference and passes the (optional) type filter.
        overlaps = feature.start < end and feature.end > start
        sameReference = feature.reference_name == referenceName
        typeAccepted = (
            not featureTypes or feature.feature_type in featureTypes)
        if overlaps and sameReference and typeAccepted:
            feature.parent_id = ""  # TODO: Test nonempty parentIDs?
            yield feature
<SYSTEM_TASK:>
Populates the instance variables of this RnaQuantificationSet from the
<END_TASK>
<USER_TASK:>
Description:
def populateFromFile(self, dataUrl):
    """
    Populates the instance variables of this RnaQuantificationSet from the
    specified data URL.

    :param dataUrl: path/URL of the sqlite database holding the
        RNA quantification data.
    """
    # Remember the path so it can be persisted to the repo DB later.
    self._dbFilePath = dataUrl
    self._db = SqliteRnaBackend(self._dbFilePath)
    self.addRnaQuants()
<SYSTEM_TASK:>
Populates the instance variables of this RnaQuantificationSet from the
<END_TASK>
<USER_TASK:>
Description:
def populateFromRow(self, quantificationSetRecord):
    """
    Populates the instance variables of this RnaQuantificationSet from the
    specified DB row.

    :param quantificationSetRecord: DB row exposing ``dataurl`` and
        ``attributes`` columns.
    """
    self._dbFilePath = quantificationSetRecord.dataurl
    self.setAttributesJson(quantificationSetRecord.attributes)
    self._db = SqliteRnaBackend(self._dbFilePath)
    self.addRnaQuants()
<SYSTEM_TASK:>
Returns the list of ExpressionLevels in this RNA Quantification.
<END_TASK>
<USER_TASK:>
Description:
def getExpressionLevels(
        self, threshold=0.0, names=None, startIndex=0, maxResults=0):
    """
    Returns the list of ExpressionLevels in this RNA Quantification.

    :param float threshold: minimum expression value to include.
    :param names: optional list of expression names to restrict to;
        defaults to no name filtering.
    :param int startIndex: paging offset into the result set.
    :param int maxResults: maximum number of results (0 for no limit).
    :return: list of SqliteExpressionLevel objects.
    """
    # Fixed: the original used a mutable default argument (names=[]),
    # which is shared across calls; use None as the sentinel instead.
    if names is None:
        names = []
    rnaQuantificationId = self.getLocalId()
    with self._db as dataSource:
        expressionsReturned = dataSource.searchExpressionLevelsInDb(
            rnaQuantificationId,
            names=names,
            threshold=threshold,
            startIndex=startIndex,
            maxResults=maxResults)
        return [
            SqliteExpressionLevel(self, expressionEntry)
            for expressionEntry in expressionsReturned]
<SYSTEM_TASK:>
Populates this CallSet from the specified DB row.
<END_TASK>
<USER_TASK:>
Description:
def populateFromRow(self, callSetRecord):
    """
    Populates this CallSet from the specified DB row.

    :param callSetRecord: DB row exposing ``biosampleid`` and
        ``attributes`` (JSON string) columns.
    """
    self._biosampleId = callSetRecord.biosampleid
    self.setAttributesJson(callSetRecord.attributes)
<SYSTEM_TASK:>
Returns the representation of this CallSet as the corresponding
<END_TASK>
<USER_TASK:>
Description:
def toProtocolElement(self):
    """
    Returns the representation of this CallSet as the corresponding
    ProtocolElement.
    """
    variantSet = self.getParentContainer()
    ret = protocol.CallSet(biosample_id=self.getBiosampleId())
    # Creation/update times come from the parent variant set, and are
    # only set on the message when present.
    created = variantSet.getCreationTime()
    if created:
        ret.created = created
    updated = variantSet.getUpdatedTime()
    if updated:
        ret.updated = updated
    ret.id = self.getId()
    ret.name = self.getLocalId()
    ret.variant_set_ids.append(variantSet.getId())
    self.serializeAttributes(ret)
    return ret
<SYSTEM_TASK:>
Returns the AnnotationSet in this dataset with the specified 'id'
<END_TASK>
<USER_TASK:>
Description:
def getVariantAnnotationSet(self, id_):
    """
    Returns the AnnotationSet in this dataset with the specified 'id',
    raising AnnotationSetNotFoundException when it is unknown.
    """
    try:
        return self._variantAnnotationSetIdMap[id_]
    except KeyError:
        raise exceptions.AnnotationSetNotFoundException(id_)
<SYSTEM_TASK:>
Adds the specified CallSet to this VariantSet.
<END_TASK>
<USER_TASK:>
Description:
def addCallSet(self, callSet):
    """
    Adds the specified CallSet to this VariantSet, indexing it by
    id, by local name, and by position.
    """
    newId = callSet.getId()
    self._callSetIds.append(newId)
    self._callSetIdToIndex[newId] = len(self._callSetIds) - 1
    self._callSetIdMap[newId] = callSet
    self._callSetNameMap[callSet.getLocalId()] = callSet
<SYSTEM_TASK:>
Adds a CallSet for the specified sample name.
<END_TASK>
<USER_TASK:>
Description:
def addCallSetFromName(self, sampleName):
    """
    Adds a CallSet for the specified sample name.

    :param str sampleName: sample name the new CallSet represents.
    """
    callSet = CallSet(self, sampleName)
    self.addCallSet(callSet)
<SYSTEM_TASK:>
Returns a CallSet with the specified name, or raises a
<END_TASK>
<USER_TASK:>
Description:
def getCallSetByName(self, name):
    """
    Returns a CallSet with the specified name, or raises a
    CallSetNameNotFoundException if it does not exist.
    """
    try:
        return self._callSetNameMap[name]
    except KeyError:
        raise exceptions.CallSetNameNotFoundException(name)
<SYSTEM_TASK:>
Returns a CallSet with the specified id, or raises a
<END_TASK>
<USER_TASK:>
Description:
def getCallSet(self, id_):
    """
    Returns a CallSet with the specified id, or raises a
    CallSetNotFoundException if it does not exist.
    """
    try:
        return self._callSetIdMap[id_]
    except KeyError:
        raise exceptions.CallSetNotFoundException(id_)
<SYSTEM_TASK:>
Converts this VariantSet into its GA4GH protocol equivalent.
<END_TASK>
<USER_TASK:>
Description:
def toProtocolElement(self):
    """
    Converts this VariantSet into its GA4GH protocol equivalent.

    :return: protocol.VariantSet with id, dataset/reference set links,
        metadata, name and attributes populated.
    """
    protocolElement = protocol.VariantSet()
    protocolElement.id = self.getId()
    # Fixed: dataset_id and reference_set_id were previously assigned
    # twice with identical values; assign each once.
    protocolElement.dataset_id = self.getParentContainer().getId()
    protocolElement.reference_set_id = self._referenceSet.getId()
    protocolElement.metadata.extend(self.getMetadata())
    protocolElement.name = self.getLocalId()
    self.serializeAttributes(protocolElement)
    return protocolElement
<SYSTEM_TASK:>
Convenience method to set the common fields in a GA Variant
<END_TASK>
<USER_TASK:>
Description:
def _createGaVariant(self):
    """
    Convenience method to set the common fields in a GA Variant
    object from this variant set.
    """
    variant = protocol.Variant()
    # Timestamps are optional; only copy them when present.
    created = self._creationTime
    updated = self._updatedTime
    if created:
        variant.created = created
    if updated:
        variant.updated = updated
    variant.variant_set_id = self.getId()
    return variant
<SYSTEM_TASK:>
Returns an ID string suitable for the specified GA Variant
<END_TASK>
<USER_TASK:>
Description:
def getVariantId(self, gaVariant):
    """
    Returns an ID string suitable for the specified GA Variant
    object in this variant set.
    """
    digest = self.hashVariant(gaVariant)
    return str(datamodel.VariantCompoundId(
        self.getCompoundId(), gaVariant.reference_name,
        str(gaVariant.start), digest))
<SYSTEM_TASK:>
Returns the callSetId for the specified sampleName in this
<END_TASK>
<USER_TASK:>
Description:
def getCallSetId(self, sampleName):
    """
    Returns the callSetId for the specified sampleName in this
    VariantSet.

    :param str sampleName: VCF sample name to build the compound ID for.
    :return: string form of the CallSetCompoundId.
    """
    compoundId = datamodel.CallSetCompoundId(
        self.getCompoundId(), sampleName)
    return str(compoundId)
<SYSTEM_TASK:>
Produces an MD5 hash of the ga variant object to distinguish
<END_TASK>
<USER_TASK:>
Description:
def hashVariant(cls, gaVariant):
    """
    Produces an MD5 hash of the ga variant object to distinguish
    it from other variants at the same genomic coordinate.

    :param gaVariant: GA4GH Variant message with reference_bases and
        alternate_bases populated.
    :return: hex digest string of the MD5 hash.
    """
    hash_str = gaVariant.reference_bases + \
        str(tuple(gaVariant.alternate_bases))
    # Fixed: hashlib.md5 requires bytes under Python 3; the bases are
    # ASCII, so an explicit utf-8 encode is safe on Python 2 as well.
    return hashlib.md5(hash_str.encode("utf-8")).hexdigest()
<SYSTEM_TASK:>
Generate a random variant for the specified position using the
<END_TASK>
<USER_TASK:>
Description:
def generateVariant(self, referenceName, position, randomNumberGenerator):
    """
    Generate a random variant for the specified position using the
    specified random number generator. This generator should be seeded
    with a value that is unique to this position so that the same variant
    will always be produced regardless of the order it is generated in.

    :param str referenceName: reference the variant is placed on.
    :param int position: 0-based start coordinate of the variant.
    :param randomNumberGenerator: seeded random.Random instance; all
        randomness is drawn from it so output is reproducible.
    :return: a GA4GH Variant message (single-base SNP).
    """
    variant = self._createGaVariant()
    variant.reference_name = referenceName
    variant.start = position
    variant.end = position + 1  # SNPs only for now
    bases = ["A", "C", "G", "T"]
    ref = randomNumberGenerator.choice(bases)
    variant.reference_bases = ref
    # Pick the alt from the three bases that differ from the ref.
    alt = randomNumberGenerator.choice(
        [base for base in bases if base != ref])
    variant.alternate_bases.append(alt)
    # One of three filter states with equal probability:
    # 0 = not applied, 1 = applied+passed, 2 = applied+failed ('q10').
    randChoice = randomNumberGenerator.randint(0, 2)
    if randChoice == 0:
        variant.filters_applied = False
    elif randChoice == 1:
        variant.filters_applied = True
        variant.filters_passed = True
    else:
        variant.filters_applied = True
        variant.filters_passed = False
        variant.filters_failed.append('q10')
    for callSet in self.getCallSets():
        call = variant.calls.add()
        call.call_set_id = callSet.getId()
        # for now, the genotype is either [0,1], [1,1] or [1,0] with equal
        # probability; probably will want to do something more
        # sophisticated later.
        randomChoice = randomNumberGenerator.choice(
            [[0, 1], [1, 0], [1, 1]])
        call.genotype.extend(randomChoice)
        # TODO What is a reasonable model for generating these likelihoods?
        # Are these log-scaled? Spec does not say.
        call.genotype_likelihood.extend([-100, -100, -100])
    variant.id = self.getVariantId(variant)
    return variant
<SYSTEM_TASK:>
Populates this VariantSet from the specified DB row.
<END_TASK>
<USER_TASK:>
Description:
def populateFromRow(self, variantSetRecord):
    """
    Populates this VariantSet from the specified DB row.

    :param variantSetRecord: DB row with created, updated, attributes,
        dataurlindexmap and metadata columns.
    """
    self._created = variantSetRecord.created
    self._updated = variantSetRecord.updated
    self.setAttributesJson(variantSetRecord.attributes)
    # Store tuples (not lists) as map values, matching what the rest of
    # the code expects for (dataUrl, indexFile) pairs.
    self._chromFileMap = {
        key: tuple(value)
        for key, value in
        json.loads(variantSetRecord.dataurlindexmap).items()}
    self._metadata = [
        protocol.fromJson(json.dumps(jsonDict),
                          protocol.VariantSetMetadata)
        for jsonDict in json.loads(variantSetRecord.metadata)]
<SYSTEM_TASK:>
Populates this variant set using the specified lists of data
<END_TASK>
<USER_TASK:>
Description:
def populateFromFile(self, dataUrls, indexFiles):
    """
    Populates this variant set using the specified lists of data
    files and indexes. These must be in the same order, such that
    the jth index file corresponds to the jth data file.
    """
    assert len(dataUrls) == len(indexFiles)
    for url, index in zip(dataUrls, indexFiles):
        variantFile = pysam.VariantFile(url, index_filename=index)
        try:
            self._populateFromVariantFile(variantFile, url, index)
        finally:
            # Always release the file handle, even on parse errors.
            variantFile.close()
<SYSTEM_TASK:>
Populates this VariantSet by examining all the VCF files in the
<END_TASK>
<USER_TASK:>
Description:
def populateFromDirectory(self, vcfDirectory):
    """
    Populates this VariantSet by examining all the VCF files in the
    specified directory. This is mainly used as a convenience
    for testing purposes.
    """
    dataFiles = glob.glob(os.path.join(vcfDirectory, "*.vcf.gz"))
    # Tabix index files are assumed to sit next to each VCF.
    indexFiles = [dataFile + ".tbi" for dataFile in dataFiles]
    self.populateFromFile(dataFiles, indexFiles)
<SYSTEM_TASK:>
Perform consistency check on the variant set
<END_TASK>
<USER_TASK:>
Description:
def checkConsistency(self):
    """
    Perform consistency check on the variant set

    Re-opens each VCF/BCF referenced by this variant set and verifies
    that metadata and call set IDs agree across the files.
    """
    for referenceName, (dataUrl, indexFile) in self._chromFileMap.items():
        varFile = pysam.VariantFile(dataUrl, index_filename=indexFile)
        try:
            for chrom in varFile.index:
                chrom, _, _ = self.sanitizeVariantFileFetch(chrom)
                # Only contigs that actually contain records are checked.
                if not isEmptyIter(varFile.fetch(chrom)):
                    self._checkMetadata(varFile)
                    self._checkCallSetIds(varFile)
        finally:
            varFile.close()
<SYSTEM_TASK:>
Populates the instance variables of this VariantSet from the specified
<END_TASK>
<USER_TASK:>
Description:
def _populateFromVariantFile(self, varFile, dataUrl, indexFile):
    """
    Populates the instance variables of this VariantSet from the specified
    pysam VariantFile object.

    :raises exceptions.NotIndexedException: if the file has no index.
    :raises exceptions.OverlappingVcfException: if a contig with records
        appears in more than one data file.
    """
    if varFile.index is None:
        raise exceptions.NotIndexedException(dataUrl)
    for chrom in varFile.index:
        # Unlike Tabix indices, CSI indices include all contigs defined
        # in the BCF header. Thus we must test each one to see if
        # records exist or else they are likely to trigger spurious
        # overlapping errors.
        chrom, _, _ = self.sanitizeVariantFileFetch(chrom)
        if not isEmptyIter(varFile.fetch(chrom)):
            if chrom in self._chromFileMap:
                raise exceptions.OverlappingVcfException(dataUrl, chrom)
            # Map the contig to its (dataUrl, indexFile) pair.
            self._chromFileMap[chrom] = dataUrl, indexFile
    self._updateMetadata(varFile)
    self._updateCallSetIds(varFile)
    self._updateVariantAnnotationSets(varFile, dataUrl)
<SYSTEM_TASK:>
Updates the variant annotation set associated with this variant using
<END_TASK>
<USER_TASK:>
Description:
def _updateVariantAnnotationSets(self, variantFile, dataUrl):
    """
    Updates the variant annotation set associated with this variant using
    information in the specified pysam variantFile.

    Detects the annotation source (SnpEff, or VEP v77/v82) from the VCF
    header and, when one is found, registers a HtslibVariantAnnotationSet.

    :raises ValueError: on an unsupported VEP version, or when CSQ/ANN
        INFO fields are present without a recognized annotator header.
    """
    # TODO check the consistency of this between VCF files.
    if not self.isAnnotated():
        annotationType = None
        for record in variantFile.header.records:
            if record.type == "GENERIC":
                if record.key == "SnpEffVersion":
                    annotationType = ANNOTATIONS_SNPEFF
                elif record.key == "VEP":
                    # Header value starts with the version token.
                    version = record.value.split()[0]
                    # TODO we need _much_ more sophisticated processing
                    # of VEP versions here. When do they become
                    # incompatible?
                    if version == "v82":
                        annotationType = ANNOTATIONS_VEP_V82
                    elif version == "v77":
                        annotationType = ANNOTATIONS_VEP_V77
                    else:
                        # TODO raise a proper typed exception there with
                        # the file name as an argument.
                        raise ValueError(
                            "Unsupported VEP version {} in '{}'".format(
                                version, dataUrl))
        if annotationType is None:
            infoKeys = variantFile.header.info.keys()
            if 'CSQ' in infoKeys or 'ANN' in infoKeys:
                # TODO likewise, we want a properly typed exception that
                # we can throw back to the repo manager UI and display
                # as an import error.
                raise ValueError(
                    "Unsupported annotations in '{}'".format(dataUrl))
        if annotationType is not None:
            vas = HtslibVariantAnnotationSet(self, self.getLocalId())
            vas.populateFromFile(variantFile, annotationType)
            self.addVariantAnnotationSet(vas)
<SYSTEM_TASK:>
Updates the metadata for this variant set based on the specified
<END_TASK>
<USER_TASK:>
Description:
def _updateMetadata(self, variantFile):
"""
Updates the metadata for his variant set based on the specified
variant file
""" |
metadata = self._getMetadataFromVcf(variantFile)
if self._metadata is None:
self._metadata = metadata |
<SYSTEM_TASK:>
Checks that metadata is consistent
<END_TASK>
<USER_TASK:>
Description:
def _checkMetadata(self, variantFile):
"""
Checks that metadata is consistent
""" |
metadata = self._getMetadataFromVcf(variantFile)
if self._metadata is not None and self._metadata != metadata:
raise exceptions.InconsistentMetaDataException(
variantFile.filename) |
<SYSTEM_TASK:>
Checks callSetIds for consistency
<END_TASK>
<USER_TASK:>
Description:
def _checkCallSetIds(self, variantFile):
"""
Checks callSetIds for consistency
""" |
if len(self._callSetIdMap) > 0:
callSetIds = set([
self.getCallSetId(sample)
for sample in variantFile.header.samples])
if callSetIds != set(self._callSetIdMap.keys()):
raise exceptions.InconsistentCallSetIdException(
variantFile.filename) |
<SYSTEM_TASK:>
Converts the specified pysam variant record into a GA4GH Variant
<END_TASK>
<USER_TASK:>
Description:
def convertVariant(self, record, callSetIds):
    """
    Converts the specified pysam variant record into a GA4GH Variant
    object. Only calls for the specified list of callSetIds will
    be included.

    :param record: pysam VariantRecord to convert.
    :param callSetIds: list of call set IDs whose calls are attached.
    :return: protocol.Variant
    """
    variant = self._createGaVariant()
    variant.reference_name = record.contig
    # A VCF ID column may hold several names joined by ';'.
    if record.id is not None:
        variant.names.extend(record.id.split(';'))
    variant.start = record.start  # 0-based inclusive
    variant.end = record.stop  # 0-based exclusive
    variant.reference_bases = record.ref
    if record.alts is not None:
        variant.alternate_bases.extend(list(record.alts))
    filterKeys = record.filter.keys()
    if len(filterKeys) == 0:
        variant.filters_applied = False
    else:
        variant.filters_applied = True
        # A lone 'PASS' entry means every filter passed.
        if len(filterKeys) == 1 and filterKeys[0] == 'PASS':
            variant.filters_passed = True
        else:
            variant.filters_passed = False
            variant.filters_failed.extend(filterKeys)
    # record.qual is also available, when supported by GAVariant.
    # NOTE(review): iteritems() is presumably the pysam VariantRecordInfo
    # API of the era (this codebase is Python 2) — confirm before porting.
    for key, value in record.info.iteritems():
        if value is None:
            continue
        if key == 'SVTYPE':
            variant.variant_type = value
        elif key == 'SVLEN':
            variant.svlen = int(value[0])
        elif key == 'CIPOS':
            variant.cipos.extend(value)
        elif key == 'CIEND':
            variant.ciend.extend(value)
        elif isinstance(value, str):
            value = value.split(',')
        # Every INFO key is also mirrored into the attribute map.
        protocol.setAttribute(
            variant.attributes.attr[key].values, value)
    for callSetId in callSetIds:
        callSet = self.getCallSet(callSetId)
        pysamCall = record.samples[str(callSet.getSampleName())]
        variant.calls.add().CopyFrom(
            self._convertGaCall(callSet, pysamCall))
    variant.id = self.getVariantId(variant)
    return variant
<SYSTEM_TASK:>
Returns an iterator over the pysam VCF records corresponding to the
<END_TASK>
<USER_TASK:>
Description:
def getPysamVariants(self, referenceName, startPosition, endPosition):
    """
    Returns an iterator over the pysam VCF records corresponding to the
    specified query; yields nothing for an unknown reference.
    """
    if referenceName not in self._chromFileMap:
        return
    varFileName = self._chromFileMap[referenceName]
    referenceName, startPosition, endPosition = \
        self.sanitizeVariantFileFetch(
            referenceName, startPosition, endPosition)
    cursor = self.getFileHandle(varFileName).fetch(
        referenceName, startPosition, endPosition)
    for record in cursor:
        yield record
<SYSTEM_TASK:>
Returns an iterator over the specified variants. The parameters
<END_TASK>
<USER_TASK:>
Description:
def getVariants(self, referenceName, startPosition, endPosition,
                callSetIds=()):
    """
    Returns an iterator over the specified variants. The parameters
    correspond to the attributes of a GASearchVariantsRequest object.

    :param callSetIds: iterable of call set IDs to attach calls for; an
        empty iterable (the default) attaches no calls, while None means
        all call sets in this variant set.
    :raises exceptions.CallSetNotInVariantSetException: for an unknown
        call set ID.
    """
    # Fixed: the default was a mutable list ([]); an empty tuple has
    # identical behavior without the shared-mutable-default hazard.
    if callSetIds is None:
        callSetIds = self._callSetIds
    else:
        for callSetId in callSetIds:
            if callSetId not in self._callSetIds:
                raise exceptions.CallSetNotInVariantSetException(
                    callSetId, self.getId())
    for record in self.getPysamVariants(
            referenceName, startPosition, endPosition):
        yield self.convertVariant(record, callSetIds)
<SYSTEM_TASK:>
Convenience method to set the common fields in a GA VariantAnnotation
<END_TASK>
<USER_TASK:>
Description:
def _createGaVariantAnnotation(self):
    """
    Convenience method to set the common fields in a GA VariantAnnotation
    object from this variant set.

    :return: protocol.VariantAnnotation with created and
        variant_annotation_set_id populated.
    """
    ret = protocol.VariantAnnotation()
    ret.created = self._creationTime
    ret.variant_annotation_set_id = self.getId()
    return ret
<SYSTEM_TASK:>
Converts this VariantAnnotationSet into its GA4GH protocol equivalent.
<END_TASK>
<USER_TASK:>
Description:
def toProtocolElement(self):
    """
    Converts this VariantAnnotationSet into its GA4GH protocol equivalent.
    """
    ret = protocol.VariantAnnotationSet()
    ret.id = self.getId()
    ret.variant_set_id = self._variantSet.getId()
    ret.name = self.getLocalId()
    # Analysis is an embedded message, so it must be copied in.
    ret.analysis.CopyFrom(self.getAnalysis())
    self.serializeAttributes(ret)
    return ret
<SYSTEM_TASK:>
Generate a random variant annotation based on a given variant.
<END_TASK>
<USER_TASK:>
Description:
def generateVariantAnnotation(self, variant):
    """
    Generate a random variant annotation based on a given variant.
    This generator should be seeded with a value that is unique to the
    variant so that the same annotation will always be produced regardless
    of the order it is generated in.
    """
    # Seed deterministically from the variant's coordinates so repeated
    # calls on the same variant reproduce the same annotation.
    rng = random.Random(self._randomSeed + variant.start + variant.end)
    annotation = protocol.VariantAnnotation()
    annotation.variant_annotation_set_id = str(self.getCompoundId())
    annotation.variant_id = variant.id
    annotation.created = datetime.datetime.now().isoformat() + "Z"
    # make a transcript effect for each alternate base element
    # multiplied by a random integer (1,5)
    for base in variant.alternate_bases:
        annotation.transcript_effects.add().CopyFrom(
            self.generateTranscriptEffect(
                variant, annotation, base, rng))
    annotation.id = self.getVariantAnnotationId(variant, annotation)
    return annotation
<SYSTEM_TASK:>
Populates this VariantAnnotationSet from the specified DB row.
<END_TASK>
<USER_TASK:>
Description:
def populateFromRow(self, annotationSetRecord):
    """
    Populates this VariantAnnotationSet from the specified DB row.

    :param annotationSetRecord: DB row with annotationtype, analysis
        (JSON), created, updated and attributes columns.
    """
    self._annotationType = annotationSetRecord.annotationtype
    self._analysis = protocol.fromJson(
        annotationSetRecord.analysis, protocol.Analysis)
    self._creationTime = annotationSetRecord.created
    self._updatedTime = annotationSetRecord.updated
    self.setAttributesJson(annotationSetRecord.attributes)
<SYSTEM_TASK:>
Assembles metadata within the VCF header into a GA4GH Analysis object.
<END_TASK>
<USER_TASK:>
Description:
def _getAnnotationAnalysis(self, varFile):
    """
    Assembles metadata within the VCF header into a GA4GH Analysis object.
    :return: protocol.Analysis
    """
    header = varFile.header
    analysis = protocol.Analysis()
    formats = header.formats.items()
    infos = header.info.items()
    filters = header.filters.items()
    # Record each FORMAT/INFO/FILTER description under a
    # "<PREFIX>.<name>" attribute key.
    for prefix, content in [("FORMAT", formats), ("INFO", infos),
                            ("FILTER", filters)]:
        for contentKey, value in content:
            key = "{0}.{1}".format(prefix, value.name)
            if key not in analysis.attributes.attr:
                analysis.attributes.attr[key].Clear()
            if value.description is not None:
                analysis.attributes.attr[
                    key].values.add().string_value = value.description
    analysis.created = self._creationTime
    analysis.updated = self._updatedTime
    # Generic header records: mirror into attributes and pick out a few
    # well-known keys (dates, software, name, description).
    for r in header.records:
        # Don't add a key to info if there's nothing in the value
        if r.value is not None:
            if r.key not in analysis.attributes.attr:
                analysis.attributes.attr[r.key].Clear()
            analysis.attributes.attr[r.key] \
                .values.add().string_value = str(r.value)
        if r.key == "created" or r.key == "fileDate":
            # TODO handle more date formats
            try:
                if '-' in r.value:
                    fmtStr = "%Y-%m-%d"
                else:
                    fmtStr = "%Y%m%d"
                analysis.created = datetime.datetime.strptime(
                    r.value, fmtStr).isoformat() + "Z"
            except ValueError:
                # is there a logger we should tell?
                # print("INFO: Could not parse variant annotation time")
                pass  # analysis.create_date_time remains datetime.now()
        if r.key == "software":
            analysis.software.append(r.value)
        if r.key == "name":
            analysis.name = r.value
        if r.key == "description":
            analysis.description = r.value
    analysis.id = str(datamodel.VariantAnnotationSetAnalysisCompoundId(
        self._compoundId, "analysis"))
    return analysis
<SYSTEM_TASK:>
Converts the specified pysam variant record into a GA4GH variant
<END_TASK>
<USER_TASK:>
Description:
def convertVariantAnnotation(self, record):
    """
    Converts the specified pysam variant record into a GA4GH variant
    annotation object using the specified function to convert the
    transcripts.

    :param record: pysam VariantRecord carrying ANN or CSQ INFO fields.
    :return: (variant, annotation) pair.
    """
    # No calls are attached here (empty call set ID list).
    variant = self._variantSet.convertVariant(record, [])
    annotation = self._createGaVariantAnnotation()
    annotation.variant_id = variant.id
    gDots = record.info.get(b'HGVS.g')
    # Convert annotations from INFO field into TranscriptEffect
    transcriptEffects = []
    # NOTE(review): assumes at least one of ANN/CSQ is present; if both
    # are missing, enumerate(None) raises TypeError — confirm upstream
    # guarantees annotated input.
    annotations = record.info.get(b'ANN') or record.info.get(b'CSQ')
    for i, ann in enumerate(annotations):
        # HGVS.g entries cycle per alternate base; map each annotation
        # back to the g-dot of its alt.
        hgvsG = gDots[i % len(variant.alternate_bases)] if gDots else None
        transcriptEffects.append(self.convertTranscriptEffect(ann, hgvsG))
    annotation.transcript_effects.extend(transcriptEffects)
    annotation.id = self.getVariantAnnotationId(variant, annotation)
    return variant, annotation
<SYSTEM_TASK:>
Return name=value for a single attribute
<END_TASK>
<USER_TASK:>
Description:
def _attributeStr(self, name):
    """
    Return name=value for a single attribute
    """
    encodedValues = [_encodeAttr(v) for v in self.attributes[name]]
    return "{}={}".format(_encodeAttr(name), ",".join(encodedValues))
<SYSTEM_TASK:>
Return name=value, semi-colon-separated string for attributes,
<END_TASK>
<USER_TASK:>
Description:
def _attributeStrs(self):
"""
Return name=value, semi-colon-separated string for attributes,
including url-style quoting
""" |
return ";".join([self._attributeStr(name)
for name in self.attributes.iterkeys()]) |
<SYSTEM_TASK:>
ID attribute from GFF3 or None if record doesn't have it.
<END_TASK>
<USER_TASK:>
Description:
def featureName(self):
    """
    ID attribute from GFF3 or None if record doesn't have it.
    Called "Name" rather than "Id" within GA4GH, as there is
    no guarantee of either uniqueness or existence.
    """
    ids = self.attributes.get("ID")
    # Attribute values are lists; the first entry is the ID itself.
    return ids[0] if ids is not None else None
<SYSTEM_TASK:>
Link a feature with its parents.
<END_TASK>
<USER_TASK:>
Description:
def _linkFeature(self, feature):
"""
Link a feature with its parents.
""" |
parentNames = feature.attributes.get("Parent")
if parentNames is None:
self.roots.add(feature)
else:
for parentName in parentNames:
self._linkToParent(feature, parentName) |
<SYSTEM_TASK:>
Link a feature with its children
<END_TASK>
<USER_TASK:>
Description:
def _linkToParent(self, feature, parentName):
"""
Link a feature with its children
""" |
parentParts = self.byFeatureName.get(parentName)
if parentParts is None:
raise GFF3Exception(
"Parent feature does not exist: {}".format(parentName),
self.fileName)
# parent maybe disjoint
for parentPart in parentParts:
feature.parents.add(parentPart)
parentPart.children.add(feature) |
<SYSTEM_TASK:>
finish loading the set, constructing the tree
<END_TASK>
<USER_TASK:>
Description:
def linkChildFeaturesToParents(self):
    """
    Finish loading the set, constructing the feature tree by linking
    every feature to its parents.
    """
    # features maybe disjoint (several parts under one name).
    # Fixed: itervalues() is Python-2-only; values() behaves the same
    # here and works under both Python 2 and 3.
    for featureParts in self.byFeatureName.values():
        for feature in featureParts:
            self._linkFeature(feature)
<SYSTEM_TASK:>
Writes a single record to a file provided by the filehandle fh.
<END_TASK>
<USER_TASK:>
Description:
def _writeRec(self, fh, rec):
"""
Writes a single record to a file provided by the filehandle fh.
""" |
fh.write(str(rec) + "\n")
for child in sorted(rec.children, key=self._recSortKey):
self._writeRec(fh, child) |
<SYSTEM_TASK:>
Write set to a GFF3 format file.
<END_TASK>
<USER_TASK:>
Description:
def write(self, fh):
    """
    Write set to a GFF3 format file.
    :param file fh: file handle for file to write to
    """
    fh.write(GFF3_HEADER + "\n")
    # Each root is written with its whole subtree, in sort-key order.
    for root in sorted(self.roots, key=self._recSortKey):
        self._writeRec(fh, root)
<SYSTEM_TASK:>
open input file, optionally with decompression
<END_TASK>
<USER_TASK:>
Description:
def _open(self):
    """
    Open the input file, choosing a decompressor from the extension.
    """
    name = self.fileName
    if name.endswith(".gz"):
        return gzip.open(name)
    if name.endswith(".bz2"):
        return bz2.BZ2File(name)
    return open(name)
<SYSTEM_TASK:>
Parse the attributes and values
<END_TASK>
<USER_TASK:>
Description:
def _parseAttrs(self, attrsStr):
"""
Parse the attributes and values
""" |
attributes = dict()
for attrStr in self.SPLIT_ATTR_COL_RE.split(attrsStr):
name, vals = self._parseAttrVal(attrStr)
if name in attributes:
raise GFF3Exception(
"duplicated attribute name: {}".format(name),
self.fileName, self.lineNumber)
attributes[name] = vals
return attributes |
<SYSTEM_TASK:>
Run the parse and return the resulting Gff3Set object.
<END_TASK>
<USER_TASK:>
Description:
def parse(self):
    """
    Run the parse and return the resulting Gff3Set object.
    """
    fh = self._open()
    try:
        result = Gff3Set(self.fileName)
        for rawLine in fh:
            self.lineNumber += 1
            # Strip the trailing newline before parsing.
            self._parseLine(result, rawLine[:-1])
    finally:
        fh.close()
    result.linkChildFeaturesToParents()
    return result
<SYSTEM_TASK:>
Adds the specified dataset to this data repository.
<END_TASK>
<USER_TASK:>
Description:
def addDataset(self, dataset):
    """
    Adds the specified dataset to this data repository, indexing it
    by id and by local name.
    """
    datasetId = dataset.getId()
    self._datasetIds.append(datasetId)
    self._datasetIdMap[datasetId] = dataset
    self._datasetNameMap[dataset.getLocalId()] = dataset
<SYSTEM_TASK:>
Adds the specified reference set to this data repository.
<END_TASK>
<USER_TASK:>
Description:
def addReferenceSet(self, referenceSet):
    """
    Adds the specified reference set to this data repository, indexing
    it by id and by local name.
    """
    referenceSetId = referenceSet.getId()
    self._referenceSetIds.append(referenceSetId)
    self._referenceSetIdMap[referenceSetId] = referenceSet
    self._referenceSetNameMap[referenceSet.getLocalId()] = referenceSet
<SYSTEM_TASK:>
Add an ontology map to this data repository.
<END_TASK>
<USER_TASK:>
Description:
def addOntology(self, ontology):
    """
    Add an ontology map to this data repository, indexing it by name
    and by id.
    """
    ontologyId = ontology.getId()
    self._ontologyIdMap[ontologyId] = ontology
    self._ontologyNameMap[ontology.getName()] = ontology
    self._ontologyIds.append(ontologyId)
<SYSTEM_TASK:>
Select the first peer in the datarepo with the given url simulating
<END_TASK>
<USER_TASK:>
Description:
def getPeer(self, url):
    """
    Select the first peer in the datarepo with the given url simulating
    the behavior of selecting by URL. This is only used during testing.

    :param str url: URL to match against each peer's getUrl().
    :return: the first matching peer.
    :raises exceptions.PeerNotFoundException: if no peer matches.
    """
    # Fixed: filter() returns a lazy iterator under Python 3, which has
    # no len() and cannot be indexed; a list comprehension works on
    # both Python 2 and 3.
    matches = [peer for peer in self.getPeers() if peer.getUrl() == url]
    if not matches:
        raise exceptions.PeerNotFoundException(url)
    return matches[0]
<SYSTEM_TASK:>
Returns a dataset with the specified ID, or raises a
<END_TASK>
<USER_TASK:>
Description:
def getDataset(self, id_):
    """
    Return the dataset with ID *id_*, or raise DatasetNotFoundException
    when no such dataset is registered.
    """
    try:
        return self._datasetIdMap[id_]
    except KeyError:
        raise exceptions.DatasetNotFoundException(id_)
<SYSTEM_TASK:>
Returns the dataset with the specified name.
<END_TASK>
<USER_TASK:>
Description:
def getDatasetByName(self, name):
    """
    Return the dataset with the given local name, or raise
    DatasetNameNotFoundException when none matches.
    """
    try:
        return self._datasetNameMap[name]
    except KeyError:
        raise exceptions.DatasetNameNotFoundException(name)
<SYSTEM_TASK:>
Returns the ontology with the specified ID.
<END_TASK>
<USER_TASK:>
Description:
def getOntology(self, id_):
    """
    Return the ontology with ID *id_*, or raise OntologyNotFoundException
    when no such ontology is registered.
    """
    try:
        return self._ontologyIdMap[id_]
    except KeyError:
        raise exceptions.OntologyNotFoundException(id_)
<SYSTEM_TASK:>
Returns an ontology by name
<END_TASK>
<USER_TASK:>
Description:
def getOntologyByName(self, name):
    """
    Return the ontology with the given name, or raise
    OntologyNameNotFoundException when none matches.
    """
    try:
        return self._ontologyNameMap[name]
    except KeyError:
        raise exceptions.OntologyNameNotFoundException(name)
<SYSTEM_TASK:>
Returns the ReferenceSet with the specified ID, or raises a
<END_TASK>
<USER_TASK:>
Description:
def getReferenceSet(self, id_):
    """
    Return the ReferenceSet with ID *id_*, or raise
    ReferenceSetNotFoundException when no such set exists.
    """
    try:
        return self._referenceSetIdMap[id_]
    except KeyError:
        raise exceptions.ReferenceSetNotFoundException(id_)
<SYSTEM_TASK:>
Returns the reference set with the specified name.
<END_TASK>
<USER_TASK:>
Description:
def getReferenceSetByName(self, name):
    """
    Return the reference set with the given name, or raise
    ReferenceSetNameNotFoundException when none matches.
    """
    try:
        return self._referenceSetNameMap[name]
    except KeyError:
        raise exceptions.ReferenceSetNameNotFoundException(name)
<SYSTEM_TASK:>
Return an iterator over all read groups in the data repo
<END_TASK>
<USER_TASK:>
Description:
def allReadGroups(self):
    """Yield every read group held by any dataset in this repository."""
    for dataset in self.getDatasets():
        for groupSet in dataset.getReadGroupSets():
            for group in groupSet.getReadGroups():
                yield group
<SYSTEM_TASK:>
Return an iterator over all features in the data repo
<END_TASK>
<USER_TASK:>
Description:
def allFeatures(self):
    """Yield every feature from every feature set in this repository."""
    for dataset in self.getDatasets():
        for fSet in dataset.getFeatureSets():
            for feat in fSet.getFeatures():
                yield feat
<SYSTEM_TASK:>
Return an iterator over all call sets in the data repo
<END_TASK>
<USER_TASK:>
Description:
def allCallSets(self):
    """Yield every call set from every variant set in this repository."""
    for dataset in self.getDatasets():
        for vSet in dataset.getVariantSets():
            for cSet in vSet.getCallSets():
                yield cSet
<SYSTEM_TASK:>
Return an iterator over all expression levels
<END_TASK>
<USER_TASK:>
Description:
def allExpressionLevels(self):
    """Yield every expression level from every RNA quantification."""
    for dataset in self.getDatasets():
        for quantSet in dataset.getRnaQuantificationSets():
            for quant in quantSet.getRnaQuantifications():
                for level in quant.getExpressionLevels():
                    yield level
<SYSTEM_TASK:>
Finds a peer by URL and return the first peer record with that URL.
<END_TASK>
<USER_TASK:>
Description:
def getPeer(self, url):
    """
    Return the first stored Peer record whose URL equals *url*, or raise
    PeerNotFoundException when there is none.
    """
    matching = list(models.Peer.select().where(models.Peer.url == url))
    if not matching:
        raise exceptions.PeerNotFoundException(url)
    return matching[0]
<SYSTEM_TASK:>
Get the list of peers using an SQL offset and limit. Returns a list
<END_TASK>
<USER_TASK:>
Description:
def getPeers(self, offset=0, limit=1000):
    """
    Return up to *limit* peers starting at *offset*, ordered by URL,
    wrapped as peer datamodel objects in a list.
    """
    query = models.Peer.select().order_by(
        models.Peer.url).limit(limit).offset(offset)
    return [peers.Peer(record.url, record=record) for record in query]
<SYSTEM_TASK:>
Takes a model class and attempts to create a table in TSV format
<END_TASK>
<USER_TASK:>
Description:
def tableToTsv(self, model):
    """
    Print *model*'s rows as tab-separated values suitable for import
    into a spreadsheet program, with a header line printed before the
    first row (nothing is printed for an empty table).
    """
    fieldNames = list(model._meta.fields.keys())
    printedHeader = False
    for row in model.select():
        if not printedHeader:
            print("".join("{}\t".format(name) for name in fieldNames))
            printedHeader = True
        print("".join(
            "{}\t".format(getattr(row, name)) for name in fieldNames))
<SYSTEM_TASK:>
Adds an announcement to the registry for later analysis.
<END_TASK>
<USER_TASK:>
Description:
def insertAnnouncement(self, announcement):
    """
    Adds an announcement to the registry for later analysis.

    :raises BadUrlException: when the announced URL fails peer validation.
    :raises RepoManagerException: when the database insert fails.
    """
    url = announcement.get('url', None)
    try:
        # Validate the URL by constructing a transient Peer from it.
        peers.Peer(url)
    except Exception:
        # Bug fix: a bare `except:` also swallowed SystemExit and
        # KeyboardInterrupt; catch Exception instead.
        raise exceptions.BadUrlException(url)
    try:
        # TODO get more details about the user agent
        models.Announcement.create(
            url=announcement.get('url'),
            attributes=json.dumps(announcement.get('attributes', {})),
            remote_addr=announcement.get('remote_addr', None),
            user_agent=announcement.get('user_agent', None))
    except Exception as e:
        raise exceptions.RepoManagerException(e)
<SYSTEM_TASK:>
Opens this repo in the specified mode.
<END_TASK>
<USER_TASK:>
Description:
def open(self, mode=MODE_READ):
    """
    Opens this repo in the specified mode (MODE_READ or MODE_WRITE).

    TODO: figure out the correct semantics of this and document
    the intended future behaviour as well as the current
    transitional behaviour.
    """
    if mode not in [MODE_READ, MODE_WRITE]:
        raise ValueError(
            "Open mode must be '{}' or '{}'".format(MODE_READ, MODE_WRITE))
    self._openMode = mode
    if mode == MODE_READ:
        self.assertExists()
        # Transitional behaviour: load the whole DB into memory so the
        # data model is accessible.
        self.load()
<SYSTEM_TASK:>
Inserts the specified ontology into this repository.
<END_TASK>
<USER_TASK:>
Description:
def insertOntology(self, ontology):
    """
    Inserts the specified ontology into this repository, raising
    DuplicateNameException when the insert fails.
    """
    try:
        # The ontology's name doubles as its primary key.
        models.Ontology.create(
            id=ontology.getName(),
            name=ontology.getName(),
            dataurl=ontology.getDataUrl(),
            ontologyprefix=ontology.getOntologyPrefix())
    except Exception:
        raise exceptions.DuplicateNameException(ontology.getName())
<SYSTEM_TASK:>
Removes the specified ontology term map from this repository.
<END_TASK>
<USER_TASK:>
Description:
def removeOntology(self, ontology):
    """
    Removes the specified ontology term map from this repository.
    """
    # Bug fix: the filter previously compared the Python builtin `id`
    # function against the ontology's ID, which always evaluates False,
    # so the DELETE matched no rows. Compare the model column instead.
    q = models.Ontology.delete().where(
        models.Ontology.id == ontology.getId())
    q.execute()
<SYSTEM_TASK:>
Inserts the specified reference into this repository.
<END_TASK>
<USER_TASK:>
Description:
def insertReference(self, reference):
    """Insert a row describing *reference* into the Reference table."""
    # JSON-encode the structured fields; everything else maps directly.
    models.Reference.create(
        id=reference.getId(),
        referencesetid=reference.getParentContainer().getId(),
        name=reference.getLocalId(),
        length=reference.getLength(),
        isderived=reference.getIsDerived(),
        species=json.dumps(reference.getSpecies()),
        md5checksum=reference.getMd5Checksum(),
        sourceaccessions=json.dumps(reference.getSourceAccessions()),
        sourceuri=reference.getSourceUri())
<SYSTEM_TASK:>
Inserts the specified referenceSet into this repository.
<END_TASK>
<USER_TASK:>
Description:
def insertReferenceSet(self, referenceSet):
    """
    Insert *referenceSet* and all of its references into this repository,
    raising DuplicateNameException when the insert fails.
    """
    try:
        models.Referenceset.create(
            id=referenceSet.getId(),
            name=referenceSet.getLocalId(),
            description=referenceSet.getDescription(),
            assemblyid=referenceSet.getAssemblyId(),
            isderived=referenceSet.getIsDerived(),
            species=json.dumps(referenceSet.getSpecies()),
            md5checksum=referenceSet.getMd5Checksum(),
            sourceaccessions=json.dumps(
                referenceSet.getSourceAccessions()),
            sourceuri=referenceSet.getSourceUri(),
            dataurl=referenceSet.getDataUrl())
        # The contained references are inserted as part of the same call.
        for reference in referenceSet.getReferences():
            self.insertReference(reference)
    except Exception:
        raise exceptions.DuplicateNameException(referenceSet.getLocalId())
<SYSTEM_TASK:>
Inserts the specified dataset into this repository.
<END_TASK>
<USER_TASK:>
Description:
def insertDataset(self, dataset):
    """
    Inserts the specified dataset into this repository, raising
    DuplicateNameException when the insert fails.
    """
    try:
        models.Dataset.create(
            id=dataset.getId(),
            name=dataset.getLocalId(),
            description=dataset.getDescription(),
            attributes=json.dumps(dataset.getAttributes()))
    except Exception:
        raise exceptions.DuplicateNameException(dataset.getLocalId())
<SYSTEM_TASK:>
Removes the specified dataset from this repository. This performs
<END_TASK>
<USER_TASK:>
Description:
def removeDataset(self, dataset):
    """
    Remove *dataset* from this repository, cascading the removal to
    every item the dataset contains.
    """
    matches = models.Dataset.select().where(
        models.Dataset.id == dataset.getId())
    for record in matches:
        record.delete_instance(recursive=True)
<SYSTEM_TASK:>
Remove a phenotype association set from the repo
<END_TASK>
<USER_TASK:>
Description:
def removePhenotypeAssociationSet(self, phenotypeAssociationSet):
    """Delete the registry row for the given phenotype association set."""
    deletion = models.Phenotypeassociationset.delete().where(
        models.Phenotypeassociationset.id ==
        phenotypeAssociationSet.getId())
    deletion.execute()
<SYSTEM_TASK:>
Removes the specified featureSet from this repository.
<END_TASK>
<USER_TASK:>
Description:
def removeFeatureSet(self, featureSet):
    """Delete the registry row for the given feature set."""
    deletion = models.Featureset.delete().where(
        models.Featureset.id == featureSet.getId())
    deletion.execute()
<SYSTEM_TASK:>
Removes the specified continuousSet from this repository.
<END_TASK>
<USER_TASK:>
Description:
def removeContinuousSet(self, continuousSet):
    """Delete the registry row for the given continuous set."""
    deletion = models.ContinuousSet.delete().where(
        models.ContinuousSet.id == continuousSet.getId())
    deletion.execute()
<SYSTEM_TASK:>
Inserts the specified readGroup into the DB.
<END_TASK>
<USER_TASK:>
Description:
def insertReadGroup(self, readGroup):
    """
    Insert a row for *readGroup*, raising RepoManagerException when the
    database insert fails.
    """
    # Serialise the protobuf sub-messages up front.
    statsJson = json.dumps(protocol.toJsonDict(readGroup.getStats()))
    experimentJson = json.dumps(
        protocol.toJsonDict(readGroup.getExperiment()))
    try:
        models.Readgroup.create(
            id=readGroup.getId(),
            readgroupsetid=readGroup.getParentContainer().getId(),
            name=readGroup.getLocalId(),
            # NOTE(review): the column name "predictedinsertedsize"
            # looks misspelled, but it is kept as-is to match the schema.
            predictedinsertedsize=readGroup.getPredictedInsertSize(),
            samplename=readGroup.getSampleName(),
            description=readGroup.getDescription(),
            stats=statsJson,
            experiment=experimentJson,
            biosampleid=readGroup.getBiosampleId(),
            attributes=json.dumps(readGroup.getAttributes()))
    except Exception as e:
        raise exceptions.RepoManagerException(e)
<SYSTEM_TASK:>
Removes the specified readGroupSet from this repository. This performs
<END_TASK>
<USER_TASK:>
Description:
def removeReadGroupSet(self, readGroupSet):
    """
    Remove *readGroupSet* from this repository, cascading the removal
    to every item the set contains.
    """
    matches = models.Readgroupset.select().where(
        models.Readgroupset.id == readGroupSet.getId())
    for record in matches:
        record.delete_instance(recursive=True)
<SYSTEM_TASK:>
Removes the specified variantSet from this repository. This performs
<END_TASK>
<USER_TASK:>
Description:
def removeVariantSet(self, variantSet):
    """
    Remove *variantSet* from this repository, cascading the removal
    to every item the set contains.
    """
    matches = models.Variantset.select().where(
        models.Variantset.id == variantSet.getId())
    for record in matches:
        record.delete_instance(recursive=True)
<SYSTEM_TASK:>
Removes the specified biosample from this repository.
<END_TASK>
<USER_TASK:>
Description:
def removeBiosample(self, biosample):
    """Delete the registry row for the given biosample."""
    deletion = models.Biosample.delete().where(
        models.Biosample.id == biosample.getId())
    deletion.execute()
<SYSTEM_TASK:>
Removes the specified individual from this repository.
<END_TASK>
<USER_TASK:>
Description:
def removeIndividual(self, individual):
    """Delete the registry row for the given individual."""
    deletion = models.Individual.delete().where(
        models.Individual.id == individual.getId())
    deletion.execute()
<SYSTEM_TASK:>
Inserts the specified readGroupSet into this repository.
<END_TASK>
<USER_TASK:>
Description:
def insertReadGroupSet(self, readGroupSet):
    """
    Insert *readGroupSet* and its read groups into this repository,
    raising RepoManagerException when the database insert fails.
    """
    # Serialise the protobuf fields before attempting the insert.
    programsJson = json.dumps(
        [protocol.toJsonDict(program) for program in
         readGroupSet.getPrograms()])
    statsJson = json.dumps(protocol.toJsonDict(readGroupSet.getStats()))
    try:
        models.Readgroupset.create(
            id=readGroupSet.getId(),
            datasetid=readGroupSet.getParentContainer().getId(),
            referencesetid=readGroupSet.getReferenceSet().getId(),
            name=readGroupSet.getLocalId(),
            programs=programsJson,
            stats=statsJson,
            dataurl=readGroupSet.getDataUrl(),
            indexfile=readGroupSet.getIndexFile(),
            attributes=json.dumps(readGroupSet.getAttributes()))
        # The contained read groups are inserted as part of this call.
        for readGroup in readGroupSet.getReadGroups():
            self.insertReadGroup(readGroup)
    except Exception as e:
        raise exceptions.RepoManagerException(e)
<SYSTEM_TASK:>
Removes the specified referenceSet from this repository. This performs
<END_TASK>
<USER_TASK:>
Description:
def removeReferenceSet(self, referenceSet):
    """
    Remove *referenceSet* and the references within it. Items that merely
    refer to this reference set (e.g. ReadGroupSets) are NOT removed and
    must be deleted first; otherwise RepoManagerException is raised.
    """
    try:
        # Delete child references first, then the set itself.
        models.Reference.delete().where(
            models.Reference.referencesetid ==
            referenceSet.getId()).execute()
        models.Referenceset.delete().where(
            models.Referenceset.id == referenceSet.getId()).execute()
    except Exception:
        msg = ("Unable to delete reference set. "
               "There are objects currently in the registry which are "
               "aligned against it. Remove these objects before removing "
               "the reference set.")
        raise exceptions.RepoManagerException(msg)
<SYSTEM_TASK:>
Inserts the specified variantAnnotationSet into this repository.
<END_TASK>
<USER_TASK:>
Description:
def insertVariantAnnotationSet(self, variantAnnotationSet):
    """
    Insert *variantAnnotationSet* into this repository, raising
    RepoManagerException when the database insert fails.
    """
    analysisJson = json.dumps(
        protocol.toJsonDict(variantAnnotationSet.getAnalysis()))
    try:
        models.Variantannotationset.create(
            id=variantAnnotationSet.getId(),
            variantsetid=variantAnnotationSet.getParentContainer().getId(),
            ontologyid=variantAnnotationSet.getOntology().getId(),
            name=variantAnnotationSet.getLocalId(),
            analysis=analysisJson,
            annotationtype=variantAnnotationSet.getAnnotationType(),
            created=variantAnnotationSet.getCreationTime(),
            updated=variantAnnotationSet.getUpdatedTime(),
            attributes=json.dumps(variantAnnotationSet.getAttributes()))
    except Exception as e:
        raise exceptions.RepoManagerException(e)
<SYSTEM_TASK:>
Inserts the specified callSet into this repository.
<END_TASK>
<USER_TASK:>
Description:
def insertCallSet(self, callSet):
    """
    Insert *callSet* into this repository, raising RepoManagerException
    when the database insert fails.
    """
    try:
        models.Callset.create(
            id=callSet.getId(),
            name=callSet.getLocalId(),
            variantsetid=callSet.getParentContainer().getId(),
            biosampleid=callSet.getBiosampleId(),
            attributes=json.dumps(callSet.getAttributes()))
    except Exception as e:
        raise exceptions.RepoManagerException(e)
<SYSTEM_TASK:>
Inserts the specified variantSet into this repository.
<END_TASK>
<USER_TASK:>
Description:
def insertVariantSet(self, variantSet):
    """
    Insert *variantSet* and its call sets into this repository, raising
    RepoManagerException when the database insert fails.
    """
    # We cheat a little here with the VariantSetMetadata, and encode
    # these within the table as a JSON dump. These should really be
    # stored in their own table.
    metadataJson = json.dumps(
        [protocol.toJsonDict(metadata) for metadata in
         variantSet.getMetadata()])
    urlMapJson = json.dumps(variantSet.getReferenceToDataUrlIndexMap())
    try:
        models.Variantset.create(
            id=variantSet.getId(),
            datasetid=variantSet.getParentContainer().getId(),
            referencesetid=variantSet.getReferenceSet().getId(),
            name=variantSet.getLocalId(),
            created=datetime.datetime.now(),
            updated=datetime.datetime.now(),
            metadata=metadataJson,
            dataurlindexmap=urlMapJson,
            attributes=json.dumps(variantSet.getAttributes()))
    except Exception as e:
        raise exceptions.RepoManagerException(e)
    # Unlike the other insert helpers, the call sets are inserted
    # outside the try block, so their failures propagate unchanged.
    for callSet in variantSet.getCallSets():
        self.insertCallSet(callSet)
<SYSTEM_TASK:>
Inserts the specified featureSet into this repository.
<END_TASK>
<USER_TASK:>
Description:
def insertFeatureSet(self, featureSet):
    """
    Insert *featureSet* into this repository, raising
    RepoManagerException when the database insert fails.
    """
    # TODO add support for info and sourceUri fields.
    try:
        models.Featureset.create(
            id=featureSet.getId(),
            datasetid=featureSet.getParentContainer().getId(),
            referencesetid=featureSet.getReferenceSet().getId(),
            ontologyid=featureSet.getOntology().getId(),
            name=featureSet.getLocalId(),
            dataurl=featureSet.getDataUrl(),
            attributes=json.dumps(featureSet.getAttributes()))
    except Exception as e:
        raise exceptions.RepoManagerException(e)
<SYSTEM_TASK:>
Inserts the specified continuousSet into this repository.
<END_TASK>
<USER_TASK:>
Description:
def insertContinuousSet(self, continuousSet):
    """
    Insert *continuousSet* into this repository, raising
    RepoManagerException when the database insert fails.
    """
    # TODO add support for info and sourceUri fields.
    try:
        models.ContinuousSet.create(
            id=continuousSet.getId(),
            datasetid=continuousSet.getParentContainer().getId(),
            referencesetid=continuousSet.getReferenceSet().getId(),
            name=continuousSet.getLocalId(),
            dataurl=continuousSet.getDataUrl(),
            attributes=json.dumps(continuousSet.getAttributes()))
    except Exception as e:
        raise exceptions.RepoManagerException(e)
<SYSTEM_TASK:>
Inserts the specified Biosample into this repository.
<END_TASK>
<USER_TASK:>
Description:
def insertBiosample(self, biosample):
    """
    Insert *biosample* into this repository, raising
    DuplicateNameException when the insert fails.
    """
    try:
        models.Biosample.create(
            id=biosample.getId(),
            datasetid=biosample.getParentContainer().getId(),
            name=biosample.getLocalId(),
            description=biosample.getDescription(),
            disease=json.dumps(biosample.getDisease()),
            created=biosample.getCreated(),
            updated=biosample.getUpdated(),
            individualid=biosample.getIndividualId(),
            attributes=json.dumps(biosample.getAttributes()),
            individualAgeAtCollection=json.dumps(
                biosample.getIndividualAgeAtCollection()))
    except Exception:
        raise exceptions.DuplicateNameException(
            biosample.getLocalId(),
            biosample.getParentContainer().getLocalId())
<SYSTEM_TASK:>
Inserts the specified individual into this repository.
<END_TASK>
<USER_TASK:>
Description:
def insertIndividual(self, individual):
    """
    Insert *individual* into this repository, raising
    DuplicateNameException when the insert fails.
    """
    try:
        models.Individual.create(
            id=individual.getId(),
            # NOTE(review): this keyword uses camelCase ("datasetId")
            # while sibling tables use "datasetid"; kept as-is to match
            # the model definition — confirm against models.Individual.
            datasetId=individual.getParentContainer().getId(),
            name=individual.getLocalId(),
            description=individual.getDescription(),
            created=individual.getCreated(),
            updated=individual.getUpdated(),
            species=json.dumps(individual.getSpecies()),
            sex=json.dumps(individual.getSex()),
            attributes=json.dumps(individual.getAttributes()))
    except Exception:
        raise exceptions.DuplicateNameException(
            individual.getLocalId(),
            individual.getParentContainer().getLocalId())
<SYSTEM_TASK:>
Inserts the specified phenotype annotation set into this repository.
<END_TASK>
<USER_TASK:>
Description:
def insertPhenotypeAssociationSet(self, phenotypeAssociationSet):
    """
    Insert *phenotypeAssociationSet* into this repository, raising
    DuplicateNameException when the insert fails.
    """
    datasetId = phenotypeAssociationSet.getParentContainer().getId()
    attributes = json.dumps(phenotypeAssociationSet.getAttributes())
    try:
        models.Phenotypeassociationset.create(
            id=phenotypeAssociationSet.getId(),
            name=phenotypeAssociationSet.getLocalId(),
            datasetid=datasetId,
            dataurl=phenotypeAssociationSet._dataUrl,
            attributes=attributes)
    except Exception:
        # NOTE(review): the exception is constructed with the parent's
        # ID where sibling helpers pass the local name — confirm the
        # intended DuplicateNameException arguments.
        raise exceptions.DuplicateNameException(
            phenotypeAssociationSet.getParentContainer().getId())
<SYSTEM_TASK:>
Inserts the specified rnaQuantificationSet into this repository.
<END_TASK>
<USER_TASK:>
Description:
def insertRnaQuantificationSet(self, rnaQuantificationSet):
    """
    Insert *rnaQuantificationSet* into this repository, raising
    DuplicateNameException when the insert fails.
    """
    try:
        models.Rnaquantificationset.create(
            id=rnaQuantificationSet.getId(),
            datasetid=rnaQuantificationSet.getParentContainer().getId(),
            referencesetid=rnaQuantificationSet.getReferenceSet().getId(),
            name=rnaQuantificationSet.getLocalId(),
            dataurl=rnaQuantificationSet.getDataUrl(),
            attributes=json.dumps(rnaQuantificationSet.getAttributes()))
    except Exception:
        raise exceptions.DuplicateNameException(
            rnaQuantificationSet.getLocalId(),
            rnaQuantificationSet.getParentContainer().getLocalId())
<SYSTEM_TASK:>
Removes the specified rnaQuantificationSet from this repository. This
<END_TASK>
<USER_TASK:>
Description:
def removeRnaQuantificationSet(self, rnaQuantificationSet):
    """
    Remove *rnaQuantificationSet* from this repository; the DELETE
    cascades to every item within the set.
    """
    deletion = models.Rnaquantificationset.delete().where(
        models.Rnaquantificationset.id == rnaQuantificationSet.getId())
    deletion.execute()
<SYSTEM_TASK:>
Accepts a peer datamodel object and adds it to the registry.
<END_TASK>
<USER_TASK:>
Description:
def insertPeer(self, peer):
    """
    Add *peer* (a peer datamodel object) to the registry, raising
    RepoManagerException when the insert fails.
    """
    try:
        models.Peer.create(
            url=peer.getUrl(),
            attributes=json.dumps(peer.getAttributes()))
    except Exception as e:
        raise exceptions.RepoManagerException(e)
<SYSTEM_TASK:>
Initialise this data repository, creating any necessary directories
<END_TASK>
<USER_TASK:>
Description:
def initialise(self):
    """
    Initialise this data repository, creating any necessary directories
    and file paths.
    """
    self._checkWriteMode()
    # Preserve this creation order; later tables may reference earlier
    # ones.
    for createTable in (
            self._createSystemTable,
            self._createNetworkTables,
            self._createOntologyTable,
            self._createReferenceSetTable,
            self._createReferenceTable,
            self._createDatasetTable,
            self._createReadGroupSetTable,
            self._createReadGroupTable,
            self._createCallSetTable,
            self._createVariantSetTable,
            self._createVariantAnnotationSetTable,
            self._createFeatureSetTable,
            self._createContinuousSetTable,
            self._createBiosampleTable,
            self._createIndividualTable,
            self._createPhenotypeAssociationSetTable,
            self._createRnaQuantificationSetTable):
        createTable()
<SYSTEM_TASK:>
Loads this data repository into memory.
<END_TASK>
<USER_TASK:>
Description:
def load(self):
    """Loads this data repository into memory."""
    # Preserve this read order; later tables may depend on earlier ones
    # already being loaded.
    for readTable in (
            self._readSystemTable,
            self._readOntologyTable,
            self._readReferenceSetTable,
            self._readReferenceTable,
            self._readDatasetTable,
            self._readReadGroupSetTable,
            self._readReadGroupTable,
            self._readVariantSetTable,
            self._readCallSetTable,
            self._readVariantAnnotationSetTable,
            self._readFeatureSetTable,
            self._readContinuousSetTable,
            self._readBiosampleTable,
            self._readIndividualTable,
            self._readPhenotypeAssociationSetTable,
            self._readRnaQuantificationSetTable):
        readTable()
<SYSTEM_TASK:>
find a feature and return ga4gh representation, use 'native' id as
<END_TASK>
<USER_TASK:>
Description:
def _getFeatureById(self, featureId):
    """
    Find a feature and return its GA4GH representation, using the
    'native' (RDF URI) id as featureId.
    """
    featureRef = rdflib.URIRef(featureId)
    # Group the detail tuples into predicate -> list-of-objects.
    feature = {}
    for detail in self._detailTuples([featureRef]):
        feature.setdefault(detail['predicate'], []).append(
            detail['object'])
    pbFeature = protocol.Feature()
    term = protocol.OntologyTerm()
    # Schema for feature only supports one `type`; default to the first
    # OBO-defined one in sorted order.
    for featureType in sorted(feature[TYPE]):
        if "obolibrary" in featureType:
            term.term = self._featureTypeLabel(featureType)
            term.term_id = featureType
            pbFeature.feature_type.MergeFrom(term)
            break
    pbFeature.id = featureId
    # Schema supports one `name` and one `symbol`: use the shortest
    # label as the symbol and the longest as the name.
    feature[LABEL].sort(key=len)
    pbFeature.gene_symbol = feature[LABEL][0]
    pbFeature.name = feature[LABEL][-1]
    # Copy every predicate/object pair into the attributes map.
    pbFeature.attributes.MergeFrom(protocol.Attributes())
    for predicate in feature:
        for value in sorted(feature[predicate]):
            pbFeature.attributes.attr[predicate].values.add() \
                .string_value = value
    if featureId in self._locationMap:
        location = self._locationMap[featureId]
        pbFeature.reference_name = location["chromosome"]
        pbFeature.start = location["begin"]
        pbFeature.end = location["end"]
    return pbFeature
<SYSTEM_TASK:>
formulate a sparql query string based on parameters
<END_TASK>
<USER_TASK:>
Description:
def _filterSearchFeaturesRequest(self, reference_name, gene_symbol, name,
start, end):
"""
formulate a sparql query string based on parameters
""" |
filters = []
query = self._baseQuery()
filters = []
location = self._findLocation(reference_name, start, end)
if location:
filters.append("?feature = <{}>".format(location))
if gene_symbol:
filters.append('regex(?feature_label, "{}")')
if name:
filters.append(
'regex(?feature_label, "{}")'.format(name))
# apply filters
filter = "FILTER ({})".format(' && '.join(filters))
if len(filters) == 0:
filter = ""
query = query.replace("#%FILTER%", filter)
return query |
Subsets and Splits
No saved queries yet
Save your SQL queries to embed, download, and access them later. Queries will appear here once saved.