text_prompt: string (100 to 17.7k characters)
code_prompt: string (7 to 9.86k characters)
<SYSTEM_TASK:> Adds a new reference set into this repo. <END_TASK> <USER_TASK:> Description: def addReferenceSet(self): """ Adds a new reference set into this repo. """
self._openRepo()
name = self._args.name
filePath = self._getFilePath(self._args.filePath, self._args.relativePath)
if name is None:
    name = getNameFromPath(self._args.filePath)
referenceSet = references.HtslibReferenceSet(name)
referenceSet.populateFromFile(filePath)
referenceSet.setDescription(self._args.description)
if self._args.species is not None:
    referenceSet.setSpeciesFromJson(self._args.species)
referenceSet.setIsDerived(self._args.isDerived)
referenceSet.setAssemblyId(self._args.assemblyId)
referenceSet.setAttributes(json.loads(self._args.attributes))
sourceAccessions = []
if self._args.sourceAccessions is not None:
    sourceAccessions = self._args.sourceAccessions.split(",")
referenceSet.setSourceAccessions(sourceAccessions)
referenceSet.setSourceUri(self._args.sourceUri)
self._updateRepo(self._repo.insertReferenceSet, referenceSet)
<SYSTEM_TASK:> Adds a new phenotype association set to this repo. <END_TASK> <USER_TASK:> Description: def addPhenotypeAssociationSet(self): """ Adds a new phenotype association set to this repo. """
self._openRepo()
name = self._args.name
if name is None:
    name = getNameFromPath(self._args.dirPath)
dataset = self._repo.getDatasetByName(self._args.datasetName)
phenotypeAssociationSet = \
    genotype_phenotype.RdfPhenotypeAssociationSet(
        dataset, name, self._args.dirPath)
phenotypeAssociationSet.setAttributes(
    json.loads(self._args.attributes))
self._updateRepo(
    self._repo.insertPhenotypeAssociationSet,
    phenotypeAssociationSet)
<SYSTEM_TASK:> Removes a phenotype association set from the repo <END_TASK> <USER_TASK:> Description: def removePhenotypeAssociationSet(self): """ Removes a phenotype association set from the repo """
self._openRepo()
dataset = self._repo.getDatasetByName(self._args.datasetName)
phenotypeAssociationSet = dataset.getPhenotypeAssociationSetByName(
    self._args.name)

def func():
    self._updateRepo(
        self._repo.removePhenotypeAssociationSet,
        phenotypeAssociationSet)

self._confirmDelete(
    "PhenotypeAssociationSet",
    phenotypeAssociationSet.getLocalId(),
    func)
<SYSTEM_TASK:> Removes a referenceSet from the repo. <END_TASK> <USER_TASK:> Description: def removeReferenceSet(self): """ Removes a referenceSet from the repo. """
self._openRepo()
referenceSet = self._repo.getReferenceSetByName(
    self._args.referenceSetName)

def func():
    self._updateRepo(self._repo.removeReferenceSet, referenceSet)

self._confirmDelete("ReferenceSet", referenceSet.getLocalId(), func)
<SYSTEM_TASK:> Removes a variantSet from the repo. <END_TASK> <USER_TASK:> Description: def removeVariantSet(self): """ Removes a variantSet from the repo. """
self._openRepo()
dataset = self._repo.getDatasetByName(self._args.datasetName)
variantSet = dataset.getVariantSetByName(self._args.variantSetName)

def func():
    self._updateRepo(self._repo.removeVariantSet, variantSet)

self._confirmDelete("VariantSet", variantSet.getLocalId(), func)
<SYSTEM_TASK:> Adds a new feature set into this repo <END_TASK> <USER_TASK:> Description: def addFeatureSet(self): """ Adds a new feature set into this repo """
self._openRepo()
dataset = self._repo.getDatasetByName(self._args.datasetName)
filePath = self._getFilePath(self._args.filePath,
                             self._args.relativePath)
name = getNameFromPath(self._args.filePath)
featureSet = sequence_annotations.Gff3DbFeatureSet(
    dataset, name)
referenceSetName = self._args.referenceSetName
if referenceSetName is None:
    raise exceptions.RepoManagerException(
        "A reference set name must be provided")
referenceSet = self._repo.getReferenceSetByName(referenceSetName)
featureSet.setReferenceSet(referenceSet)
ontologyName = self._args.ontologyName
if ontologyName is None:
    raise exceptions.RepoManagerException(
        "A sequence ontology name must be provided")
ontology = self._repo.getOntologyByName(ontologyName)
self._checkSequenceOntology(ontology)
featureSet.setOntology(ontology)
featureSet.populateFromFile(filePath)
featureSet.setAttributes(json.loads(self._args.attributes))
self._updateRepo(self._repo.insertFeatureSet, featureSet)
<SYSTEM_TASK:> Removes a feature set from this repo <END_TASK> <USER_TASK:> Description: def removeFeatureSet(self): """ Removes a feature set from this repo """
self._openRepo()
dataset = self._repo.getDatasetByName(self._args.datasetName)
featureSet = dataset.getFeatureSetByName(self._args.featureSetName)

def func():
    self._updateRepo(self._repo.removeFeatureSet, featureSet)

self._confirmDelete("FeatureSet", featureSet.getLocalId(), func)
<SYSTEM_TASK:> Adds a new continuous set into this repo <END_TASK> <USER_TASK:> Description: def addContinuousSet(self): """ Adds a new continuous set into this repo """
self._openRepo()
dataset = self._repo.getDatasetByName(self._args.datasetName)
filePath = self._getFilePath(self._args.filePath,
                             self._args.relativePath)
name = getNameFromPath(self._args.filePath)
continuousSet = continuous.FileContinuousSet(dataset, name)
referenceSetName = self._args.referenceSetName
if referenceSetName is None:
    raise exceptions.RepoManagerException(
        "A reference set name must be provided")
referenceSet = self._repo.getReferenceSetByName(referenceSetName)
continuousSet.setReferenceSet(referenceSet)
continuousSet.populateFromFile(filePath)
self._updateRepo(self._repo.insertContinuousSet, continuousSet)
<SYSTEM_TASK:> Removes a continuous set from this repo <END_TASK> <USER_TASK:> Description: def removeContinuousSet(self): """ Removes a continuous set from this repo """
self._openRepo()
dataset = self._repo.getDatasetByName(self._args.datasetName)
continuousSet = dataset.getContinuousSetByName(
    self._args.continuousSetName)

def func():
    self._updateRepo(self._repo.removeContinuousSet, continuousSet)

self._confirmDelete("ContinuousSet", continuousSet.getLocalId(), func)
<SYSTEM_TASK:> Adds a new biosample into this repo <END_TASK> <USER_TASK:> Description: def addBiosample(self): """ Adds a new biosample into this repo """
self._openRepo()
dataset = self._repo.getDatasetByName(self._args.datasetName)
biosample = bio_metadata.Biosample(
    dataset, self._args.biosampleName)
biosample.populateFromJson(self._args.biosample)
self._updateRepo(self._repo.insertBiosample, biosample)
<SYSTEM_TASK:> Removes a biosample from this repo <END_TASK> <USER_TASK:> Description: def removeBiosample(self): """ Removes a biosample from this repo """
self._openRepo()
dataset = self._repo.getDatasetByName(self._args.datasetName)
biosample = dataset.getBiosampleByName(self._args.biosampleName)

def func():
    self._updateRepo(self._repo.removeBiosample, biosample)

self._confirmDelete("Biosample", biosample.getLocalId(), func)
<SYSTEM_TASK:> Adds a new individual into this repo <END_TASK> <USER_TASK:> Description: def addIndividual(self): """ Adds a new individual into this repo """
self._openRepo()
dataset = self._repo.getDatasetByName(self._args.datasetName)
individual = bio_metadata.Individual(
    dataset, self._args.individualName)
individual.populateFromJson(self._args.individual)
self._updateRepo(self._repo.insertIndividual, individual)
<SYSTEM_TASK:> Removes an individual from this repo <END_TASK> <USER_TASK:> Description: def removeIndividual(self): """ Removes an individual from this repo """
self._openRepo()
dataset = self._repo.getDatasetByName(self._args.datasetName)
individual = dataset.getIndividualByName(self._args.individualName)

def func():
    self._updateRepo(self._repo.removeIndividual, individual)

self._confirmDelete("Individual", individual.getLocalId(), func)
<SYSTEM_TASK:> Adds a new peer into this repo <END_TASK> <USER_TASK:> Description: def addPeer(self): """ Adds a new peer into this repo """
self._openRepo()
try:
    peer = peers.Peer(
        self._args.url, json.loads(self._args.attributes))
except exceptions.BadUrlException:
    raise exceptions.RepoManagerException("The URL for the peer was "
                                          "malformed.")
except ValueError as e:
    raise exceptions.RepoManagerException(
        "The attributes message "
        "was malformed. {}".format(e))
self._updateRepo(self._repo.insertPeer, peer)
<SYSTEM_TASK:> Removes a peer by URL from this repo <END_TASK> <USER_TASK:> Description: def removePeer(self): """ Removes a peer by URL from this repo """
self._openRepo()

def func():
    self._updateRepo(self._repo.removePeer, self._args.url)

self._confirmDelete("Peer", self._args.url, func)
<SYSTEM_TASK:> Removes an ontology from the repo. <END_TASK> <USER_TASK:> Description: def removeOntology(self): """ Removes an ontology from the repo. """
self._openRepo()
ontology = self._repo.getOntologyByName(self._args.ontologyName)

def func():
    self._updateRepo(self._repo.removeOntology, ontology)

self._confirmDelete("Ontology", ontology.getName(), func)
<SYSTEM_TASK:> Adds an rnaQuantification into this repo <END_TASK> <USER_TASK:> Description: def addRnaQuantification(self): """ Adds an rnaQuantification into this repo """
self._openRepo()
dataset = self._repo.getDatasetByName(self._args.datasetName)
biosampleId = ""
if self._args.biosampleName:
    biosample = dataset.getBiosampleByName(self._args.biosampleName)
    biosampleId = biosample.getId()
if self._args.name is None:
    name = getNameFromPath(self._args.quantificationFilePath)
else:
    name = self._args.name
# TODO: programs not fully supported by GA4GH yet
programs = ""
featureType = "gene"
if self._args.transcript:
    featureType = "transcript"
rnaseq2ga.rnaseq2ga(
    self._args.quantificationFilePath, self._args.filePath, name,
    self._args.format, dataset=dataset, featureType=featureType,
    description=self._args.description, programs=programs,
    featureSetNames=self._args.featureSetNames,
    readGroupSetNames=self._args.readGroupSetName,
    biosampleId=biosampleId)
<SYSTEM_TASK:> Reads RNA Quantification data in one of several formats and stores the data <END_TASK> <USER_TASK:> Description: def rnaseq2ga(quantificationFilename, sqlFilename, localName, rnaType, dataset=None, featureType="gene", description="", programs="", featureSetNames="", readGroupSetNames="", biosampleId=""): """ Reads RNA Quantification data in one of several formats and stores the data in a sqlite database for use by the GA4GH reference server. Supports the following quantification output types: Cufflinks, kallisto, RSEM. """
readGroupSetName = ""
if readGroupSetNames:
    readGroupSetName = readGroupSetNames.strip().split(",")[0]
featureSetIds = ""
readGroupIds = ""
if dataset:
    featureSetIdList = []
    if featureSetNames:
        for annotationName in featureSetNames.split(","):
            featureSet = dataset.getFeatureSetByName(annotationName)
            featureSetIdList.append(featureSet.getId())
        featureSetIds = ",".join(featureSetIdList)
    # TODO: multiple readGroupSets
    if readGroupSetName:
        readGroupSet = dataset.getReadGroupSetByName(readGroupSetName)
        readGroupIds = ",".join(
            [x.getId() for x in readGroupSet.getReadGroups()])
if rnaType not in SUPPORTED_RNA_INPUT_FORMATS:
    raise exceptions.UnsupportedFormatException(rnaType)
rnaDB = RnaSqliteStore(sqlFilename)
if rnaType == "cufflinks":
    writer = CufflinksWriter(rnaDB, featureType, dataset=dataset)
elif rnaType == "kallisto":
    writer = KallistoWriter(rnaDB, featureType, dataset=dataset)
elif rnaType == "rsem":
    writer = RsemWriter(rnaDB, featureType, dataset=dataset)
writeRnaseqTable(rnaDB, [localName], description, featureSetIds,
                 readGroupId=readGroupIds, programs=programs,
                 biosampleId=biosampleId)
writeExpressionTable(writer, [(localName, quantificationFilename)])
<SYSTEM_TASK:> Index columns that are queried. The expression index can <END_TASK> <USER_TASK:> Description: def createIndices(self): """ Index columns that are queried. The expression index can take a long time. """
sql = '''CREATE INDEX name_index ON Expression (name)'''
self._cursor.execute(sql)
self._dbConn.commit()
sql = '''CREATE INDEX expression_index ON Expression (expression)'''
self._cursor.execute(sql)
self._dbConn.commit()
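For context, a minimal self-contained sketch of the same two index statements against a toy Expression table; the table definition here is an assumption for illustration, not the server's real schema.

import sqlite3

conn = sqlite3.connect(":memory:")
cursor = conn.cursor()
# toy stand-in for the Expression table used by the RNA quantification store
cursor.execute("CREATE TABLE Expression (id INTEGER, name TEXT, expression REAL)")
cursor.execute("CREATE INDEX name_index ON Expression (name)")
cursor.execute("CREATE INDEX expression_index ON Expression (expression)")
conn.commit()
conn.close()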
<SYSTEM_TASK:> Reads the quantification results file and adds entries to the <END_TASK> <USER_TASK:> Description: def writeExpression(self, rnaQuantificationId, quantfilename): """ Reads the quantification results file and adds entries to the specified database. """
isNormalized = self._isNormalized
units = self._units
with open(quantfilename, "r") as quantFile:
    quantificationReader = csv.reader(quantFile, delimiter=b"\t")
    header = next(quantificationReader)
    expressionLevelColNum = self.setColNum(
        header, self._expressionLevelCol)
    nameColNum = self.setColNum(header, self._nameCol)
    countColNum = self.setColNum(header, self._countCol, -1)
    confColLowNum = self.setColNum(header, self._confColLow, -1)
    confColHiNum = self.setColNum(header, self._confColHi, -1)
    expressionId = 0
    for expression in quantificationReader:
        expressionLevel = expression[expressionLevelColNum]
        name = expression[nameColNum]
        rawCount = 0.0
        if countColNum != -1:
            rawCount = expression[countColNum]
        confidenceLow = 0.0
        confidenceHi = 0.0
        score = 0.0
        if confColLowNum != -1 and confColHiNum != -1:
            confidenceLow = float(expression[confColLowNum])
            confidenceHi = float(expression[confColHiNum])
            score = (confidenceLow + confidenceHi) / 2
        datafields = (expressionId, rnaQuantificationId, name,
                      expressionLevel, isNormalized, rawCount, score,
                      units, confidenceLow, confidenceHi)
        self._db.addExpression(datafields)
        expressionId += 1
    self._db.batchAddExpression()
<SYSTEM_TASK:> Fetch sequences from NCBI using the eutils interface. <END_TASK> <USER_TASK:> Description: def _fetchSequence(ac, startIndex=None, endIndex=None): """Fetch sequences from NCBI using the eutils interface. An interbase interval may be optionally provided with startIndex and endIndex. NCBI eutils will return just the requested subsequence, which might greatly reduce payload sizes (especially with chromosome-scale sequences). When wrapped is True, return list of sequence lines rather than concatenated sequence. >>> len(_fetchSequence('NP_056374.2')) 1596 Pass the desired interval rather than using Python's [] slice operator. >>> _fetchSequence('NP_056374.2',0,10) 'MESRETLSSS' >>> _fetchSequence('NP_056374.2')[0:10] 'MESRETLSSS' """
urlFmt = (
    "http://eutils.ncbi.nlm.nih.gov/entrez/eutils/efetch.fcgi?"
    "db=nucleotide&id={ac}&rettype=fasta&retmode=text")
if startIndex is None or endIndex is None:
    url = urlFmt.format(ac=ac)
else:
    urlFmt += "&seq_start={start}&seq_stop={stop}"
    url = urlFmt.format(ac=ac, start=startIndex + 1, stop=endIndex)
resp = requests.get(url)
resp.raise_for_status()
seqlines = resp.content.splitlines()[1:]
print("{ac}[{s},{e}) => {n} lines ({u})".format(
    ac=ac, s=startIndex, e=endIndex, n=len(seqlines), u=url))
# return response as list of lines, already line wrapped
return seqlines
<SYSTEM_TASK:> Creates a new bam header based on the specified header from the <END_TASK> <USER_TASK:> Description: def createBamHeader(self, baseHeader): """ Creates a new bam header based on the specified header from the parent BAM file. """
header = dict(baseHeader)
newSequences = []
for index, referenceInfo in enumerate(header['SQ']):
    if index < self.numChromosomes:
        referenceName = referenceInfo['SN']
        # The sequence dictionary in the BAM file has to match up
        # with the sequence ids in the data, so we must be sure
        # that these still match up.
        assert referenceName == self.chromosomes[index]
        newReferenceInfo = {
            'AS': self.referenceSetName,
            'SN': referenceName,
            'LN': 0,  # FIXME
            'UR': 'http://example.com',
            'M5': 'dbb6e8ece0b5de29da56601613007c2a',  # FIXME
            'SP': 'Human'
        }
        newSequences.append(newReferenceInfo)
header['SQ'] = newSequences
return header
<SYSTEM_TASK:> Creates the repository for all the data we've just downloaded. <END_TASK> <USER_TASK:> Description: def createRepo(self): """ Creates the repository for all the data we've just downloaded. """
repo = datarepo.SqlDataRepository(self.repoPath) repo.open("w") repo.initialise() referenceSet = references.HtslibReferenceSet("GRCh37-subset") referenceSet.populateFromFile(self.fastaFilePath) referenceSet.setDescription("Subset of GRCh37 used for demonstration") referenceSet.setSpeciesFromJson( '{"id": "9606",' + '"term": "Homo sapiens", "source_name": "NCBI"}') for reference in referenceSet.getReferences(): reference.setSpeciesFromJson( '{"id": "9606",' + '"term": "Homo sapiens", "source_name": "NCBI"}') reference.setSourceAccessions( self.accessions[reference.getName()] + ".subset") repo.insertReferenceSet(referenceSet) dataset = datasets.Dataset("1kg-p3-subset") dataset.setDescription("Sample data from 1000 Genomes phase 3") repo.insertDataset(dataset) variantSet = variants.HtslibVariantSet(dataset, "mvncall") variantSet.setReferenceSet(referenceSet) dataUrls = [vcfFile for vcfFile, _ in self.vcfFilePaths] indexFiles = [indexFile for _, indexFile in self.vcfFilePaths] variantSet.populateFromFile(dataUrls, indexFiles) variantSet.checkConsistency() repo.insertVariantSet(variantSet) for sample, (bamFile, indexFile) in zip( self.samples, self.bamFilePaths): readGroupSet = reads.HtslibReadGroupSet(dataset, sample) readGroupSet.populateFromFile(bamFile, indexFile) readGroupSet.setReferenceSet(referenceSet) repo.insertReadGroupSet(readGroupSet) repo.commit() repo.close() self.log("Finished creating the repository; summary:\n") repo.open("r") repo.printSummary()
<SYSTEM_TASK:> A helper function used just to help modularize the code a bit. <END_TASK> <USER_TASK:> Description: def _configure_backend(app): """A helper function used just to help modularize the code a bit."""
# Allocate the backend # We use URLs to specify the backend. Currently we have file:// URLs (or # URLs with no scheme) for the SqlDataRepository, and special empty:// and # simulated:// URLs for empty or simulated data sources. dataSource = urlparse.urlparse(app.config["DATA_SOURCE"], "file") if dataSource.scheme == "simulated": # Ignore the query string randomSeed = app.config["SIMULATED_BACKEND_RANDOM_SEED"] numCalls = app.config["SIMULATED_BACKEND_NUM_CALLS"] variantDensity = app.config["SIMULATED_BACKEND_VARIANT_DENSITY"] numVariantSets = app.config["SIMULATED_BACKEND_NUM_VARIANT_SETS"] numReferenceSets = app.config[ "SIMULATED_BACKEND_NUM_REFERENCE_SETS"] numReferencesPerReferenceSet = app.config[ "SIMULATED_BACKEND_NUM_REFERENCES_PER_REFERENCE_SET"] numAlignmentsPerReadGroup = app.config[ "SIMULATED_BACKEND_NUM_ALIGNMENTS_PER_READ_GROUP"] numReadGroupsPerReadGroupSet = app.config[ "SIMULATED_BACKEND_NUM_READ_GROUPS_PER_READ_GROUP_SET"] numPhenotypeAssociations = app.config[ "SIMULATED_BACKEND_NUM_PHENOTYPE_ASSOCIATIONS"] numPhenotypeAssociationSets = app.config[ "SIMULATED_BACKEND_NUM_PHENOTYPE_ASSOCIATION_SETS"] numRnaQuantSets = app.config[ "SIMULATED_BACKEND_NUM_RNA_QUANTIFICATION_SETS"] numExpressionLevels = app.config[ "SIMULATED_BACKEND_NUM_EXPRESSION_LEVELS_PER_RNA_QUANT_SET"] dataRepository = datarepo.SimulatedDataRepository( randomSeed=randomSeed, numCalls=numCalls, variantDensity=variantDensity, numVariantSets=numVariantSets, numReferenceSets=numReferenceSets, numReferencesPerReferenceSet=numReferencesPerReferenceSet, numReadGroupsPerReadGroupSet=numReadGroupsPerReadGroupSet, numAlignments=numAlignmentsPerReadGroup, numPhenotypeAssociations=numPhenotypeAssociations, numPhenotypeAssociationSets=numPhenotypeAssociationSets, numRnaQuantSets=numRnaQuantSets, numExpressionLevels=numExpressionLevels) elif dataSource.scheme == "empty": dataRepository = datarepo.EmptyDataRepository() elif dataSource.scheme == "file": path = os.path.join(dataSource.netloc, dataSource.path) dataRepository = datarepo.SqlDataRepository(path) dataRepository.open(datarepo.MODE_READ) else: raise exceptions.ConfigurationException( "Unsupported data source scheme: " + dataSource.scheme) theBackend = backend.Backend(dataRepository) theBackend.setRequestValidation(app.config["REQUEST_VALIDATION"]) theBackend.setDefaultPageSize(app.config["DEFAULT_PAGE_SIZE"]) theBackend.setMaxResponseLength(app.config["MAX_RESPONSE_LENGTH"]) return theBackend
<SYSTEM_TASK:> Returns a Flask response object for the specified data and HTTP status. <END_TASK> <USER_TASK:> Description: def getFlaskResponse(responseString, httpStatus=200): """ Returns a Flask response object for the specified data and HTTP status. """
return flask.Response(responseString, status=httpStatus,
                      mimetype=MIMETYPE)
<SYSTEM_TASK:> Handles the specified HTTP POST request, which maps to the specified <END_TASK> <USER_TASK:> Description: def handleHttpPost(request, endpoint): """ Handles the specified HTTP POST request, which maps to the specified protocol handler endpoint and protocol request class. """
if request.mimetype and request.mimetype != MIMETYPE:
    raise exceptions.UnsupportedMediaTypeException()
request = request.get_data()
if request == '' or request is None:
    request = '{}'
responseStr = endpoint(request)
return getFlaskResponse(responseStr)
<SYSTEM_TASK:> Handles an exception that occurs somewhere in the process of handling <END_TASK> <USER_TASK:> Description: def handleException(exception): """ Handles an exception that occurs somewhere in the process of handling a request. """
serverException = exception
if not isinstance(exception, exceptions.BaseServerException):
    with app.test_request_context():
        app.log_exception(exception)
    serverException = exceptions.getServerError(exception)
error = serverException.toProtocolElement()
# If the exception is being viewed by a web browser, we can render a nicer
# view.
if flask.request and 'Accept' in flask.request.headers and \
        flask.request.headers['Accept'].find('text/html') != -1:
    message = "<h1>Error {}</h1><pre>{}</pre>".format(
        serverException.httpStatus,
        protocol.toJson(error))
    if serverException.httpStatus == 401 \
            or serverException.httpStatus == 403:
        message += "Please try <a href=\"/login\">logging in</a>."
    return message
else:
    responseStr = protocol.toJson(error)
    return getFlaskResponse(responseStr, serverException.httpStatus)
<SYSTEM_TASK:> The request will have a parameter 'key' if it came from the command line <END_TASK> <USER_TASK:> Description: def checkAuthentication(): """ The request will have a parameter 'key' if it came from the command line client, or have a session key of 'key' if it's the browser. If the token is not found, start the login process. If there is no oidcClient, we are running naked and we don't check. If we're being redirected to the oidcCallback we don't check. :returns None if all is ok (and the request handler continues as usual). Otherwise if the key was in the session (therefore we're in a browser) then startLogin() will redirect to the OIDC provider. If the key was in the request arguments, we're using the command line and just raise an exception. """
if app.oidcClient is None:
    return
if flask.request.endpoint == 'oidcCallback':
    return
key = flask.session.get('key') or flask.request.args.get('key')
if key is None or not app.cache.get(key):
    if 'key' in flask.request.args:
        raise exceptions.NotAuthenticatedException()
    else:
        return startLogin()
<SYSTEM_TASK:> Handles the specified flask request for one of the GET URLs <END_TASK> <USER_TASK:> Description: def handleFlaskGetRequest(id_, flaskRequest, endpoint): """ Handles the specified flask request for one of the GET URLs Invokes the specified endpoint to generate a response. """
if flaskRequest.method == "GET":
    return handleHttpGet(id_, endpoint)
else:
    raise exceptions.MethodNotAllowedException()
<SYSTEM_TASK:> Handles the specified flask request for one of the POST URLS <END_TASK> <USER_TASK:> Description: def handleFlaskPostRequest(flaskRequest, endpoint): """ Handles the specified flask request for one of the POST URLS Invokes the specified endpoint to generate a response. """
if flaskRequest.method == "POST":
    return handleHttpPost(flaskRequest, endpoint)
elif flaskRequest.method == "OPTIONS":
    return handleHttpOptions()
else:
    raise exceptions.MethodNotAllowedException()
<SYSTEM_TASK:> This decorator wraps a view function so that it is protected when Auth0 <END_TASK> <USER_TASK:> Description: def auth_decorator(app=None): """ This decorator wraps a view function so that it is protected when Auth0 is enabled. This means that any request will be expected to have a signed token in the authorization header if the `AUTH0_ENABLED` configuration setting is True. The authorization header will have the form: "authorization: Bearer eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9....." If a request is not properly signed, an attempt is made to provide the client with useful error messages. This means that if a request is not authorized the underlying view function will not be executed. When `AUTH0_ENABLED` is false, this decorator will simply execute the decorated view without observing the authorization header. :param app: :return: Flask view decorator """
def requires_auth(f):
    @functools.wraps(f)
    def decorated(*args, **kwargs):
        # This decorator will only apply with AUTH0_ENABLED set to True.
        if app.config.get('AUTH0_ENABLED', False):
            client_id = app.config.get("AUTH0_CLIENT_ID")
            client_secret = app.config.get("AUTH0_CLIENT_SECRET")
            auth_header = flask.request.headers.get('Authorization', None)
            # Each of these functions will throw a 401 if there is a
            # problem decoding the token, with some helpful error message.
            if auth_header:
                token, profile = decode_header(
                    auth_header, client_id, client_secret)
            else:
                raise exceptions.NotAuthorizedException()
            # We store the token in the session so that later
            # stages can use it to connect identity and authorization.
            flask.session['auth0_key'] = token
            # Now we need to make sure that on top of having a good token
            # they are authorized, and if not provide an error message.
            is_authorized(app.cache, profile['email'])
            is_active(app.cache, token)
        return f(*args, **kwargs)
    return decorated
return requires_auth
<SYSTEM_TASK:> Logs out the current session by removing it from the cache. This is <END_TASK> <USER_TASK:> Description: def logout(cache): """ Logs out the current session by removing it from the cache. This is expected to only occur when a session has """
cache.set(flask.session['auth0_key'], None)
flask.session.clear()
return True
<SYSTEM_TASK:> Adds the specified reference to this ReferenceSet. <END_TASK> <USER_TASK:> Description: def addReference(self, reference): """ Adds the specified reference to this ReferenceSet. """
id_ = reference.getId()
self._referenceIdMap[id_] = reference
self._referenceNameMap[reference.getLocalId()] = reference
self._referenceIds.append(id_)
<SYSTEM_TASK:> Sets the species, an OntologyTerm, to the specified value, given as <END_TASK> <USER_TASK:> Description: def setSpeciesFromJson(self, speciesJson): """ Sets the species, an OntologyTerm, to the specified value, given as a JSON string. See the documentation for details of this field. """
try:
    parsed = protocol.fromJson(speciesJson, protocol.OntologyTerm)
except:
    raise exceptions.InvalidJsonException(speciesJson)
self._species = protocol.toJsonDict(parsed)
<SYSTEM_TASK:> Returns the reference with the specified name. <END_TASK> <USER_TASK:> Description: def getReferenceByName(self, name): """ Returns the reference with the specified name. """
if name not in self._referenceNameMap:
    raise exceptions.ReferenceNameNotFoundException(name)
return self._referenceNameMap[name]
<SYSTEM_TASK:> Returns the Reference with the specified ID or raises a <END_TASK> <USER_TASK:> Description: def getReference(self, id_): """ Returns the Reference with the specified ID or raises a ReferenceNotFoundException if it does not exist. """
if id_ not in self._referenceIdMap:
    raise exceptions.ReferenceNotFoundException(id_)
return self._referenceIdMap[id_]
<SYSTEM_TASK:> Returns the MD5 checksum for this reference set. This checksum is <END_TASK> <USER_TASK:> Description: def getMd5Checksum(self): """ Returns the MD5 checksum for this reference set. This checksum is calculated by making a list of `Reference.md5checksum` for all `Reference`s in this set. We then sort this list, and take the MD5 hash of all the strings concatenated together. """
references = sorted(
    self.getReferences(),
    key=lambda ref: ref.getMd5Checksum())
checksums = ''.join([ref.getMd5Checksum() for ref in references])
md5checksum = hashlib.md5(checksums).hexdigest()
return md5checksum
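A small standalone sketch of the checksum-of-checksums rule described above, using made-up per-reference MD5 strings rather than real Reference objects; sorting the digest strings directly is equivalent to sorting the references by digest.

import hashlib

reference_md5s = ["1b22b98cdeb4a9304cb5d48026a85128",
                  "a0d9851da00400dec1098a9255ac712e"]
concatenated = "".join(sorted(reference_md5s))      # sorted, then concatenated
print(hashlib.md5(concatenated.encode("utf-8")).hexdigest())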
<SYSTEM_TASK:> Returns the GA4GH protocol representation of this ReferenceSet. <END_TASK> <USER_TASK:> Description: def toProtocolElement(self): """ Returns the GA4GH protocol representation of this ReferenceSet. """
ret = protocol.ReferenceSet()
ret.assembly_id = pb.string(self.getAssemblyId())
ret.description = pb.string(self.getDescription())
ret.id = self.getId()
ret.is_derived = self.getIsDerived()
ret.md5checksum = self.getMd5Checksum()
if self.getSpecies():
    term = protocol.fromJson(
        json.dumps(self.getSpecies()), protocol.OntologyTerm)
    ret.species.term_id = term.term_id
    ret.species.term = term.term
ret.source_accessions.extend(self.getSourceAccessions())
ret.source_uri = pb.string(self.getSourceUri())
ret.name = self.getLocalId()
self.serializeAttributes(ret)
return ret
<SYSTEM_TASK:> Returns the GA4GH protocol representation of this Reference. <END_TASK> <USER_TASK:> Description: def toProtocolElement(self): """ Returns the GA4GH protocol representation of this Reference. """
reference = protocol.Reference()
reference.id = self.getId()
reference.is_derived = self.getIsDerived()
reference.length = self.getLength()
reference.md5checksum = self.getMd5Checksum()
reference.name = self.getName()
if self.getSpecies():
    term = protocol.fromJson(
        json.dumps(self.getSpecies()), protocol.OntologyTerm)
    reference.species.term_id = term.term_id
    reference.species.term = term.term
reference.source_accessions.extend(self.getSourceAccessions())
reference.source_divergence = pb.int(self.getSourceDivergence())
reference.source_uri = self.getSourceUri()
self.serializeAttributes(reference)
return reference
<SYSTEM_TASK:> Checks to ensure that the query range is valid within this reference. <END_TASK> <USER_TASK:> Description: def checkQueryRange(self, start, end): """ Checks to ensure that the query range is valid within this reference. If not, raise ReferenceRangeErrorException. """
condition = (
    (start < 0 or end > self.getLength()) or
    start > end or start == end)
if condition:
    raise exceptions.ReferenceRangeErrorException(
        self.getId(), start, end)
<SYSTEM_TASK:> Populates the instance variables of this ReferenceSet from the <END_TASK> <USER_TASK:> Description: def populateFromFile(self, dataUrl): """ Populates the instance variables of this ReferenceSet from the data URL. """
self._dataUrl = dataUrl
fastaFile = self.getFastaFile()
for referenceName in fastaFile.references:
    reference = HtslibReference(self, referenceName)
    # TODO break this up into chunks and calculate the MD5
    # in bits (say, 64K chunks?)
    bases = fastaFile.fetch(referenceName)
    md5checksum = hashlib.md5(bases).hexdigest()
    reference.setMd5checksum(md5checksum)
    reference.setLength(len(bases))
    self.addReference(reference)
<SYSTEM_TASK:> Populates this reference set from the values in the specified DB <END_TASK> <USER_TASK:> Description: def populateFromRow(self, referenceSetRecord): """ Populates this reference set from the values in the specified DB row. """
self._dataUrl = referenceSetRecord.dataurl
self._description = referenceSetRecord.description
self._assemblyId = referenceSetRecord.assemblyid
self._isDerived = bool(referenceSetRecord.isderived)
self._md5checksum = referenceSetRecord.md5checksum
species = referenceSetRecord.species
if species is not None and species != 'null':
    self.setSpeciesFromJson(species)
self._sourceAccessions = json.loads(
    referenceSetRecord.sourceaccessions)
self._sourceUri = referenceSetRecord.sourceuri
<SYSTEM_TASK:> Populates this reference from the values in the specified DB row. <END_TASK> <USER_TASK:> Description: def populateFromRow(self, referenceRecord): """ Populates this reference from the values in the specified DB row. """
self._length = referenceRecord.length
self._isDerived = bool(referenceRecord.isderived)
self._md5checksum = referenceRecord.md5checksum
species = referenceRecord.species
if species is not None and species != 'null':
    self.setSpeciesFromJson(species)
self._sourceAccessions = json.loads(referenceRecord.sourceaccessions)
self._sourceDivergence = referenceRecord.sourcedivergence
self._sourceUri = referenceRecord.sourceuri
<SYSTEM_TASK:> Given a binding from the sparql query result, <END_TASK> <USER_TASK:> Description: def _bindingsToDict(self, bindings): """ Given a binding from the sparql query result, create a dict of plain text """
myDict = {}
for key, val in bindings.iteritems():
    myDict[key.toPython().replace('?', '')] = val.toPython()
return myDict
<SYSTEM_TASK:> Given a filename, add it to the graph <END_TASK> <USER_TASK:> Description: def _addDataFile(self, filename): """ Given a filename, add it to the graph """
if filename.endswith('.ttl'):
    self._rdfGraph.parse(filename, format='n3')
else:
    self._rdfGraph.parse(filename, format='xml')
<SYSTEM_TASK:> Given a uriRef, return a dict of all the details for that Ref <END_TASK> <USER_TASK:> Description: def _getDetails(self, uriRef, associations_details): """ Given a uriRef, return a dict of all the details for that Ref use the uriRef as the 'id' of the dict """
associationDetail = {}
for detail in associations_details:
    if detail['subject'] == uriRef:
        associationDetail[detail['predicate']] = detail['object']
associationDetail['id'] = uriRef
return associationDetail
<SYSTEM_TASK:> Formats several external identifiers for query <END_TASK> <USER_TASK:> Description: def _formatExternalIdentifiers(self, element, element_type): """ Formats several external identifiers for query """
elementClause = None
elements = []
if not issubclass(element.__class__, dict):
    element = protocol.toJsonDict(element)
if element['externalIdentifiers']:
    for _id in element['externalIdentifiers']:
        elements.append(self._formatExternalIdentifier(
            _id, element_type))
    elementClause = "({})".format(" || ".join(elements))
return elementClause
<SYSTEM_TASK:> Formats a single external identifier for query <END_TASK> <USER_TASK:> Description: def _formatExternalIdentifier(self, element, element_type): """ Formats a single external identifier for query """
if "http" not in element['database']: term = "{}:{}".format(element['database'], element['identifier']) namespaceTerm = self._toNamespaceURL(term) else: namespaceTerm = "{}{}".format( element['database'], element['identifier']) comparison = '?{} = <{}> '.format(element_type, namespaceTerm) return comparison
<SYSTEM_TASK:> Formats the ontology terms for query <END_TASK> <USER_TASK:> Description: def _formatOntologyTerm(self, element, element_type): """ Formats the ontology terms for query """
elementClause = None
if isinstance(element, dict) and element.get('terms'):
    elements = []
    for _term in element['terms']:
        if _term.get('id'):
            elements.append('?{} = <{}> '.format(
                element_type, _term['id']))
        else:
            elements.append('?{} = <{}> '.format(
                element_type, self._toNamespaceURL(_term['term'])))
    elementClause = "({})".format(" || ".join(elements))
return elementClause
<SYSTEM_TASK:> Formats the ontology term object for query <END_TASK> <USER_TASK:> Description: def _formatOntologyTermObject(self, terms, element_type): """ Formats the ontology term object for query """
elementClause = None
if not isinstance(terms, collections.Iterable):
    terms = [terms]
elements = []
for term in terms:
    if term.term_id:
        elements.append('?{} = <{}> '.format(
            element_type, term.term_id))
    else:
        elements.append('?{} = <{}> '.format(
            element_type, self._toNamespaceURL(term.term)))
elementClause = "({})".format(" || ".join(elements))
return elementClause
<SYSTEM_TASK:> Formats a set of identifiers for query <END_TASK> <USER_TASK:> Description: def _formatIds(self, element, element_type): """ Formats a set of identifiers for query """
elementClause = None
if isinstance(element, collections.Iterable):
    elements = []
    for _id in element:
        elements.append('?{} = <{}> '.format(
            element_type, _id))
    elementClause = "({})".format(" || ".join(elements))
return elementClause
<SYSTEM_TASK:> Formats elements passed into parts of a query for filtering <END_TASK> <USER_TASK:> Description: def _formatEvidence(self, elements): """ Formats elements passed into parts of a query for filtering """
elementClause = None
filters = []
for evidence in elements:
    if evidence.description:
        elementClause = 'regex(?{}, "{}")'.format(
            'environment_label', evidence.description)
    if (hasattr(evidence, 'externalIdentifiers') and
            evidence.externalIdentifiers):
        # TODO will this pick up > 1 externalIdentifiers ?
        for externalIdentifier in evidence['externalIdentifiers']:
            exid_clause = self._formatExternalIdentifier(
                externalIdentifier, 'environment')
            # cleanup parens from _formatExternalIdentifier method
            elementClause = exid_clause[1:-1]
    if elementClause:
        filters.append(elementClause)
elementClause = "({})".format(" || ".join(filters))
return elementClause
<SYSTEM_TASK:> given an association dict, <END_TASK> <USER_TASK:> Description: def _toGA4GH(self, association, featureSets=[]): """ given an association dict, return a protocol.FeaturePhenotypeAssociation """
# The association dict has the keys: environment, environment # label, evidence, feature label, phenotype and sources. Each # key's value is a dict with the RDF predicates as keys and # subject as values # 1) map a GA4GH FeaturePhenotypeAssociation # from the association dict passed to us feature = association['feature'] fpa = protocol.FeaturePhenotypeAssociation() fpa.id = association['id'] feature_id = feature['id'] for feature_set in featureSets: if self.getLocalId() in feature_set.getLocalId(): feature_id = feature_set.getCompoundIdForFeatureId(feature_id) fpa.feature_ids.extend([feature_id]) msg = 'Association: genotype:[{}] phenotype:[{}] environment:[{}] ' \ 'evidence:[{}] publications:[{}]' fpa.description = msg.format( association['feature_label'], association['phenotype_label'], association['environment_label'], self._getIdentifier(association['evidence']), association['sources'] ) # 2) map a GA4GH Evidence # from the association's phenotype & evidence evidence = protocol.Evidence() phenotype = association['phenotype'] term = protocol.OntologyTerm() term.term = association['evidence_type'] term.term_id = phenotype['id'] evidence.evidence_type.MergeFrom(term) evidence.description = self._getIdentifier(association['evidence']) # 3) Store publications from the list of sources for source in association['sources'].split("|"): evidence.info['publications'].values.add().string_value = source fpa.evidence.extend([evidence]) # 4) map environment (drug) to environmentalContext environmentalContext = protocol.EnvironmentalContext() environment = association['environment'] environmentalContext.id = environment['id'] environmentalContext.description = association['environment_label'] term = protocol.OntologyTerm() term.term = environment['id'] term.term_id = 'http://purl.obolibrary.org/obo/RO_0002606' environmentalContext.environment_type.MergeFrom(term) fpa.environmental_contexts.extend([environmentalContext]) # 5) map the phenotype phenotypeInstance = protocol.PhenotypeInstance() term = protocol.OntologyTerm() term.term = phenotype[TYPE] term.term_id = phenotype['id'] phenotypeInstance.type.MergeFrom(term) phenotypeInstance.description = phenotype[LABEL] phenotypeInstance.id = phenotype['id'] fpa.phenotype.MergeFrom(phenotypeInstance) fpa.phenotype_association_set_id = self.getId() return fpa
<SYSTEM_TASK:> Generate a formatted sparql query with appropriate filters <END_TASK> <USER_TASK:> Description: def _formatFilterQuery(self, request=None, featureSets=[]): """ Generate a formatted sparql query with appropriate filters """
query = self._baseQuery()
filters = []
if issubclass(request.__class__,
              protocol.SearchGenotypePhenotypeRequest):
    filters += self._filterSearchGenotypePhenotypeRequest(
        request, featureSets)
if issubclass(request.__class__, protocol.SearchPhenotypesRequest):
    filters += self._filterSearchPhenotypesRequest(request)
# apply filters
filter = "FILTER ({})".format(' && '.join(filters))
if len(filters) == 0:
    filter = ""
query = query.replace("#%FILTER%", filter)
return query
<SYSTEM_TASK:> Filters request for phenotype search requests <END_TASK> <USER_TASK:> Description: def _filterSearchPhenotypesRequest(self, request): """ Filters request for phenotype search requests """
filters = []
if request.id:
    filters.append("?phenotype = <{}>".format(request.id))
if request.description:
    filters.append(
        'regex(?phenotype_label, "{}")'.format(request.description))
# OntologyTerms
# TODO: refactor this repetitive code
if hasattr(request.type, 'id') and request.type.id:
    ontolgytermsClause = self._formatOntologyTermObject(
        request.type, 'phenotype')
    if ontolgytermsClause:
        filters.append(ontolgytermsClause)
if len(request.qualifiers) > 0:
    ontolgytermsClause = self._formatOntologyTermObject(
        request.qualifiers, 'phenotype_quality')
    if ontolgytermsClause:
        filters.append(ontolgytermsClause)
if hasattr(request.age_of_onset, 'id') and request.age_of_onset.id:
    ontolgytermsClause = self._formatOntologyTermObject(
        request.age_of_onset, 'phenotype_quality')
    if ontolgytermsClause:
        filters.append(ontolgytermsClause)
return filters
<SYSTEM_TASK:> Parse the line describing the mode. <END_TASK> <USER_TASK:> Description: def parseStep(self, line): """ Parse the line describing the mode. One of: variableStep chrom=<reference> [span=<window_size>] fixedStep chrom=<reference> start=<position> step=<step_interval> [span=<window_size>] Span is optional, defaulting to 1. It indicates that each value applies to region, starting at the given position and extending <span> positions. """
fields = dict([field.split('=') for field in line.split()[1:]])
if 'chrom' in fields:
    self._reference = fields['chrom']
else:
    raise ValueError("Missing chrom field in %s" % line.strip())
if line.startswith("fixedStep"):
    if 'start' in fields:
        self._start = int(fields['start']) - 1  # to 0-based
    else:
        raise ValueError("Missing start field in %s" % line.strip())
if 'span' in fields:
    self._span = int(fields['span'])
if 'step' in fields:
    self._step = int(fields['step'])
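A quick standalone illustration of the declaration-line parsing described above, on a made-up fixedStep header; note the conversion of the 1-based wiggle start to a 0-based position.

line = "fixedStep chrom=chr19 start=49307401 step=300 span=200"
fields = dict(field.split("=") for field in line.split()[1:])
reference = fields["chrom"]
start = int(fields["start"]) - 1        # wiggle positions are 1-based; convert to 0-based
step = int(fields["step"])
span = int(fields.get("span", 1))       # span defaults to 1 when absent
print(reference, start, step, span)     # chr19 49307400 300 200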
<SYSTEM_TASK:> Read a wiggle line. If it is a data line, add values to the <END_TASK> <USER_TASK:> Description: def readWiggleLine(self, line): """ Read a wiggle line. If it is a data line, add values to the protocol object. """
if(line.isspace() or line.startswith("#") or line.startswith("browser") or line.startswith("track")): return elif line.startswith("variableStep"): self._mode = self._VARIABLE_STEP self.parseStep(line) return elif line.startswith("fixedStep"): self._mode = self._FIXED_STEP self.parseStep(line) return elif self._mode is None: raise ValueError("Unexpected input line: %s" % line.strip()) if self._queryReference != self._reference: return # read data lines fields = line.split() if self._mode == self._VARIABLE_STEP: start = int(fields[0])-1 # to 0-based val = float(fields[1]) else: start = self._start self._start += self._step val = float(fields[0]) if start < self._queryEnd and start > self._queryStart: if self._position is None: self._position = start self._data.start = start # fill gap while self._position < start: self._data.values.append(float('NaN')) self._position += 1 for _ in xrange(self._span): self._data.values.append(val) self._position += self._span
<SYSTEM_TASK:> Return a continuous protocol object satisfying the given query <END_TASK> <USER_TASK:> Description: def wiggleFileHandleToProtocol(self, fileHandle): """ Return a continuous protocol object satisfying the given query parameters from the given wiggle file handle. """
for line in fileHandle:
    self.readWiggleLine(line)
return self._data
<SYSTEM_TASK:> Check the reference for security. Tries to avoid any characters <END_TASK> <USER_TASK:> Description: def checkReference(self, reference): """ Check the reference for security. Tries to avoid any characters necessary for doing a script injection. """
pattern = re.compile(r'[\s,;"\'&\\]')
if pattern.findall(reference.strip()):
    return False
return True
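A small sketch of the same character screen applied to example inputs; the inputs are made up for illustration.

import re

pattern = re.compile(r'[\s,;"\'&\\]')
for name in ["chr1", "chr1; rm -rf /"]:
    # mirrors checkReference: any whitespace, quote, or shell character fails
    print(name, "->", not bool(pattern.findall(name.strip())))
# chr1 -> True
# chr1; rm -rf / -> False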
<SYSTEM_TASK:> Use pyBigWig package to read a BigWig file for the <END_TASK> <USER_TASK:> Description: def readValuesPyBigWig(self, reference, start, end): """ Use pyBigWig package to read a BigWig file for the given range and return a protocol object. pyBigWig returns an array of values that fill the query range. Not sure if it is possible to get the step and span. This method trims NaN values from the start and end. pyBigWig throws an exception if end is outside of the reference range. This function checks the query range and throws its own exceptions to avoid the ones thrown by pyBigWig. """
if not self.checkReference(reference):
    raise exceptions.ReferenceNameNotFoundException(reference)
if start < 0:
    start = 0
bw = pyBigWig.open(self._sourceFile)
referenceLen = bw.chroms(reference)
if referenceLen is None:
    raise exceptions.ReferenceNameNotFoundException(reference)
if end > referenceLen:
    end = referenceLen
if start >= end:
    raise exceptions.ReferenceRangeErrorException(
        reference, start, end)
data = protocol.Continuous()
curStart = start
curEnd = curStart + self._INCREMENT
while curStart < end:
    if curEnd > end:
        curEnd = end
    for i, val in enumerate(bw.values(reference, curStart, curEnd)):
        if not math.isnan(val):
            if len(data.values) == 0:
                data.start = curStart + i
            data.values.append(val)
            if len(data.values) == self._MAX_VALUES:
                yield data
                data = protocol.Continuous()
        elif len(data.values) > 0:
            # data.values.append(float('NaN'))
            yield data
            data = protocol.Continuous()
    curStart = curEnd
    curEnd = curStart + self._INCREMENT
bw.close()
if len(data.values) > 0:
    yield data
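A hedged usage sketch of the pyBigWig calls this method relies on; the file name "signal.bw" and the chromosome "chr1" are assumptions, and the protocol-object packaging is omitted.

import math

import pyBigWig

bw = pyBigWig.open("signal.bw")          # hypothetical local BigWig file
length = bw.chroms("chr1")               # returns None if the chromosome is absent
values = bw.values("chr1", 0, min(1000, length))
covered = [v for v in values if not math.isnan(v)]   # NaN marks uncovered bases
print("%d non-NaN values in the first kilobase" % len(covered))
bw.close()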
<SYSTEM_TASK:> Read a bigwig file and return a protocol object with values <END_TASK> <USER_TASK:> Description: def readValuesBigWigToWig(self, reference, start, end): """ Read a bigwig file and return a protocol object with values within the query range. This method uses the bigWigToWig command line tool from UCSC GoldenPath. The tool is used to return values within a query region. The output is in wiggle format, which is processed by the WiggleReader class. There could be memory issues if the returned results are large. The input reference can be a security problem (script injection). Ideally, it should be checked against a list of known chromosomes. Start and end should not be problems since they are integers. """
if not self.checkReference(reference): raise exceptions.ReferenceNameNotFoundException(reference) if start < 0: raise exceptions.ReferenceRangeErrorException( reference, start, end) # TODO: CHECK IF QUERY IS BEYOND END cmd = ["bigWigToWig", self._sourceFile, "stdout", "-chrom="+reference, "-start="+str(start), "-end="+str(end)] wiggleReader = WiggleReader(reference, start, end) try: # run command and grab output simultaneously process = subprocess.Popen(cmd, stdout=subprocess.PIPE) while True: line = process.stdout.readline() if line == '' and process.poll() is not None: break wiggleReader.readWiggleLine(line.strip()) except ValueError: raise except: raise Exception("bigWigToWig failed to run") return wiggleReader.getData()
<SYSTEM_TASK:> Returns the representation of this ContinuousSet as the corresponding <END_TASK> <USER_TASK:> Description: def toProtocolElement(self): """ Returns the representation of this ContinuousSet as the corresponding ProtocolElement. """
gaContinuousSet = protocol.ContinuousSet()
gaContinuousSet.id = self.getId()
gaContinuousSet.dataset_id = self.getParentContainer().getId()
gaContinuousSet.reference_set_id = pb.string(
    self._referenceSet.getId())
gaContinuousSet.name = self._name
gaContinuousSet.source_uri = self._sourceUri
attributes = self.getAttributes()
for key in attributes:
    gaContinuousSet.attributes.attr[key] \
        .values.extend(protocol.encodeValue(attributes[key]))
return gaContinuousSet
<SYSTEM_TASK:> Populates the instance variables of this ContinuousSet from the <END_TASK> <USER_TASK:> Description: def populateFromRow(self, continuousSetRecord): """ Populates the instance variables of this ContinuousSet from the specified DB row. """
self._filePath = continuousSetRecord.dataurl
self.setAttributesJson(continuousSetRecord.attributes)
<SYSTEM_TASK:> Method passed to runSearchRequest to fulfill the request to <END_TASK> <USER_TASK:> Description: def getContinuous(self, referenceName=None, start=None, end=None): """ Method passed to runSearchRequest to fulfill the request to yield continuous protocol objects that satisfy the given query. :param str referenceName: name of reference (ex: "chr1") :param start: castable to int, start position on reference :param end: castable to int, end position on reference :return: yields a protocol.Continuous at a time """
bigWigReader = BigWigDataSource(self._filePath)
for continuousObj in bigWigReader.bigWigToProtocol(
        referenceName, start, end):
    yield continuousObj
<SYSTEM_TASK:> Returns a set number of simulated continuous data. <END_TASK> <USER_TASK:> Description: def getContinuousData(self, referenceName=None, start=None, end=None): """ Returns a set number of simulated continuous data. :param referenceName: name of reference to "search" on :param start: start coordinate of query :param end: end coordinate of query :return: Yields continuous list """
randomNumberGenerator = random.Random()
randomNumberGenerator.seed(self._randomSeed)
for i in range(100):
    gaContinuous = self._generateSimulatedContinuous(
        randomNumberGenerator)
    match = (
        gaContinuous.start < end and
        gaContinuous.end > start and
        gaContinuous.reference_name == referenceName)
    if match:
        yield gaContinuous
<SYSTEM_TASK:> Executes ping on the device and returns a dictionary with the result <END_TASK> <USER_TASK:> Description: def ping(self, destination, source=c.PING_SOURCE, ttl=c.PING_TTL, timeout=c.PING_TIMEOUT, size=c.PING_SIZE, count=c.PING_COUNT, vrf=c.PING_VRF): """ Executes ping on the device and returns a dictionary with the result :param destination: Host or IP Address of the destination :param source (optional): Source address of echo request :param ttl (optional): Maximum number of hops :param timeout (optional): Maximum seconds to wait after sending final packet :param size (optional): Size of request (bytes) :param count (optional): Number of ping requests to send Output dictionary has one of the following keys: * success * error In case of success, inner dictionary will have the following keys: * probes_sent (int) * packet_loss (int) * rtt_min (float) * rtt_max (float) * rtt_avg (float) * rtt_stddev (float) * results (list) 'results' is a list of dictionaries with the following keys: * ip_address (str) * rtt (float) Example:: { 'success': { 'probes_sent': 5, 'packet_loss': 0, 'rtt_min': 72.158, 'rtt_max': 72.433, 'rtt_avg': 72.268, 'rtt_stddev': 0.094, 'results': [ { 'ip_address': u'1.1.1.1', 'rtt': 72.248 }, { 'ip_address': '2.2.2.2', 'rtt': 72.299 } ] } } OR { 'error': 'unknown host 8.8.8.8.8' } """
raise NotImplementedError
<SYSTEM_TASK:> Only useful for EOS <END_TASK> <USER_TASK:> Description: def run_commands(self, commands): """Only useful for EOS"""
if "eos" in self.profile: return list(self.parent.cli(commands).values())[0] else: raise AttributeError("MockedDriver instance has not attribute '_rpc'")
<SYSTEM_TASK:> Extracts the text value from an XML tree, using XPath. <END_TASK> <USER_TASK:> Description: def find_txt(xml_tree, path, default=''): """ Extracts the text value from an XML tree, using XPath. In case of error, will return a default value. :param xml_tree: the XML Tree object. Assumed is <type 'lxml.etree._Element'>. :param path: XPath to be applied, in order to extract the desired data. :param default: Value to be returned in case of error. :return: a str value. """
value = ''
try:
    xpath_applied = xml_tree.xpath(path)
    # will consider the first match only
    if len(xpath_applied) and xpath_applied[0] is not None:
        xpath_result = xpath_applied[0]
        if isinstance(xpath_result, type(xml_tree)):
            value = xpath_result.text.strip()
        else:
            value = xpath_result
except Exception:
    # in case of any exception, returns default
    value = default
return py23_compat.text_type(value)
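A small usage example of the helper above on an inline XML snippet (the element names are hypothetical). Note that the default is only returned when the XPath evaluation raises; an expression that simply matches nothing yields an empty string. This assumes find_txt and its py23_compat dependency are importable.

from lxml import etree

xml_tree = etree.fromstring("<rpc-reply><host-name>edge01</host-name></rpc-reply>")
print(find_txt(xml_tree, ".//host-name", default="unknown"))   # edge01
print(find_txt(xml_tree, "//*[", default="unknown"))           # unknown (invalid XPath raises, so the default is used)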
<SYSTEM_TASK:> Converts data to a specific datatype. <END_TASK> <USER_TASK:> Description: def convert(to, who, default=u''): """ Converts data to a specific datatype. In case of error, will return a default value. :param to: datatype to be casted to. :param who: value to cast. :param default: value to return in case of error. :return: a str value. """
if who is None:
    return default
try:
    return to(who)
except:  # noqa
    return default
<SYSTEM_TASK:> Converts a raw string to a standardised MAC Address EUI Format. <END_TASK> <USER_TASK:> Description: def mac(raw): """ Converts a raw string to a standardised MAC Address EUI Format. :param raw: the raw string containing the value of the MAC Address :return: a string with the MAC Address in EUI format Example: .. code-block:: python >>> mac('0123.4567.89ab') u'01:23:45:67:89:AB' Some vendors like Cisco return MAC addresses like a9:c5:2e:7b:6: which is not entirely valid (with respect to EUI48 or EUI64 standards). Therefore we need to stuff with trailing zeros Example >>> mac('a9:c5:2e:7b:6:') u'A9:C5:2E:7B:60:00' If Cisco or other obscure vendors use their own standards, will throw an error and we can fix later, however, still works with weird formats like: >>> mac('123.4567.89ab') u'01:23:45:67:89:AB' >>> mac('23.4567.89ab') u'00:23:45:67:89:AB' """
if raw.endswith(':'):
    flat_raw = raw.replace(':', '')
    raw = '{flat_raw}{zeros_stuffed}'.format(
        flat_raw=flat_raw,
        zeros_stuffed='0' * (12 - len(flat_raw))
    )
return py23_compat.text_type(EUI(raw, dialect=_MACFormat))
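A plain-string sketch of the trailing-zero stuffing rule, shown without netaddr; it reproduces the docstring's a9:c5:2e:7b:6: example.

raw = "a9:c5:2e:7b:6:"
flat = raw.replace(":", "")                          # 'a9c52e7b6'
padded = flat + "0" * (12 - len(flat))               # pad to 12 hex digits: 'a9c52e7b6000'
print(":".join(padded[i:i + 2] for i in range(0, 12, 2)).upper())
# A9:C5:2E:7B:60:00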
<SYSTEM_TASK:> onnx slice provides slicing on multiple axes. Adding multiple slice_axis operators <END_TASK> <USER_TASK:> Description: def _fix_slice(self, inputs, new_attr): """onnx slice provides slicing on multiple axes. Adding multiple slice_axis operators for multiple axes from mxnet"""
begin = new_attr.get('begin')
end = new_attr.get('end')
axes = new_attr.get('axis', tuple(range(len(begin))))
slice_op = mx.sym.slice_axis(inputs[0], axis=axes[0],
                             begin=begin[0], end=end[0])
if len(axes) > 1:
    for i, axis in enumerate(axes):
        slice_op = mx.sym.slice_axis(slice_op, axis=axis,
                                     begin=begin[i], end=end[i])
return slice_op
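Not the mxnet path itself: a numpy illustration of the underlying idea that a multi-axis ONNX Slice is equivalent to a chain of single-axis slices.

import numpy as np

x = np.arange(24).reshape(2, 3, 4)
begin, end, axes = (0, 1), (2, 3), (0, 1)
sliced = x
for b, e, ax in zip(begin, end, axes):
    # slice one axis at a time, leaving the others untouched
    index = [slice(None)] * sliced.ndim
    index[ax] = slice(b, e)
    sliced = sliced[tuple(index)]
assert (sliced == x[0:2, 1:3, :]).all()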
<SYSTEM_TASK:> A workaround to handle dropout or similar operators that have more than one output <END_TASK> <USER_TASK:> Description: def _fix_outputs(self, op, outputs): """A workaround to handle dropout or similar operators that have more than one output in ONNX. """
if op == 'Dropout':
    assert len(outputs) == 2, "ONNX have two outputs for dropout layer."
    outputs = outputs[:-1]
return outputs
<SYSTEM_TASK:> Run model inference and return the result <END_TASK> <USER_TASK:> Description: def run(self, inputs, **kwargs): """Run model inference and return the result Parameters ---------- inputs : numpy array input to run a layer on Returns ------- params : numpy array result obtained after running the inference on mxnet """
input_data = np.asarray(inputs[0], dtype='f')
# create module, passing cpu context
if self.device == 'CPU':
    ctx = mx.cpu()
else:
    raise NotImplementedError("Only CPU context is supported for now")
mod = mx.mod.Module(symbol=self.symbol, data_names=['input_0'],
                    context=ctx, label_names=None)
mod.bind(for_training=False,
         data_shapes=[('input_0', input_data.shape)],
         label_shapes=None)
mod.set_params(arg_params=self.params, aux_params=None)
# run inference
batch = namedtuple('Batch', ['data'])
mod.forward(batch([mx.nd.array(input_data)]))
result = mod.get_outputs()[0].asnumpy()
return [result]
<SYSTEM_TASK:> Wrapper for getting required attributes. <END_TASK> <USER_TASK:> Description: def _required_attr(self, attr, key): """Wrapper for getting required attributes."""
assert isinstance(attr, dict)
if key not in attr:
    raise AttributeError("Required attribute {} not found.".format(key))
return attr[key]
<SYSTEM_TASK:> Running individual node inference on mxnet engine and <END_TASK> <USER_TASK:> Description: def run_node(cls, node, inputs, device='CPU'): # pylint: disable=arguments-differ """Running individual node inference on mxnet engine and return the result to onnx test infrastructure. Parameters ---------- node : onnx node object loaded onnx node (individual layer) inputs : numpy array input to run a node on device : 'CPU' device to run a node on Returns ------- params : numpy array result obtained after running the operator """
graph = GraphProto() sym, _ = graph.from_onnx(MXNetBackend.make_graph(node, inputs)) data_names = [i for i in sym.get_internals().list_inputs()] data_shapes = [] reduce_op_types = set(['ReduceMin', 'ReduceMax', 'ReduceMean', 'ReduceProd', 'ReduceSum', 'Slice', 'Pad', 'Squeeze', 'Upsample', 'Reshape', 'Conv', 'ConvTranspose']) # Adding extra dimension of batch_size 1 if the batch_size is different for multiple inputs. for idx, input_name in enumerate(data_names): batch_size = 1 if len(inputs[idx].shape) < 4 and len(inputs) > 1 and \ len(set(x.shape[0] for x in inputs)) != 1: tuples = ((batch_size,), inputs[idx].shape) new_shape = sum(tuples, ()) data_shapes.append((input_name, new_shape)) else: data_shapes.append((input_name, inputs[idx].shape)) # create module, passing cpu context if device == 'CPU': ctx = mx.cpu() else: raise NotImplementedError("Only CPU context is supported for now") # create a module mod = mx.mod.Module(symbol=sym, data_names=data_names, context=ctx, label_names=None) mod.bind(for_training=False, data_shapes=data_shapes, label_shapes=None) # initializing parameters for calculating result of each individual node mod.init_params() data_forward = [] for idx, input_name in enumerate(data_names): # slice and pad operator tests needs 1 less dimension in forward pass # otherwise it will throw an error. # for squeeze operator, need to retain shape of input as provided val = inputs[idx] if node.op_type in reduce_op_types: data_forward.append(mx.nd.array(val)) else: data_forward.append(mx.nd.array([val])) mod.forward(mx.io.DataBatch(data_forward)) result = mod.get_outputs()[0].asnumpy() if node.op_type in reduce_op_types: return [result] return result
<SYSTEM_TASK:> Imports the supplied ONNX model file into MXNet symbol and parameters. <END_TASK> <USER_TASK:> Description: def import_model(model_file): """Imports the supplied ONNX model file into MXNet symbol and parameters. Parameters ---------- model_file : ONNX model file name Returns ------- sym : mx.symbol Compatible mxnet symbol params : dict of str to mx.ndarray Dict of converted parameters stored in mx.ndarray format """
graph = GraphProto() # loads model file and returns ONNX protobuf object model_proto = onnx.load(model_file) sym, params = graph.from_onnx(model_proto.graph) return sym, params
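A hedged usage sketch for import_model; 'model.onnx' is a hypothetical file path, and 'input_0' follows the renaming convention used by the run method above.

import mxnet as mx

sym, params = import_model('model.onnx')   # hypothetical ONNX file on disk
print(sym.list_outputs())                  # converted network's output names
print(len(params))                         # number of converted weight arrays
mod = mx.mod.Module(symbol=sym, data_names=['input_0'], label_names=None)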
<SYSTEM_TASK:> Public function that reads a local file and generates a SHA256 hash digest for it <END_TASK> <USER_TASK:> Description: def generate_hash(filepath): """Public function that reads a local file and generates a SHA256 hash digest for it"""
fr = FileReader(filepath) data = fr.read_bin() return _calculate_sha256(data)
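A small usage sketch, assuming generate_hash above is importable from this module; the temporary file exists only for illustration.

import os
import tempfile

tmp = tempfile.NamedTemporaryFile(delete=False, suffix='.txt')
tmp.write(b'hello world')
tmp.close()

digest = generate_hash(tmp.name)
print(len(digest))   # a SHA256 digest is 64 hex characters
os.remove(tmp.name)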
<SYSTEM_TASK:> Public function that reads a list of local directories and generates tar archives from them <END_TASK> <USER_TASK:> Description: def generate_tar_files(directory_list): """Public function that reads a list of local directories and generates tar archives from them"""
tar_file_list = [] for directory in directory_list: if dir_exists(directory): _generate_tar(directory) # create the tar archive tar_file_list.append(directory + '.tar') # append the tar archive filename to the returned tar_file_list list else: stderr("The directory '" + directory + "' does not exist and a tar archive could not be created from it.", exit=1) return tar_file_list
<SYSTEM_TASK:> Public function that removes temporary tar archive files in a local directory <END_TASK> <USER_TASK:> Description: def remove_tar_files(file_list): """Public function that removes temporary tar archive files in a local directory"""
for f in file_list: if file_exists(f) and f.endswith('.tar'): os.remove(f)
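A hedged end-to-end sketch combining generate_tar_files and remove_tar_files; it assumes both helpers are importable from this module and that 'demo_dir' is a scratch directory name that does not clash with anything on disk.

import os

os.makedirs('demo_dir', exist_ok=True)        # hypothetical scratch directory
with open(os.path.join('demo_dir', 'note.txt'), 'w') as f:
    f.write('sample')

tar_files = generate_tar_files(['demo_dir'])  # -> ['demo_dir.tar']
# ... encrypt or upload the archives here ...
remove_tar_files(tar_files)                   # delete the temporary archives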
<SYSTEM_TASK:> Private function that reads a local directory and generates a tar archive from it <END_TASK> <USER_TASK:> Description: def _generate_tar(dir_path): """Private function that reads a local directory and generates a tar archive from it"""
try: with tarfile.open(dir_path + '.tar', 'w') as tar: tar.add(dir_path) except tarfile.TarError as e: stderr("Error: tar archive creation failed [" + str(e) + "]", exit=1)
<SYSTEM_TASK:> public method for single file encryption with optional compression, ASCII armored formatting, and file hash digest generation <END_TASK> <USER_TASK:> Description: def encrypt_file(self, inpath, force_nocompress=False, force_compress=False, armored=False, checksum=False): """public method for single file encryption with optional compression, ASCII armored formatting, and file hash digest generation"""
if armored: if force_compress: command_stub = self.command_maxcompress_armored elif force_nocompress: command_stub = self.command_nocompress_armored else: if self._is_compress_filetype(inpath): command_stub = self.command_default_armored else: command_stub = self.command_nocompress_armored else: if force_compress: command_stub = self.command_maxcompress elif force_nocompress: command_stub = self.command_nocompress else: if self._is_compress_filetype(inpath): command_stub = self.command_default else: command_stub = self.command_nocompress encrypted_outpath = self._create_outfilepath(inpath) system_command = command_stub + encrypted_outpath + " --passphrase " + quote(self.passphrase) + " --symmetric " + quote(inpath) try: response = muterun(system_command) # check returned status code if response.exitcode == 0: stdout(encrypted_outpath + " was generated from " + inpath) if checksum: # add a SHA256 hash digest of the encrypted file - requested by user --hash flag in command from crypto.library import hash encrypted_file_hash = hash.generate_hash(encrypted_outpath) if len(encrypted_file_hash) == 64: stdout("SHA256 hash digest for " + encrypted_outpath + " :") stdout(encrypted_file_hash) else: stdout("Unable to generate a SHA256 hash digest for the file " + encrypted_outpath) else: stderr(response.stderr, 0) stderr("Encryption failed") sys.exit(1) except Exception as e: stderr("There was a problem with the execution of gpg. Encryption failed. Error: [" + str(e) + "]") sys.exit(1)
<SYSTEM_TASK:> public method for multiple file encryption with optional compression, ASCII armored formatting, and file hash digest generation <END_TASK> <USER_TASK:> Description: def encrypt_files(self, file_list, force_nocompress=False, force_compress=False, armored=False, checksum=False): """public method for multiple file encryption with optional compression, ASCII armored formatting, and file hash digest generation"""
for the_file in file_list: self.encrypt_file(the_file, force_nocompress, force_compress, armored, checksum)
<SYSTEM_TASK:> private method that performs magic number and size check on file to determine whether to compress the file <END_TASK> <USER_TASK:> Description: def _is_compress_filetype(self, inpath): """private method that performs magic number and size check on file to determine whether to compress the file"""
# check for common file type suffixes in order to avoid the need for file reads to check magic number for binary vs. text file if self._is_common_binary(inpath): return False elif self._is_common_text(inpath): return True else: # files > 10kB get checked for compression (arbitrary decision to skip compression on small files) the_file_size = file_size(inpath) if the_file_size > 10240: if the_file_size > 512000: # seems to be a break point at ~ 500kb where file compression offset by additional file read, so limit tests to files > 500kB try: system_command = "file --mime-type -b " + quote(inpath) response = muterun(system_command) if response.stdout[0:5] == "text/": # check for a text file mime type return True # appropriate size, appropriate file mime type else: return False # appropriate size, inappropriate file mime type except Exception: return False else: return True # if file size is < 500kB, skip the additional file read and just go with compression else: return False
<SYSTEM_TASK:> private method to compare file path mime type to common binary file types <END_TASK> <USER_TASK:> Description: def _is_common_binary(self, inpath): """private method to compare file path mime type to common binary file types"""
# make local variables for the available char numbers in the suffix types to be tested two_suffix = inpath[-3:] three_suffix = inpath[-4:] four_suffix = inpath[-5:] # test for inclusion in the instance variable common_binaries (defined in __init__) if two_suffix in self.common_binaries: return True elif three_suffix in self.common_binaries: return True elif four_suffix in self.common_binaries: return True else: return False
<SYSTEM_TASK:> private method to compare file path mime type to common text file types <END_TASK> <USER_TASK:> Description: def _is_common_text(self, inpath): """private method to compare file path mime type to common text file types"""
# make local variables for the available char numbers in the suffix types to be tested one_suffix = inpath[-2:] two_suffix = inpath[-3:] three_suffix = inpath[-4:] four_suffix = inpath[-5:] # test for inclusion in the instance variable common_text (defined in __init__) if one_suffix in self.common_text: return True elif two_suffix in self.common_text: return True elif three_suffix in self.common_text: return True elif four_suffix in self.common_text: return True else: return False
<SYSTEM_TASK:> Seems to be the fastest kNN implementation. Pre-sorts each row's neighbors <END_TASK> <USER_TASK:> Description: def knn_impute_few_observed( X, missing_mask, k, verbose=False, print_interval=100): """ Seems to be the fastest kNN implementation. Pre-sorts each row's neighbors and then filters these sorted indices using each column's mask of observed values. Important detail: If k observed values are not available, then fewer than k neighboring rows are used. Parameters ---------- X : np.ndarray Matrix to fill of shape (n_samples, n_features) missing_mask : np.ndarray Boolean array of same shape as X k : int verbose : bool """
start_t = time.time() n_rows, n_cols = X.shape # put the missing mask in column major order since it's accessed # one column at a time missing_mask_column_major = np.asarray(missing_mask, order="F") observed_mask_column_major = ~missing_mask_column_major X_column_major = X.copy(order="F") X_row_major, D, effective_infinity = \ knn_initialize(X, missing_mask, verbose=verbose) # get rid of infinities, replace them with a very large number D_sorted = np.argsort(D, axis=1) inv_D = 1.0 / D D_valid_mask = D < effective_infinity valid_distances_per_row = D_valid_mask.sum(axis=1) # trim the number of other rows we consider to exclude those # with infinite distances D_sorted = [ D_sorted[i, :count] for i, count in enumerate(valid_distances_per_row) ] dot = np.dot for i in range(n_rows): missing_row = missing_mask[i, :] missing_indices = np.where(missing_row)[0] row_weights = inv_D[i, :] if verbose and i % print_interval == 0: print( "Imputing row %d/%d with %d missing, elapsed time: %0.3f" % ( i + 1, n_rows, len(missing_indices), time.time() - start_t)) candidate_neighbor_indices = D_sorted[i] for j in missing_indices: observed = observed_mask_column_major[:, j] sorted_observed = observed[candidate_neighbor_indices] observed_neighbor_indices = candidate_neighbor_indices[sorted_observed] k_nearest_indices = observed_neighbor_indices[:k] weights = row_weights[k_nearest_indices] weight_sum = weights.sum() if weight_sum > 0: column = X_column_major[:, j] values = column[k_nearest_indices] X_row_major[i, j] = dot(values, weights) / weight_sum return X_row_major
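A minimal sketch of calling knn_impute_few_observed on a toy matrix; it assumes the function and its helpers above are importable (the knnimpute package ships them under these names), and the data is purely illustrative.

import numpy as np

X = np.array([[1.0, 2.0, np.nan],
              [3.0, np.nan, 6.0],
              [5.0, 4.0, 3.0],
              [np.nan, 2.0, 1.0]])
missing_mask = np.isnan(X)

X_filled = knn_impute_few_observed(X.copy(), missing_mask, k=2)
print(X_filled)   # missing cells replaced by distance-weighted neighbor means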
<SYSTEM_TASK:> Fill X with NaN values if necessary, construct the n_samples x n_samples <END_TASK> <USER_TASK:> Description: def knn_initialize( X, missing_mask, verbose=False, min_dist=1e-6, max_dist_multiplier=1e6): """ Fill X with NaN values if necessary, construct the n_samples x n_samples distance matrix and set the self-distance of each row to infinity. Returns contents of X laid out in row-major, the distance matrix, and an "effective infinity" which is larger than any entry of the distance matrix. """
X_row_major = X.copy("C") if missing_mask.sum() != np.isnan(X_row_major).sum(): # if the missing values have already been zero-filled need # to put NaN's back in the data matrix for the distances function X_row_major[missing_mask] = np.nan D = all_pairs_normalized_distances(X_row_major) D_finite_flat = D[np.isfinite(D)] if len(D_finite_flat) > 0: max_dist = max_dist_multiplier * max(1, D_finite_flat.max()) else: max_dist = max_dist_multiplier # set diagonal of distance matrix to a large value since we don't want # points considering themselves as neighbors np.fill_diagonal(D, max_dist) D[D < min_dist] = min_dist # prevents 0s D[D > max_dist] = max_dist # prevents infinities return X_row_major, D, max_dist
<SYSTEM_TASK:> Reference implementation of normalized all-pairs distance, used <END_TASK> <USER_TASK:> Description: def all_pairs_normalized_distances_reference(X): """ Reference implementation of normalized all-pairs distance, used for testing the more efficient implementation above for equivalence. """
n_samples, n_cols = X.shape # matrix of mean squared difference between samples D = np.ones((n_samples, n_samples), dtype="float32") * np.inf for i in range(n_samples): diffs = X - X[i, :].reshape((1, n_cols)) missing_diffs = np.isnan(diffs) missing_counts_per_row = missing_diffs.sum(axis=1) valid_rows = missing_counts_per_row < n_cols D[i, valid_rows] = np.nanmean( diffs[valid_rows, :] ** 2, axis=1) return D
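A quick sanity-check sketch for the reference implementation above, using only numpy and a tiny matrix with one missing entry.

import numpy as np

X = np.array([[0.0, 1.0],
              [np.nan, 1.0],
              [2.0, 3.0]])
D = all_pairs_normalized_distances_reference(X)
# each finite entry is the mean squared difference over the commonly observed columns
print(D.shape)       # (3, 3)
print(np.diag(D))    # ~0 on the diagonal (each row compared with itself)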
<SYSTEM_TASK:> Takes an object and returns a corresponding API class. <END_TASK> <USER_TASK:> Description: def _make_obj(obj): """Takes an object and returns a corresponding API class. The names and values of the data will match exactly with those found in the online docs at https://pokeapi.co/docsv2/ . In some cases, the data may be of a standard type, such as an integer or string. For those cases, the input value is simply returned, unchanged. :param obj: the object to be converted :return either the same value, if it does not need to be converted, or an APIResource or APIMetadata instance, depending on the input data. """
if isinstance(obj, dict): if 'url' in obj.keys(): url = obj['url'] id_ = int(url.split('/')[-2]) # ID of the data. endpoint = url.split('/')[-3] # Where the data is located. return APIResource(endpoint, id_, lazy_load=True) return APIMetadata(obj) return obj
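A hedged illustration of the dispatch in _make_obj; the resource dict mimics the shape of data returned by the PokéAPI, and APIResource/APIMetadata are the classes this module provides.

# dict with a 'url' key -> lazy APIResource for endpoint 'pokemon', id 1
resource = {'name': 'bulbasaur', 'url': 'https://pokeapi.co/api/v2/pokemon/1/'}
obj = _make_obj(resource)

# dict without a 'url' key -> wrapped in APIMetadata
meta = _make_obj({'front_default': None, 'back_default': None})

# non-dict values pass through unchanged
same = _make_obj(42)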
<SYSTEM_TASK:> Function to collect reference data and connect it to the instance as <END_TASK> <USER_TASK:> Description: def _load(self): """Function to collect reference data and connect it to the instance as attributes. Internal function, does not usually need to be called by the user, as it is called automatically when an attribute is requested. :return None """
data = get_data(self.endpoint, self.id_, force_lookup=self.__force_lookup) # Make our custom objects from the data. for key, val in data.items(): if key == 'location_area_encounters' \ and self.endpoint == 'pokemon': params = val.split('/')[-3:] ep, id_, subr = params encounters = get_data(ep, int(id_), subr) data[key] = [_make_obj(enc) for enc in encounters] continue if isinstance(val, dict): data[key] = _make_obj(val) elif isinstance(val, list): data[key] = [_make_obj(i) for i in val] self.__dict__.update(data) return None
<SYSTEM_TASK:> Create a leaf directory and all intermediate ones in a safe way. <END_TASK> <USER_TASK:> Description: def safe_make_dirs(path, mode=0o777): """Create a leaf directory and all intermediate ones in a safe way. A wrapper to os.makedirs() that handles existing leaf directories while avoiding os.path.exists() race conditions. :param path: relative or absolute directory tree to create :param mode: directory permissions in octal :return: The newly-created path """
try: os.makedirs(path, mode) except OSError as error: if error.errno != 17: # File exists raise return path
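A small usage sketch, assuming safe_make_dirs above is in scope; calling it twice on the same path is exactly the case the errno check handles.

import os

path = safe_make_dirs(os.path.join('tmp_cache', 'nested', 'dir'))  # hypothetical path
path_again = safe_make_dirs(path)   # second call is a no-op, no exception raised
print(path == path_again)           # True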
<SYSTEM_TASK:> Get the default cache location. <END_TASK> <USER_TASK:> Description: def get_default_cache(): """Get the default cache location. Adheres to the XDG Base Directory specification, as described in https://standards.freedesktop.org/basedir-spec/basedir-spec-latest.html :return: the default cache directory absolute path """
xdg_cache_home = os.environ.get('XDG_CACHE_HOME') or \ os.path.join(os.path.expanduser('~'), '.cache') return os.path.join(xdg_cache_home, 'pokebase')
<SYSTEM_TASK:> Simple function to change the cache location. <END_TASK> <USER_TASK:> Description: def set_cache(new_path=None): """Simple function to change the cache location. `new_path` can be an absolute or relative path. If the directory does not exist yet, this function will create it. If None it will set the cache to the default cache directory. If you are going to change the cache directory, this function should be called at the top of your script, before you make any calls to the API. This is to avoid duplicate files and excess API calls. :param new_path: relative or absolute path to the desired new cache directory :return: str, str """
global CACHE_DIR, API_CACHE, SPRITE_CACHE if new_path is None: new_path = get_default_cache() CACHE_DIR = safe_make_dirs(os.path.abspath(new_path)) API_CACHE = os.path.join(CACHE_DIR, 'api.cache') SPRITE_CACHE = safe_make_dirs(os.path.join(CACHE_DIR, 'sprite')) return CACHE_DIR, API_CACHE, SPRITE_CACHE
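A usage sketch for the cache helpers above; './pokebase_cache' is a hypothetical relative path and is created if missing.

# point the module at a custom cache directory before making any API calls
cache_dir, api_cache, sprite_cache = set_cache('./pokebase_cache')
print(cache_dir)      # absolute path of ./pokebase_cache
print(api_cache)      # <cache_dir>/api.cache
print(sprite_cache)   # <cache_dir>/sprite

# passing None restores the XDG default location
set_cache(None)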
<SYSTEM_TASK:> Attaches lun, snap or member snap of cg snap to host. <END_TASK> <USER_TASK:> Description: def attach(self, lun_or_snap, skip_hlu_0=False): """ Attaches lun, snap or member snap of cg snap to host. Don't pass cg snapshot in as `lun_or_snap`. :param lun_or_snap: the lun, snap, or a member snap of cg snap :param skip_hlu_0: whether to skip hlu 0 :return: the hlu number """
# The `UnityResourceAlreadyAttachedError` check was removed because there # is a host cache in the Cinder driver. If the lun was attached to # the host and the info was stored in the cache, a wrong hlu would be # returned. # Attaching a lun to a host twice succeeds, so if a Cinder retry # triggers another attachment of the same lun to the host, the cost is # only one more REST request for `modifyLun` and one host instance # query. try: return self._attach_with_retry(lun_or_snap, skip_hlu_0) except ex.SystemAPINotSupported: # Attaching snap to host is not supported before 4.1. raise except ex.UnityAttachExceedLimitError: # The number of luns exceeds the system limit raise except: # noqa # other attach error, remove this lun if already attached self.detach(lun_or_snap) raise
<SYSTEM_TASK:> Returns True if `lun_or_snap` is attached to the host. <END_TASK> <USER_TASK:> Description: def has_hlu(self, lun_or_snap, cg_member=None): """Returns True if `lun_or_snap` is attached to the host. :param lun_or_snap: can be lun, lun snap, cg snap or a member snap of cg snap. :param cg_member: the member lun of cg if `lun_or_snap` is cg snap. :return: True - if `lun_or_snap` is attached, otherwise False. """
hlu = self.get_hlu(lun_or_snap, cg_member=cg_member) return hlu is not None
<SYSTEM_TASK:> Gets the host lun of a lun, lun snap, cg snap or a member snap of cg <END_TASK> <USER_TASK:> Description: def get_host_lun(self, lun_or_snap, cg_member=None): """Gets the host lun of a lun, lun snap, cg snap or a member snap of cg snap. :param lun_or_snap: can be lun, lun snap, cg snap or a member snap of cg snap. :param cg_member: the member lun of cg if `lun_or_snap` is cg snap. :return: the host lun object. """
import storops.unity.resource.lun as lun_module import storops.unity.resource.snap as snap_module which = None if isinstance(lun_or_snap, lun_module.UnityLun): which = self._get_host_luns(lun=lun_or_snap) elif isinstance(lun_or_snap, snap_module.UnitySnap): if lun_or_snap.is_cg_snap(): if cg_member is None: log.debug('None host lun for CG snap {}. ' 'Use its member snap instead or pass in ' 'cg_member.'.format(lun_or_snap.id)) return None lun_or_snap = lun_or_snap.get_member_snap(cg_member) which = self._get_host_luns(lun=cg_member, snap=lun_or_snap) else: which = self._get_host_luns(snap=lun_or_snap) if not which: log.debug('Resource(LUN or Snap) {} is not attached to host {}' .format(lun_or_snap.name, self.name)) return None return which[0]
<SYSTEM_TASK:> Gets the hlu number of a lun, lun snap, cg snap or a member snap of <END_TASK> <USER_TASK:> Description: def get_hlu(self, resource, cg_member=None): """Gets the hlu number of a lun, lun snap, cg snap or a member snap of cg snap. :param resource: can be lun, lun snap, cg snap or a member snap of cg snap. :param cg_member: the member lun of cg if `lun_or_snap` is cg snap. :return: the hlu number. """
host_lun = self.get_host_lun(resource, cg_member=cg_member) return host_lun if host_lun is None else host_lun.hlu
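A hedged sketch of the attach/query flow built on these host methods; it assumes a reachable Unity array via storops' UnitySystem, and the address, credentials, LUN name, and host name below are placeholders.

from storops import UnitySystem

unity = UnitySystem('10.0.0.1', 'admin', 'password')   # placeholder credentials
lun = unity.get_lun(name='lun_demo')                   # placeholder LUN name
host = unity.get_host(name='host_demo')                # placeholder host name

hlu = host.attach(lun, skip_hlu_0=True)   # returns the assigned hlu number
if host.has_hlu(lun):
    print('attached with hlu', host.get_hlu(lun))
host.detach(lun)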
<SYSTEM_TASK:> Return a Pandas Series of every file for chosen satellite data. <END_TASK> <USER_TASK:> Description: def list_files(tag=None, sat_id=None, data_path=None, format_str=None, supported_tags=None, fake_daily_files_from_monthly=False, two_digit_year_break=None): """Return a Pandas Series of every file for chosen satellite data. This routine is intended to be used by pysat instrument modules supporting a particular NASA CDAWeb dataset. Parameters ----------- tag : (string or NoneType) Denotes type of file to load. Accepted types are <tag strings>. (default=None) sat_id : (string or NoneType) Specifies the satellite ID for a constellation. Not used. (default=None) data_path : (string or NoneType) Path to data directory. If None is specified, the value previously set in Instrument.files.data_path is used. (default=None) format_str : (string or NoneType) User specified file format. If None is specified, the default formats associated with the supplied tags are used. (default=None) supported_tags : (dict or NoneType) keys are tags supported by list_files routine. Values are the default format_str values for key. (default=None) fake_daily_files_from_monthly : bool Some CDAWeb instrument data files are stored by month, interfering with pysat's functionality of loading by day. This flag, when true, appends daily dates to monthly files internally. These dates are used by load routine in this module to provide data by day. Returns -------- pysat.Files.from_os : (pysat._files.Files) A class containing the verified available files Examples -------- :: fname = 'cnofs_vefi_bfield_1sec_{year:04d}{month:02d}{day:02d}_v05.cdf' supported_tags = {'dc_b':fname} list_files = functools.partial(nasa_cdaweb_methods.list_files, supported_tags=supported_tags) ivm_fname = 'cnofs_cindi_ivm_500ms_{year:4d}{month:02d}{day:02d}_v01.cdf' supported_tags = {'':ivm_fname} list_files = functools.partial(cdw.list_files, supported_tags=supported_tags) """
if data_path is not None: if format_str is None: try: format_str = supported_tags[sat_id][tag] except KeyError: raise ValueError('Unknown tag') out = pysat.Files.from_os(data_path=data_path, format_str=format_str) if (not out.empty) and fake_daily_files_from_monthly: out.ix[out.index[-1] + pds.DateOffset(months=1) - pds.DateOffset(days=1)] = out.iloc[-1] out = out.asfreq('D', 'pad') out = out + '_' + out.index.strftime('%Y-%m-%d') return out return out else: estr = 'A directory must be passed to the loading routine for <Instrument Code>' raise ValueError (estr)
<SYSTEM_TASK:> Load NASA CDAWeb CDF files. <END_TASK> <USER_TASK:> Description: def load(fnames, tag=None, sat_id=None, fake_daily_files_from_monthly=False, flatten_twod=True): """Load NASA CDAWeb CDF files. This routine is intended to be used by pysat instrument modules supporting a particular NASA CDAWeb dataset. Parameters ------------ fnames : (pandas.Series) Series of filenames tag : (str or NoneType) tag or None (default=None) sat_id : (str or NoneType) satellite id or None (default=None) fake_daily_files_from_monthly : bool Some CDAWeb instrument data files are stored by month, interfering with pysat's functionality of loading by day. This flag, when true, parses the daily dates that were appended internally to monthly files by the list_files routine. These dates are used here to provide data by day. Returns --------- data : (pandas.DataFrame) Object containing satellite data meta : (pysat.Meta) Object containing metadata such as column names and units Examples -------- :: # within the new instrument module, at the top level define # a new variable named load, and set it equal to this load method # code below taken from cnofs_ivm.py. # support load routine # use the default CDAWeb method load = cdw.load """
import pysatCDF if len(fnames) <= 0 : return pysat.DataFrame(None), None else: # going to use pysatCDF to load the CDF and format # data and metadata for pysat using some assumptions. # Depending upon your needs the resulting pandas DataFrame may # need modification # currently only loads one file, which handles more situations via pysat # than you may initially think if fake_daily_files_from_monthly: # parse out date from filename fname = fnames[0][0:-11] date = pysat.datetime.strptime(fnames[0][-10:], '%Y-%m-%d') with pysatCDF.CDF(fname) as cdf: # convert data to pysat format data, meta = cdf.to_pysat(flatten_twod=flatten_twod) # select data from monthly data = data.ix[date:date+pds.DateOffset(days=1) - pds.DateOffset(microseconds=1),:] return data, meta else: # basic data return with pysatCDF.CDF(fnames[0]) as cdf: return cdf.to_pysat(flatten_twod=flatten_twod)
<SYSTEM_TASK:> Routine to download NASA CDAWeb CDF data. <END_TASK> <USER_TASK:> Description: def download(supported_tags, date_array, tag, sat_id, ftp_site='cdaweb.gsfc.nasa.gov', data_path=None, user=None, password=None, fake_daily_files_from_monthly=False): """Routine to download NASA CDAWeb CDF data. This routine is intended to be used by pysat instrument modules supporting a particular NASA CDAWeb dataset. Parameters ----------- supported_tags : dict dict of dicts. Keys are supported tag names for download. Value is a dict with 'dir', 'remote_fname', 'local_fname'. Intended to be pre-set with functools.partial then assigned to new instrument code. date_array : array_like Array of datetimes to download data for. Provided by pysat. tag : (str or NoneType) tag or None (default=None) sat_id : (str or NoneType) satellite id or None (default=None) data_path : (string or NoneType) Path to data directory. If None is specified, the value previously set in Instrument.files.data_path is used. (default=None) user : (string or NoneType) Username to be passed along to resource with relevant data. (default=None) password : (string or NoneType) User password to be passed along to resource with relevant data. (default=None) fake_daily_files_from_monthly : bool Some CDAWeb instrument data files are stored by month. This flag, when true, accommodates this reality with user feedback on a monthly time frame. Returns -------- Void : (NoneType) Downloads data to disk. Examples -------- :: # download support added to cnofs_vefi.py using code below rn = '{year:4d}/cnofs_vefi_bfield_1sec_{year:4d}{month:02d}{day:02d}_v05.cdf' ln = 'cnofs_vefi_bfield_1sec_{year:4d}{month:02d}{day:02d}_v05.cdf' dc_b_tag = {'dir':'/pub/data/cnofs/vefi/bfield_1sec', 'remote_fname':rn, 'local_fname':ln} supported_tags = {'dc_b':dc_b_tag} download = functools.partial(nasa_cdaweb_methods.download, supported_tags=supported_tags) """
import os import ftplib # connect to CDAWeb default port ftp = ftplib.FTP(ftp_site) # user anonymous, passwd anonymous@ ftp.login() try: ftp_dict = supported_tags[tag] except KeyError: raise ValueError('Tag name unknown.') # path to relevant file on CDAWeb ftp.cwd(ftp_dict['dir']) # naming scheme for files on the CDAWeb server remote_fname = ftp_dict['remote_fname'] # naming scheme for local files, should be closely related # to CDAWeb scheme, though directory structures may be reduced # if desired local_fname = ftp_dict['local_fname'] for date in date_array: # format files for specific dates and download location formatted_remote_fname = remote_fname.format(year=date.year, month=date.month, day=date.day) formatted_local_fname = local_fname.format(year=date.year, month=date.month, day=date.day) saved_local_fname = os.path.join(data_path,formatted_local_fname) # perform download try: print('Attempting to download file for '+date.strftime('%x')) sys.stdout.flush() ftp.retrbinary('RETR '+formatted_remote_fname, open(saved_local_fname,'wb').write) print('Finished.') except ftplib.error_perm as exception: # if exception[0][0:3] != '550': if str(exception.args[0]).split(" ", 1)[0] != '550': raise else: os.remove(saved_local_fname) print('File not available for '+ date.strftime('%x')) ftp.close()