text_prompt: string (157–13.1k chars) · code_prompt: string (7–19.8k chars)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def removeIndividual(self): """ Removes an individual from this repo """
self._openRepo()
dataset = self._repo.getDatasetByName(self._args.datasetName)
individual = dataset.getIndividualByName(self._args.individualName)

def func():
    self._updateRepo(self._repo.removeIndividual, individual)
self._confirmDelete("Individual", individual.getLocalId(), func)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def addPeer(self): """ Adds a new peer into this repo """
self._openRepo()
try:
    peer = peers.Peer(
        self._args.url, json.loads(self._args.attributes))
except exceptions.BadUrlException:
    raise exceptions.RepoManagerException(
        "The URL for the peer was malformed.")
except ValueError as e:
    raise exceptions.RepoManagerException(
        "The attributes message was malformed. {}".format(e))
self._updateRepo(self._repo.insertPeer, peer)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def removePeer(self): """ Removes a peer by URL from this repo """
self._openRepo()

def func():
    self._updateRepo(self._repo.removePeer, self._args.url)
self._confirmDelete("Peer", self._args.url, func)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def removeOntology(self): """ Removes an ontology from the repo. """
self._openRepo()
ontology = self._repo.getOntologyByName(self._args.ontologyName)

def func():
    self._updateRepo(self._repo.removeOntology, ontology)
self._confirmDelete("Ontology", ontology.getName(), func)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def addRnaQuantification(self): """ Adds an rnaQuantification into this repo """
self._openRepo()
dataset = self._repo.getDatasetByName(self._args.datasetName)
biosampleId = ""
if self._args.biosampleName:
    biosample = dataset.getBiosampleByName(self._args.biosampleName)
    biosampleId = biosample.getId()
if self._args.name is None:
    name = getNameFromPath(self._args.quantificationFilePath)
else:
    name = self._args.name
# TODO: programs not fully supported by GA4GH yet
programs = ""
featureType = "gene"
if self._args.transcript:
    featureType = "transcript"
rnaseq2ga.rnaseq2ga(
    self._args.quantificationFilePath, self._args.filePath, name,
    self._args.format, dataset=dataset, featureType=featureType,
    description=self._args.description, programs=programs,
    featureSetNames=self._args.featureSetNames,
    readGroupSetNames=self._args.readGroupSetName,
    biosampleId=biosampleId)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def initRnaQuantificationSet(self): """ Initialize an empty RNA quantification set """
store = rnaseq2ga.RnaSqliteStore(self._args.filePath)
store.createTables()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def addRnaQuantificationSet(self): """ Adds an rnaQuantificationSet into this repo """
self._openRepo()
dataset = self._repo.getDatasetByName(self._args.datasetName)
if self._args.name is None:
    name = getNameFromPath(self._args.filePath)
else:
    name = self._args.name
rnaQuantificationSet = rna_quantification.SqliteRnaQuantificationSet(
    dataset, name)
referenceSetName = self._args.referenceSetName
if referenceSetName is None:
    raise exceptions.RepoManagerException(
        "A reference set name must be provided")
referenceSet = self._repo.getReferenceSetByName(referenceSetName)
rnaQuantificationSet.setReferenceSet(referenceSet)
rnaQuantificationSet.populateFromFile(self._args.filePath)
rnaQuantificationSet.setAttributes(json.loads(self._args.attributes))
self._updateRepo(
    self._repo.insertRnaQuantificationSet, rnaQuantificationSet)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def removeRnaQuantificationSet(self): """ Removes an rnaQuantificationSet from this repo """
self._openRepo()
dataset = self._repo.getDatasetByName(self._args.datasetName)
rnaQuantSet = dataset.getRnaQuantificationSetByName(
    self._args.rnaQuantificationSetName)

def func():
    self._updateRepo(self._repo.removeRnaQuantificationSet, rnaQuantSet)
self._confirmDelete(
    "RnaQuantificationSet", rnaQuantSet.getLocalId(), func)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def rnaseq2ga(quantificationFilename, sqlFilename, localName, rnaType, dataset=None, featureType="gene", description="", programs="", featureSetNames="", readGroupSetNames="", biosampleId=""): """ Reads RNA Quantification data in one of several formats and stores the data in a sqlite database for use by the GA4GH reference server. Supports the following quantification output types: Cufflinks, kallisto, RSEM. """
readGroupSetName = "" if readGroupSetNames: readGroupSetName = readGroupSetNames.strip().split(",")[0] featureSetIds = "" readGroupIds = "" if dataset: featureSetIdList = [] if featureSetNames: for annotationName in featureSetNames.split(","): featureSet = dataset.getFeatureSetByName(annotationName) featureSetIdList.append(featureSet.getId()) featureSetIds = ",".join(featureSetIdList) # TODO: multiple readGroupSets if readGroupSetName: readGroupSet = dataset.getReadGroupSetByName(readGroupSetName) readGroupIds = ",".join( [x.getId() for x in readGroupSet.getReadGroups()]) if rnaType not in SUPPORTED_RNA_INPUT_FORMATS: raise exceptions.UnsupportedFormatException(rnaType) rnaDB = RnaSqliteStore(sqlFilename) if rnaType == "cufflinks": writer = CufflinksWriter(rnaDB, featureType, dataset=dataset) elif rnaType == "kallisto": writer = KallistoWriter(rnaDB, featureType, dataset=dataset) elif rnaType == "rsem": writer = RsemWriter(rnaDB, featureType, dataset=dataset) writeRnaseqTable(rnaDB, [localName], description, featureSetIds, readGroupId=readGroupIds, programs=programs, biosampleId=biosampleId) writeExpressionTable(writer, [(localName, quantificationFilename)])
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def createIndices(self): """ Index columns that are queried. The expression index can take a long time. """
sql = '''CREATE INDEX name_index ON Expression (name)'''
self._cursor.execute(sql)
self._dbConn.commit()
sql = '''CREATE INDEX expression_index ON Expression (expression)'''
self._cursor.execute(sql)
self._dbConn.commit()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def writeExpression(self, rnaQuantificationId, quantfilename): """ Reads the quantification results file and adds entries to the specified database. """
isNormalized = self._isNormalized
units = self._units
with open(quantfilename, "r") as quantFile:
    quantificationReader = csv.reader(quantFile, delimiter=b"\t")
    header = next(quantificationReader)
    expressionLevelColNum = self.setColNum(
        header, self._expressionLevelCol)
    nameColNum = self.setColNum(header, self._nameCol)
    countColNum = self.setColNum(header, self._countCol, -1)
    confColLowNum = self.setColNum(header, self._confColLow, -1)
    confColHiNum = self.setColNum(header, self._confColHi, -1)
    expressionId = 0
    for expression in quantificationReader:
        expressionLevel = expression[expressionLevelColNum]
        name = expression[nameColNum]
        rawCount = 0.0
        if countColNum != -1:
            rawCount = expression[countColNum]
        confidenceLow = 0.0
        confidenceHi = 0.0
        score = 0.0
        if confColLowNum != -1 and confColHiNum != -1:
            confidenceLow = float(expression[confColLowNum])
            confidenceHi = float(expression[confColHiNum])
            score = (confidenceLow + confidenceHi) / 2
        datafields = (expressionId, rnaQuantificationId, name,
                      expressionLevel, isNormalized, rawCount,
                      score, units, confidenceLow, confidenceHi)
        self._db.addExpression(datafields)
        expressionId += 1
    self._db.batchAddExpression()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _fetchSequence(ac, startIndex=None, endIndex=None): """Fetch sequences from NCBI using the eself interface. An interbase interval may be optionally provided with startIndex and endIndex. NCBI eself will return just the requested subsequence, which might greatly reduce payload sizes (especially with chromosome-scale sequences). When wrapped is True, return list of sequence lines rather than concatenated sequence. 1596 Pass the desired interval rather than using Python's [] slice operator. 'MESRETLSSS' 'MESRETLSSS' """
urlFmt = (
    "http://eutils.ncbi.nlm.nih.gov/entrez/eutils/efetch.fcgi?"
    "db=nucleotide&id={ac}&rettype=fasta&retmode=text")
if startIndex is None or endIndex is None:
    url = urlFmt.format(ac=ac)
else:
    urlFmt += "&seq_start={start}&seq_stop={stop}"
    url = urlFmt.format(ac=ac, start=startIndex + 1, stop=endIndex)
resp = requests.get(url)
resp.raise_for_status()
seqlines = resp.content.splitlines()[1:]
print("{ac}[{s},{e}) => {n} lines ({u})".format(
    ac=ac, s=startIndex, e=endIndex, n=len(seqlines), u=url))
# return response as list of lines, already line wrapped
return seqlines
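A small check of the interval arithmetic above, using a hypothetical accession: the interbase [startIndex, endIndex) convention maps onto NCBI's 1-based, fully closed seq_start/seq_stop parameters, so only the start is shifted by one.

urlFmt = ("http://eutils.ncbi.nlm.nih.gov/entrez/eutils/efetch.fcgi?"
          "db=nucleotide&id={ac}&rettype=fasta&retmode=text"
          "&seq_start={start}&seq_stop={stop}")
# 'NM_000551.3' is just an example accession; interbase [0, 10)
# becomes the 1-based closed interval [1, 10]
url = urlFmt.format(ac="NM_000551.3", start=0 + 1, stop=10)
assert "seq_start=1" in url and "seq_stop=10" in url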
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def createBamHeader(self, baseHeader): """ Creates a new bam header based on the specified header from the parent BAM file. """
header = dict(baseHeader)
newSequences = []
for index, referenceInfo in enumerate(header['SQ']):
    if index < self.numChromosomes:
        referenceName = referenceInfo['SN']
        # The sequence dictionary in the BAM file has to match up
        # with the sequence ids in the data, so we must be sure
        # that these still match up.
        assert referenceName == self.chromosomes[index]
        newReferenceInfo = {
            'AS': self.referenceSetName,
            'SN': referenceName,
            'LN': 0,  # FIXME
            'UR': 'http://example.com',
            'M5': 'dbb6e8ece0b5de29da56601613007c2a',  # FIXME
            'SP': 'Human'
        }
        newSequences.append(newReferenceInfo)
header['SQ'] = newSequences
return header
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def createRepo(self): """ Creates the repository for all the data we've just downloaded. """
repo = datarepo.SqlDataRepository(self.repoPath)
repo.open("w")
repo.initialise()

referenceSet = references.HtslibReferenceSet("GRCh37-subset")
referenceSet.populateFromFile(self.fastaFilePath)
referenceSet.setDescription("Subset of GRCh37 used for demonstration")
referenceSet.setSpeciesFromJson(
    '{"id": "9606",' +
    '"term": "Homo sapiens", "source_name": "NCBI"}')
for reference in referenceSet.getReferences():
    reference.setSpeciesFromJson(
        '{"id": "9606",' +
        '"term": "Homo sapiens", "source_name": "NCBI"}')
    reference.setSourceAccessions(
        self.accessions[reference.getName()] + ".subset")
repo.insertReferenceSet(referenceSet)

dataset = datasets.Dataset("1kg-p3-subset")
dataset.setDescription("Sample data from 1000 Genomes phase 3")
repo.insertDataset(dataset)

variantSet = variants.HtslibVariantSet(dataset, "mvncall")
variantSet.setReferenceSet(referenceSet)
dataUrls = [vcfFile for vcfFile, _ in self.vcfFilePaths]
indexFiles = [indexFile for _, indexFile in self.vcfFilePaths]
variantSet.populateFromFile(dataUrls, indexFiles)
variantSet.checkConsistency()
repo.insertVariantSet(variantSet)

for sample, (bamFile, indexFile) in zip(
        self.samples, self.bamFilePaths):
    readGroupSet = reads.HtslibReadGroupSet(dataset, sample)
    readGroupSet.populateFromFile(bamFile, indexFile)
    readGroupSet.setReferenceSet(referenceSet)
    repo.insertReadGroupSet(readGroupSet)

repo.commit()
repo.close()
self.log("Finished creating the repository; summary:\n")
repo.open("r")
repo.printSummary()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _configure_backend(app): """A helper function used just to help modularize the code a bit."""
# Allocate the backend.
# We use URLs to specify the backend. Currently we have file:// URLs (or
# URLs with no scheme) for the SqlDataRepository, and special empty:// and
# simulated:// URLs for empty or simulated data sources.
dataSource = urlparse.urlparse(app.config["DATA_SOURCE"], "file")
if dataSource.scheme == "simulated":
    # Ignore the query string
    randomSeed = app.config["SIMULATED_BACKEND_RANDOM_SEED"]
    numCalls = app.config["SIMULATED_BACKEND_NUM_CALLS"]
    variantDensity = app.config["SIMULATED_BACKEND_VARIANT_DENSITY"]
    numVariantSets = app.config["SIMULATED_BACKEND_NUM_VARIANT_SETS"]
    numReferenceSets = app.config[
        "SIMULATED_BACKEND_NUM_REFERENCE_SETS"]
    numReferencesPerReferenceSet = app.config[
        "SIMULATED_BACKEND_NUM_REFERENCES_PER_REFERENCE_SET"]
    numAlignmentsPerReadGroup = app.config[
        "SIMULATED_BACKEND_NUM_ALIGNMENTS_PER_READ_GROUP"]
    numReadGroupsPerReadGroupSet = app.config[
        "SIMULATED_BACKEND_NUM_READ_GROUPS_PER_READ_GROUP_SET"]
    numPhenotypeAssociations = app.config[
        "SIMULATED_BACKEND_NUM_PHENOTYPE_ASSOCIATIONS"]
    numPhenotypeAssociationSets = app.config[
        "SIMULATED_BACKEND_NUM_PHENOTYPE_ASSOCIATION_SETS"]
    numRnaQuantSets = app.config[
        "SIMULATED_BACKEND_NUM_RNA_QUANTIFICATION_SETS"]
    numExpressionLevels = app.config[
        "SIMULATED_BACKEND_NUM_EXPRESSION_LEVELS_PER_RNA_QUANT_SET"]
    dataRepository = datarepo.SimulatedDataRepository(
        randomSeed=randomSeed, numCalls=numCalls,
        variantDensity=variantDensity, numVariantSets=numVariantSets,
        numReferenceSets=numReferenceSets,
        numReferencesPerReferenceSet=numReferencesPerReferenceSet,
        numReadGroupsPerReadGroupSet=numReadGroupsPerReadGroupSet,
        numAlignments=numAlignmentsPerReadGroup,
        numPhenotypeAssociations=numPhenotypeAssociations,
        numPhenotypeAssociationSets=numPhenotypeAssociationSets,
        numRnaQuantSets=numRnaQuantSets,
        numExpressionLevels=numExpressionLevels)
elif dataSource.scheme == "empty":
    dataRepository = datarepo.EmptyDataRepository()
elif dataSource.scheme == "file":
    path = os.path.join(dataSource.netloc, dataSource.path)
    dataRepository = datarepo.SqlDataRepository(path)
    dataRepository.open(datarepo.MODE_READ)
else:
    raise exceptions.ConfigurationException(
        "Unsupported data source scheme: " + dataSource.scheme)
theBackend = backend.Backend(dataRepository)
theBackend.setRequestValidation(app.config["REQUEST_VALIDATION"])
theBackend.setDefaultPageSize(app.config["DEFAULT_PAGE_SIZE"])
theBackend.setMaxResponseLength(app.config["MAX_RESPONSE_LENGTH"])
return theBackend
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def getFlaskResponse(responseString, httpStatus=200): """ Returns a Flask response object for the specified data and HTTP status. """
return flask.Response(responseString, status=httpStatus, mimetype=MIMETYPE)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def handleHttpPost(request, endpoint): """ Handles the specified HTTP POST request, which maps to the specified protocol handler endpoint and protocol request class. """
if request.mimetype and request.mimetype != MIMETYPE:
    raise exceptions.UnsupportedMediaTypeException()
request = request.get_data()
if request == '' or request is None:
    request = '{}'
responseStr = endpoint(request)
return getFlaskResponse(responseStr)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def handleException(exception): """ Handles an exception that occurs somewhere in the process of handling a request. """
serverException = exception
if not isinstance(exception, exceptions.BaseServerException):
    with app.test_request_context():
        app.log_exception(exception)
    serverException = exceptions.getServerError(exception)
error = serverException.toProtocolElement()
# If the exception is being viewed by a web browser, we can render a nicer
# view.
if flask.request and 'Accept' in flask.request.headers and \
        flask.request.headers['Accept'].find('text/html') != -1:
    message = "<h1>Error {}</h1><pre>{}</pre>".format(
        serverException.httpStatus,
        protocol.toJson(error))
    if serverException.httpStatus == 401 \
            or serverException.httpStatus == 403:
        message += "Please try <a href=\"/login\">logging in</a>."
    return message
else:
    responseStr = protocol.toJson(error)
    return getFlaskResponse(responseStr, serverException.httpStatus)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def checkAuthentication(): """ The request will have a parameter 'key' if it came from the command line client, or have a session key of 'key' if it's the browser. If the token is not found, start the login process. If there is no oidcClient, we are running naked and we don't check. If we're being redirected to the oidcCallback we don't check. :returns None if all is ok (and the request handler continues as usual). Otherwise if the key was in the session (therefore we're in a browser) then startLogin() will redirect to the OIDC provider. If the key was in the request arguments, we're using the command line and just raise an exception. """
if app.oidcClient is None:
    return
if flask.request.endpoint == 'oidcCallback':
    return
key = flask.session.get('key') or flask.request.args.get('key')
if key is None or not app.cache.get(key):
    if 'key' in flask.request.args:
        raise exceptions.NotAuthenticatedException()
    else:
        return startLogin()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def handleFlaskGetRequest(id_, flaskRequest, endpoint): """ Handles the specified flask request for one of the GET URLs Invokes the specified endpoint to generate a response. """
if flaskRequest.method == "GET": return handleHttpGet(id_, endpoint) else: raise exceptions.MethodNotAllowedException()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def handleFlaskPostRequest(flaskRequest, endpoint): """ Handles the specified flask request for one of the POST URLS Invokes the specified endpoint to generate a response. """
if flaskRequest.method == "POST": return handleHttpPost(flaskRequest, endpoint) elif flaskRequest.method == "OPTIONS": return handleHttpOptions() else: raise exceptions.MethodNotAllowedException()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def getVariantAnnotationSets(self, datasetId): """ Returns the list of ReferenceSets for this server. """
# TODO this should be displayed per-variant set, not per dataset.
variantAnnotationSets = []
dataset = app.backend.getDataRepository().getDataset(datasetId)
for variantSet in dataset.getVariantSets():
    variantAnnotationSets.extend(
        variantSet.getVariantAnnotationSets())
return variantAnnotationSets
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def auth_decorator(app=None): """ This decorator wraps a view function so that it is protected when Auth0 is enabled. This means that any request will be expected to have a signed token in the authorization header if the `AUTH0_ENABLED` configuration setting is True. The authorization header will have the form: If a request is not properly signed, an attempt is made to provide the client with useful error messages. This means that if a request is not authorized the underlying view function will not be executed. When `AUTH0_ENABLED` is false, this decorator will simply execute the decorated view without observing the authorization header. :param app: :return: Flask view decorator """
def requires_auth(f):
    @functools.wraps(f)
    def decorated(*args, **kwargs):
        # This decorator will only apply with AUTH0_ENABLED set to True.
        if app.config.get('AUTH0_ENABLED', False):
            client_id = app.config.get("AUTH0_CLIENT_ID")
            client_secret = app.config.get("AUTH0_CLIENT_SECRET")
            auth_header = flask.request.headers.get('Authorization', None)
            # Each of these functions will throw a 401 if there is a
            # problem decoding the token, with some helpful error message.
            if auth_header:
                token, profile = decode_header(
                    auth_header, client_id, client_secret)
            else:
                raise exceptions.NotAuthorizedException()
            # We store the token in the session so that later
            # stages can use it to connect identity and authorization.
            flask.session['auth0_key'] = token
            # Now we need to make sure that on top of having a good token
            # they are authorized; if not, provide an error message.
            is_authorized(app.cache, profile['email'])
            is_active(app.cache, token)
        return f(*args, **kwargs)
    return decorated
return requires_auth
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def logout(cache): """ Logs out the current session by removing it from the cache. This is expected to only occur when a session has """
cache.set(flask.session['auth0_key'], None)
flask.session.clear()
return True
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def addReference(self, reference): """ Adds the specified reference to this ReferenceSet. """
id_ = reference.getId()
self._referenceIdMap[id_] = reference
self._referenceNameMap[reference.getLocalId()] = reference
self._referenceIds.append(id_)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def setSpeciesFromJson(self, speciesJson): """ Sets the species, an OntologyTerm, to the specified value, given as a JSON string. See the documentation for details of this field. """
try:
    parsed = protocol.fromJson(speciesJson, protocol.OntologyTerm)
except Exception:
    raise exceptions.InvalidJsonException(speciesJson)
self._species = protocol.toJsonDict(parsed)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def getReferenceByName(self, name): """ Returns the reference with the specified name. """
if name not in self._referenceNameMap:
    raise exceptions.ReferenceNameNotFoundException(name)
return self._referenceNameMap[name]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def getReference(self, id_): """ Returns the Reference with the specified ID or raises a ReferenceNotFoundException if it does not exist. """
if id_ not in self._referenceIdMap:
    raise exceptions.ReferenceNotFoundException(id_)
return self._referenceIdMap[id_]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def getMd5Checksum(self): """ Returns the MD5 checksum for this reference set. This checksum is calculated by making a list of `Reference.md5checksum` for all `Reference`s in this set. We then sort this list, and take the MD5 hash of all the strings concatenated together. """
references = sorted(
    self.getReferences(),
    key=lambda ref: ref.getMd5Checksum())
checksums = ''.join([ref.getMd5Checksum() for ref in references])
md5checksum = hashlib.md5(checksums).hexdigest()
return md5checksum
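A standalone sketch of the same scheme (per-reference MD5s sorted, concatenated, and re-hashed); the checksum strings below are made-up example values, and .encode() is added for Python 3 compatibility:

import hashlib

def referenceSetMd5(referenceMd5s):
    # Sort so the set-level checksum is independent of insertion order,
    # then hash the concatenation of the per-reference checksums.
    concatenated = ''.join(sorted(referenceMd5s))
    return hashlib.md5(concatenated.encode('ascii')).hexdigest()

# Fabricated per-reference checksums; order must not matter.
a = referenceSetMd5(['1b22b98cdeb4a9304cb5d48026a85128',
                     '7a7b9d4d5b6c4f0a9e3d2c1b0a998877'])
b = referenceSetMd5(['7a7b9d4d5b6c4f0a9e3d2c1b0a998877',
                     '1b22b98cdeb4a9304cb5d48026a85128'])
assert a == b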
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def toProtocolElement(self): """ Returns the GA4GH protocol representation of this ReferenceSet. """
ret = protocol.ReferenceSet()
ret.assembly_id = pb.string(self.getAssemblyId())
ret.description = pb.string(self.getDescription())
ret.id = self.getId()
ret.is_derived = self.getIsDerived()
ret.md5checksum = self.getMd5Checksum()
if self.getSpecies():
    term = protocol.fromJson(
        json.dumps(self.getSpecies()), protocol.OntologyTerm)
    ret.species.term_id = term.term_id
    ret.species.term = term.term
ret.source_accessions.extend(self.getSourceAccessions())
ret.source_uri = pb.string(self.getSourceUri())
ret.name = self.getLocalId()
self.serializeAttributes(ret)
return ret
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def toProtocolElement(self): """ Returns the GA4GH protocol representation of this Reference. """
reference = protocol.Reference()
reference.id = self.getId()
reference.is_derived = self.getIsDerived()
reference.length = self.getLength()
reference.md5checksum = self.getMd5Checksum()
reference.name = self.getName()
if self.getSpecies():
    term = protocol.fromJson(
        json.dumps(self.getSpecies()), protocol.OntologyTerm)
    reference.species.term_id = term.term_id
    reference.species.term = term.term
reference.source_accessions.extend(self.getSourceAccessions())
reference.source_divergence = pb.int(self.getSourceDivergence())
reference.source_uri = self.getSourceUri()
self.serializeAttributes(reference)
return reference
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def checkQueryRange(self, start, end): """ Checks to ensure that the query range is valid within this reference. If not, raise ReferenceRangeErrorException. """
condition = (
    (start < 0 or end > self.getLength()) or
    start > end or
    start == end)
if condition:
    raise exceptions.ReferenceRangeErrorException(
        self.getId(), start, end)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def populateFromFile(self, dataUrl): """ Populates the instance variables of this ReferencSet from the data URL. """
self._dataUrl = dataUrl
fastaFile = self.getFastaFile()
for referenceName in fastaFile.references:
    reference = HtslibReference(self, referenceName)
    # TODO break this up into chunks and calculate the MD5
    # in bits (say, 64K chunks?)
    bases = fastaFile.fetch(referenceName)
    md5checksum = hashlib.md5(bases).hexdigest()
    reference.setMd5checksum(md5checksum)
    reference.setLength(len(bases))
    self.addReference(reference)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def populateFromRow(self, referenceSetRecord): """ Populates this reference set from the values in the specified DB row. """
self._dataUrl = referenceSetRecord.dataurl
self._description = referenceSetRecord.description
self._assemblyId = referenceSetRecord.assemblyid
self._isDerived = bool(referenceSetRecord.isderived)
self._md5checksum = referenceSetRecord.md5checksum
species = referenceSetRecord.species
if species is not None and species != 'null':
    self.setSpeciesFromJson(species)
self._sourceAccessions = json.loads(
    referenceSetRecord.sourceaccessions)
self._sourceUri = referenceSetRecord.sourceuri
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def populateFromRow(self, referenceRecord): """ Populates this reference from the values in the specified DB row. """
self._length = referenceRecord.length
self._isDerived = bool(referenceRecord.isderived)
self._md5checksum = referenceRecord.md5checksum
species = referenceRecord.species
if species is not None and species != 'null':
    self.setSpeciesFromJson(species)
self._sourceAccessions = json.loads(referenceRecord.sourceaccessions)
self._sourceDivergence = referenceRecord.sourcedivergence
self._sourceUri = referenceRecord.sourceuri
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _bindingsToDict(self, bindings): """ Given a binding from the sparql query result, create a dict of plain text """
myDict = {}
for key, val in bindings.iteritems():
    myDict[key.toPython().replace('?', '')] = val.toPython()
return myDict
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _addDataFile(self, filename): """ Given a filename, add it to the graph """
if filename.endswith('.ttl'):
    self._rdfGraph.parse(filename, format='n3')
else:
    self._rdfGraph.parse(filename, format='xml')
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _getDetails(self, uriRef, associations_details): """ Given a uriRef, return a dict of all the details for that Ref use the uriRef as the 'id' of the dict """
associationDetail = {}
for detail in associations_details:
    if detail['subject'] == uriRef:
        associationDetail[detail['predicate']] = detail['object']
associationDetail['id'] = uriRef
return associationDetail
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _formatExternalIdentifiers(self, element, element_type): """ Formats several external identifiers for query """
elementClause = None
elements = []
if not issubclass(element.__class__, dict):
    element = protocol.toJsonDict(element)
if element['externalIdentifiers']:
    for _id in element['externalIdentifiers']:
        elements.append(self._formatExternalIdentifier(
            _id, element_type))
    elementClause = "({})".format(" || ".join(elements))
return elementClause
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _formatExternalIdentifier(self, element, element_type): """ Formats a single external identifier for query """
if "http" not in element['database']: term = "{}:{}".format(element['database'], element['identifier']) namespaceTerm = self._toNamespaceURL(term) else: namespaceTerm = "{}{}".format( element['database'], element['identifier']) comparison = '?{} = <{}> '.format(element_type, namespaceTerm) return comparison
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _formatOntologyTerm(self, element, element_type): """ Formats the ontology terms for query """
elementClause = None
if isinstance(element, dict) and element.get('terms'):
    elements = []
    for _term in element['terms']:
        if _term.get('id'):
            elements.append('?{} = <{}> '.format(
                element_type, _term['id']))
        else:
            elements.append('?{} = <{}> '.format(
                element_type, self._toNamespaceURL(_term['term'])))
    elementClause = "({})".format(" || ".join(elements))
return elementClause
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _formatOntologyTermObject(self, terms, element_type): """ Formats the ontology term object for query """
elementClause = None
if not isinstance(terms, collections.Iterable):
    terms = [terms]
elements = []
for term in terms:
    if term.term_id:
        elements.append('?{} = <{}> '.format(
            element_type, term.term_id))
    else:
        elements.append('?{} = <{}> '.format(
            element_type, self._toNamespaceURL(term.term)))
elementClause = "({})".format(" || ".join(elements))
return elementClause
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _formatIds(self, element, element_type): """ Formats a set of identifiers for query """
elementClause = None
if isinstance(element, collections.Iterable):
    elements = []
    for _id in element:
        elements.append('?{} = <{}> '.format(
            element_type, _id))
    elementClause = "({})".format(" || ".join(elements))
return elementClause
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _formatEvidence(self, elements): """ Formats elements passed into parts of a query for filtering """
elementClause = None
filters = []
for evidence in elements:
    if evidence.description:
        elementClause = 'regex(?{}, "{}")'.format(
            'environment_label', evidence.description)
    if (hasattr(evidence, 'externalIdentifiers') and
            evidence.externalIdentifiers):
        # TODO will this pick up > 1 externalIdentifiers ?
        for externalIdentifier in evidence['externalIdentifiers']:
            exid_clause = self._formatExternalIdentifier(
                externalIdentifier, 'environment')
            # cleanup parens from _formatExternalIdentifier method
            elementClause = exid_clause[1:-1]
    if elementClause:
        filters.append(elementClause)
elementClause = "({})".format(" || ".join(filters))
return elementClause
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _toGA4GH(self, association, featureSets=[]): """ given an association dict, return a protocol.FeaturePhenotypeAssociation """
# The association dict has the keys: environment, environment
# label, evidence, feature label, phenotype and sources. Each
# key's value is a dict with the RDF predicates as keys and
# subjects as values.

# 1) map a GA4GH FeaturePhenotypeAssociation
# from the association dict passed to us
feature = association['feature']
fpa = protocol.FeaturePhenotypeAssociation()
fpa.id = association['id']
feature_id = feature['id']
for feature_set in featureSets:
    if self.getLocalId() in feature_set.getLocalId():
        feature_id = feature_set.getCompoundIdForFeatureId(feature_id)
fpa.feature_ids.extend([feature_id])
msg = 'Association: genotype:[{}] phenotype:[{}] environment:[{}] ' \
      'evidence:[{}] publications:[{}]'
fpa.description = msg.format(
    association['feature_label'],
    association['phenotype_label'],
    association['environment_label'],
    self._getIdentifier(association['evidence']),
    association['sources'])

# 2) map a GA4GH Evidence
# from the association's phenotype & evidence
evidence = protocol.Evidence()
phenotype = association['phenotype']
term = protocol.OntologyTerm()
term.term = association['evidence_type']
term.term_id = phenotype['id']
evidence.evidence_type.MergeFrom(term)
evidence.description = self._getIdentifier(association['evidence'])

# 3) store publications from the list of sources
for source in association['sources'].split("|"):
    evidence.info['publications'].values.add().string_value = source
fpa.evidence.extend([evidence])

# 4) map environment (drug) to environmentalContext
environmentalContext = protocol.EnvironmentalContext()
environment = association['environment']
environmentalContext.id = environment['id']
environmentalContext.description = association['environment_label']
term = protocol.OntologyTerm()
term.term = environment['id']
term.term_id = 'http://purl.obolibrary.org/obo/RO_0002606'
environmentalContext.environment_type.MergeFrom(term)
fpa.environmental_contexts.extend([environmentalContext])

# 5) map the phenotype
phenotypeInstance = protocol.PhenotypeInstance()
term = protocol.OntologyTerm()
term.term = phenotype[TYPE]
term.term_id = phenotype['id']
phenotypeInstance.type.MergeFrom(term)
phenotypeInstance.description = phenotype[LABEL]
phenotypeInstance.id = phenotype['id']
fpa.phenotype.MergeFrom(phenotypeInstance)
fpa.phenotype_association_set_id = self.getId()
return fpa
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _formatFilterQuery(self, request=None, featureSets=[]): """ Generate a formatted sparql query with appropriate filters """
query = self._baseQuery()
filters = []
if issubclass(request.__class__,
              protocol.SearchGenotypePhenotypeRequest):
    filters += self._filterSearchGenotypePhenotypeRequest(
        request, featureSets)
if issubclass(request.__class__, protocol.SearchPhenotypesRequest):
    filters += self._filterSearchPhenotypesRequest(request)
# apply filters
filter = "FILTER ({})".format(' && '.join(filters))
if len(filters) == 0:
    filter = ""
query = query.replace("#%FILTER%", filter)
return query
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _filterSearchPhenotypesRequest(self, request): """ Filters request for phenotype search requests """
filters = []
if request.id:
    filters.append("?phenotype = <{}>".format(request.id))
if request.description:
    filters.append(
        'regex(?phenotype_label, "{}")'.format(request.description))
# OntologyTerms
# TODO: refactor this repetitive code
if hasattr(request.type, 'id') and request.type.id:
    ontologyTermsClause = self._formatOntologyTermObject(
        request.type, 'phenotype')
    if ontologyTermsClause:
        filters.append(ontologyTermsClause)
if len(request.qualifiers) > 0:
    ontologyTermsClause = self._formatOntologyTermObject(
        request.qualifiers, 'phenotype_quality')
    if ontologyTermsClause:
        filters.append(ontologyTermsClause)
if hasattr(request.age_of_onset, 'id') and request.age_of_onset.id:
    ontologyTermsClause = self._formatOntologyTermObject(
        request.age_of_onset, 'phenotype_quality')
    if ontologyTermsClause:
        filters.append(ontologyTermsClause)
return filters
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def parseStep(self, line): """ Parse the line describing the mode. One of: variableStep chrom=<reference> [span=<window_size>] fixedStep chrom=<reference> start=<position> step=<step_interval> [span=<window_size>] Span is optional, defaulting to 1. It indicates that each value applies to region, starting at the given position and extending <span> positions. """
fields = dict([field.split('=') for field in line.split()[1:]])
if 'chrom' in fields:
    self._reference = fields['chrom']
else:
    raise ValueError("Missing chrom field in %s" % line.strip())
if line.startswith("fixedStep"):
    if 'start' in fields:
        self._start = int(fields['start']) - 1  # to 0-based
    else:
        raise ValueError("Missing start field in %s" % line.strip())
if 'span' in fields:
    self._span = int(fields['span'])
if 'step' in fields:
    self._step = int(fields['step'])
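A minimal standalone check of the same header parsing, using an example fixedStep declaration (parseStep above does this with instance state and the same 1-based to 0-based shift):

line = "fixedStep chrom=chr19 start=49304701 step=300 span=200"
fields = dict(field.split('=') for field in line.split()[1:])
assert fields['chrom'] == 'chr19'
assert int(fields['start']) - 1 == 49304700  # converted to 0-based
assert int(fields['step']) == 300 and int(fields['span']) == 200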
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def readWiggleLine(self, line): """ Read a wiggle line. If it is a data line, add values to the protocol object. """
if (line.isspace() or line.startswith("#") or
        line.startswith("browser") or line.startswith("track")):
    return
elif line.startswith("variableStep"):
    self._mode = self._VARIABLE_STEP
    self.parseStep(line)
    return
elif line.startswith("fixedStep"):
    self._mode = self._FIXED_STEP
    self.parseStep(line)
    return
elif self._mode is None:
    raise ValueError("Unexpected input line: %s" % line.strip())

if self._queryReference != self._reference:
    return

# read data lines
fields = line.split()
if self._mode == self._VARIABLE_STEP:
    start = int(fields[0]) - 1  # to 0-based
    val = float(fields[1])
else:
    start = self._start
    self._start += self._step
    val = float(fields[0])

if start < self._queryEnd and start > self._queryStart:
    if self._position is None:
        self._position = start
        self._data.start = start
    # fill gap
    while self._position < start:
        self._data.values.append(float('NaN'))
        self._position += 1
    for _ in xrange(self._span):
        self._data.values.append(val)
    self._position += self._span
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def wiggleFileHandleToProtocol(self, fileHandle): """ Return a continuous protocol object satsifiying the given query parameters from the given wiggle file handle. """
for line in fileHandle:
    self.readWiggleLine(line)
return self._data
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def checkReference(self, reference): """ Check the reference for security. Tries to avoid any characters necessary for doing a script injection. """
pattern = re.compile(r'[\s,;"\'&\\]')
if pattern.findall(reference.strip()):
    return False
return True
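A quick demonstration of the blacklist as a standalone function (mirroring checkReference above), showing that shell metacharacters are rejected:

import re

def check_reference(reference):
    # Reject whitespace, quotes, and shell metacharacters that could
    # leak into a command line built from the reference name.
    pattern = re.compile(r'[\s,;"\'&\\]')
    return not pattern.findall(reference.strip())

assert check_reference('chr1')
assert not check_reference('chr1; rm -rf /')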
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def readValuesPyBigWig(self, reference, start, end): """ Use pyBigWig package to read a BigWig file for the given range and return a protocol object. pyBigWig returns an array of values that fill the query range. Not sure if it is possible to get the step and span. This method trims NaN values from the start and end. pyBigWig throws an exception if end is outside of the reference range. This function checks the query range and throws its own exceptions to avoid the ones thrown by pyBigWig. """
if not self.checkReference(reference):
    raise exceptions.ReferenceNameNotFoundException(reference)
if start < 0:
    start = 0
bw = pyBigWig.open(self._sourceFile)
referenceLen = bw.chroms(reference)
if referenceLen is None:
    raise exceptions.ReferenceNameNotFoundException(reference)
if end > referenceLen:
    end = referenceLen
if start >= end:
    raise exceptions.ReferenceRangeErrorException(
        reference, start, end)

data = protocol.Continuous()
curStart = start
curEnd = curStart + self._INCREMENT
while curStart < end:
    if curEnd > end:
        curEnd = end
    for i, val in enumerate(bw.values(reference, curStart, curEnd)):
        if not math.isnan(val):
            if len(data.values) == 0:
                data.start = curStart + i
            data.values.append(val)
            if len(data.values) == self._MAX_VALUES:
                yield data
                data = protocol.Continuous()
        elif len(data.values) > 0:
            # data.values.append(float('NaN'))
            yield data
            data = protocol.Continuous()
    curStart = curEnd
    curEnd = curStart + self._INCREMENT
bw.close()
if len(data.values) > 0:
    yield data
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def readValuesBigWigToWig(self, reference, start, end): """ Read a bigwig file and return a protocol object with values within the query range. This method uses the bigWigToWig command line tool from UCSC GoldenPath. The tool is used to return values within a query region. The output is in wiggle format, which is processed by the WiggleReader class. There could be memory issues if the returned results are large. The input reference can be a security problem (script injection). Ideally, it should be checked against a list of known chromosomes. Start and end should not be problems since they are integers. """
if not self.checkReference(reference):
    raise exceptions.ReferenceNameNotFoundException(reference)
if start < 0:
    raise exceptions.ReferenceRangeErrorException(
        reference, start, end)
# TODO: CHECK IF QUERY IS BEYOND END
cmd = ["bigWigToWig", self._sourceFile, "stdout",
       "-chrom=" + reference, "-start=" + str(start),
       "-end=" + str(end)]
wiggleReader = WiggleReader(reference, start, end)
try:
    # run command and grab output simultaneously
    process = subprocess.Popen(cmd, stdout=subprocess.PIPE)
    while True:
        line = process.stdout.readline()
        if line == '' and process.poll() is not None:
            break
        wiggleReader.readWiggleLine(line.strip())
except ValueError:
    raise
except Exception:
    raise Exception("bigWigToWig failed to run")
return wiggleReader.getData()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def toProtocolElement(self): """ Returns the representation of this ContinuousSet as the corresponding ProtocolElement. """
gaContinuousSet = protocol.ContinuousSet()
gaContinuousSet.id = self.getId()
gaContinuousSet.dataset_id = self.getParentContainer().getId()
gaContinuousSet.reference_set_id = pb.string(
    self._referenceSet.getId())
gaContinuousSet.name = self._name
gaContinuousSet.source_uri = self._sourceUri
attributes = self.getAttributes()
for key in attributes:
    gaContinuousSet.attributes.attr[key] \
        .values.extend(protocol.encodeValue(attributes[key]))
return gaContinuousSet
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def populateFromRow(self, continuousSetRecord): """ Populates the instance variables of this ContinuousSet from the specified DB row. """
self._filePath = continuousSetRecord.dataurl
self.setAttributesJson(continuousSetRecord.attributes)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def getContinuous(self, referenceName=None, start=None, end=None): """ Method passed to runSearchRequest to fulfill the request to yield continuous protocol objects that satisfy the given query. :param str referenceName: name of reference (ex: "chr1") :param start: castable to int, start position on reference :param end: castable to int, end position on reference :return: yields a protocol.Continuous at a time """
bigWigReader = BigWigDataSource(self._filePath)
for continuousObj in bigWigReader.bigWigToProtocol(
        referenceName, start, end):
    yield continuousObj
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def getContinuousData(self, referenceName=None, start=None, end=None): """ Returns a set number of simulated continuous data. :param referenceName: name of reference to "search" on :param start: start coordinate of query :param end: end coordinate of query :return: Yields continuous list """
randomNumberGenerator = random.Random()
randomNumberGenerator.seed(self._randomSeed)
for i in range(100):
    gaContinuous = self._generateSimulatedContinuous(
        randomNumberGenerator)
    match = (
        gaContinuous.start < end and
        gaContinuous.end > start and
        gaContinuous.reference_name == referenceName)
    if match:
        yield gaContinuous
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def ping(self, destination, source=c.PING_SOURCE, ttl=c.PING_TTL, timeout=c.PING_TIMEOUT, size=c.PING_SIZE, count=c.PING_COUNT, vrf=c.PING_VRF): """ Executes ping on the device and returns a dictionary with the result :param destination: Host or IP Address of the destination :param source (optional): Source address of echo request :param ttl (optional): Maximum number of hops :param timeout (optional): Maximum seconds to wait after sending final packet :param size (optional): Size of request (bytes) :param count (optional): Number of ping request to send Output dictionary has one of following keys: * success * error In case of success, inner dictionary will have the followin keys: * probes_sent (int) * packet_loss (int) * rtt_min (float) * rtt_max (float) * rtt_avg (float) * rtt_stddev (float) * results (list) 'results' is a list of dictionaries with the following keys: * ip_address (str) * rtt (float) Example:: { 'success': { 'probes_sent': 5, 'packet_loss': 0, 'rtt_min': 72.158, 'rtt_max': 72.433, 'rtt_avg': 72.268, 'rtt_stddev': 0.094, 'results': [ { 'ip_address': u'1.1.1.1', 'rtt': 72.248 }, { 'ip_address': '2.2.2.2', 'rtt': 72.299 } ] } } OR { 'error': 'unknown host 8.8.8.8.8' } """
raise NotImplementedError
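Since the base class leaves ping unimplemented, here is a sketch of how a caller might consume the documented result dictionary; 'device' is a hypothetical concrete driver instance, not part of the source above.

# 'device' is a hypothetical concrete driver implementing ping().
result = device.ping('1.1.1.1', count=5)
if 'error' in result:
    print('ping failed: {}'.format(result['error']))
else:
    stats = result['success']
    print('{}/{} probes answered, avg rtt {} ms'.format(
        stats['probes_sent'] - stats['packet_loss'],
        stats['probes_sent'], stats['rtt_avg']))
    for probe in stats['results']:
        print('  {} -> {} ms'.format(probe['ip_address'], probe['rtt']))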
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def run_commands(self, commands): """Only useful for EOS"""
if "eos" in self.profile: return list(self.parent.cli(commands).values())[0] else: raise AttributeError("MockedDriver instance has not attribute '_rpc'")
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def find_txt(xml_tree, path, default=''): """ Extracts the text value from an XML tree, using XPath. In case of error, will return a default value. :param xml_tree: the XML Tree object. Assumed is <type 'lxml.etree._Element'>. :param path: XPath to be applied, in order to extract the desired data. :param default: Value to be returned in case of error. :return: a str value. """
value = ''
try:
    xpath_applied = xml_tree.xpath(path)
    # will consider the first match only
    if len(xpath_applied) and xpath_applied[0] is not None:
        xpath_result = xpath_applied[0]
        if isinstance(xpath_result, type(xml_tree)):
            value = xpath_result.text.strip()
        else:
            value = xpath_result
except Exception:
    # in case of any exception, returns default
    value = default
return py23_compat.text_type(value)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def convert(to, who, default=u''): """ Converts data to a specific datatype. In case of error, will return a default value. :param to: datatype to be casted to. :param who: value to cast. :param default: value to return in case of error. :return: a str value. """
if who is None:
    return default
try:
    return to(who)
except:  # noqa
    return default
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def mac(raw): """ Converts a raw string to a standardised MAC Address EUI Format. :param raw: the raw string containing the value of the MAC Address :return: a string with the MAC Address in EUI format Example: .. code-block:: python u'01:23:45:67:89:AB' Some vendors like Cisco return MAC addresses like a9:c5:2e:7b:6: which is not entirely valid (with respect to EUI48 or EUI64 standards). Therefore we need to stuff with trailing zeros Example u'A9:C5:2E:7B:60:00' If Cisco or other obscure vendors use their own standards, will throw an error and we can fix later, however, still works with weird formats like: u'01:23:45:67:89:AB' u'00:23:45:67:89:AB' """
if raw.endswith(':'):
    flat_raw = raw.replace(':', '')
    raw = '{flat_raw}{zeros_stuffed}'.format(
        flat_raw=flat_raw,
        zeros_stuffed='0' * (12 - len(flat_raw)))
return py23_compat.text_type(EUI(raw, dialect=_MACFormat))
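A dependency-free sketch of just the zero-stuffing step (the EUI/dialect formatting that netaddr performs is approximated with simple string handling), reproducing the docstring's Cisco example:

def stuff_mac(raw):
    # Pad a truncated, colon-terminated MAC such as 'a9:c5:2e:7b:6:'
    # to 12 hex digits, then reformat as colon-separated EUI pairs.
    if raw.endswith(':'):
        flat = raw.replace(':', '')
        raw = flat + '0' * (12 - len(flat))
    flat = raw.replace(':', '')
    return ':'.join(flat[i:i + 2] for i in range(0, 12, 2)).upper()

assert stuff_mac('a9:c5:2e:7b:6:') == 'A9:C5:2E:7B:60:00'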
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def convert_uptime_string_seconds(uptime):
    '''Convert uptime strings to seconds. The string can be formatted various ways.'''
    regex_list = [
        # n years, n weeks, n days, n hours, n minutes, where each of the
        # fields except minutes is optional. Additionally, each can be
        # either singular or plural.
        (r"((?P<years>\d+) year(s)?,\s+)?((?P<weeks>\d+) week(s)?,\s+)?"
         r"((?P<days>\d+) day(s)?,\s+)?((?P<hours>\d+) "
         r"hour(s)?,\s+)?((?P<minutes>\d+) minute(s)?)"),
        # n days, HH:MM:SS where each field is required (except for days)
        (r"((?P<days>\d+) day(s)?,\s+)?"
         r"((?P<hours>\d+)):((?P<minutes>\d+)):((?P<seconds>\d+))"),
        # 7w6d5h4m3s where each field is optional
        (r"((?P<weeks>\d+)w)?((?P<days>\d+)d)?((?P<hours>\d+)h)?"
         r"((?P<minutes>\d+)m)?((?P<seconds>\d+)s)?"),
    ]
    regex_list = [re.compile(x) for x in regex_list]

    uptime_dict = {}
    for regex in regex_list:
        match = regex.search(uptime)
        if match:
            uptime_dict = match.groupdict()
            break

    uptime_seconds = 0
    for unit, value in uptime_dict.items():
        if value is not None:
            if unit == 'years':
                uptime_seconds += int(value) * 31536000
            elif unit == 'weeks':
                uptime_seconds += int(value) * 604800
            elif unit == 'days':
                uptime_seconds += int(value) * 86400
            elif unit == 'hours':
                uptime_seconds += int(value) * 3600
            elif unit == 'minutes':
                uptime_seconds += int(value) * 60
            elif unit == 'seconds':
                uptime_seconds += int(value)
            else:
                raise Exception('Unrecognized unit "{}" in uptime: {}'.format(unit, uptime))
    if not uptime_dict:
        raise Exception('Unrecognized uptime string: {}'.format(uptime))
    return uptime_seconds
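A quick sanity check of the conversion, using the constants from the function itself (604800 s per week, 86400 s per day, 3600 s per hour):

import re  # required by convert_uptime_string_seconds

# '7w6d5h4m3s' = 7*604800 + 6*86400 + 5*3600 + 4*60 + 3
assert convert_uptime_string_seconds('7w6d5h4m3s') == 4770243
# '1 day, 02:03:04' = 86400 + 2*3600 + 3*60 + 4
assert convert_uptime_string_seconds('1 day, 02:03:04') == 93784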
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def str2hashalgo(description):
    '''Convert the name of a hash algorithm as described in the OATH
    specifications, to a python object handling the digest algorithm
    interface, PEP-xxx.

    :param description: the name of the hash algorithm, example 'SHA1'
    :rtype: a hash algorithm class constructor
    '''
    algo = getattr(hashlib, description.lower(), None)
    if not callable(algo):
        raise ValueError('Unknown hash algorithm %s' % description)
    return algo
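A short usage check: the returned constructor is the hashlib attribute itself.

import hashlib

assert str2hashalgo('SHA1') is hashlib.sha1
assert str2hashalgo('MD5') is hashlib.md5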
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def str2cryptofunction(crypto_function_description):
    '''
    Convert an OCRA crypto function description into a CryptoFunction
    instance.

    :param crypto_function_description:
    :returns: the CryptoFunction object
    :rtype: CryptoFunction
    '''
    s = crypto_function_description.split('-')
    if len(s) != 3:
        raise ValueError('CryptoFunction description must be triplet separated by -')
    if s[0] != HOTP:
        raise ValueError('Unknown CryptoFunction kind %s' % s[0])
    algo = str2hashalgo(s[1])
    try:
        truncation_length = int(s[2])
        if truncation_length < 0 or truncation_length > 10:
            raise ValueError()
    except ValueError:
        raise ValueError('Invalid truncation length %s' % s[2])
    return CryptoFunction(algo, truncation_length)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def hotp(key, counter, format='dec6', hash=hashlib.sha1):
    '''
    Compute a HOTP value as prescribed by RFC 4226.

    :param key: the HOTP secret key given as an hexadecimal string
    :param counter: the OTP generation counter
    :param format: the output format, can be:
       - hex, for a variable length hexadecimal format,
       - hex-notrunc, for a 40 characters hexadecimal non-truncated format,
       - dec4, for a 4 characters decimal format,
       - dec6,
       - dec7, or
       - dec8
       it defaults to dec6.
    :param hash: the hash module (usually from the hashlib package) to use,
       it defaults to hashlib.sha1.
    :returns: a string representation of the OTP value (as instructed by
       the format parameter).

    Examples:

        >>> hotp('343434', 2, format='dec6')
        '791903'
    '''
    bin_hotp = __hotp(key, counter, hash)
    if format == 'dec4':
        return dec(bin_hotp, 4)
    elif format == 'dec6':
        return dec(bin_hotp, 6)
    elif format == 'dec7':
        return dec(bin_hotp, 7)
    elif format == 'dec8':
        return dec(bin_hotp, 8)
    elif format == 'hex':
        return '%x' % truncated_value(bin_hotp)
    elif format == 'hex-notrunc':
        return _utils.tohex(bin_hotp)
    elif format == 'bin':
        return bin_hotp
    elif format == 'dec':
        return str(truncated_value(bin_hotp))
    else:
        raise ValueError('unknown format')
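The test vectors from RFC 4226 (Appendix D) make a convenient check: the shared secret is the ASCII string '12345678901234567890', given here in hex as the function expects.

# RFC 4226 Appendix D test vectors, counters 0 through 4.
key = '3132333435363738393031323334353637383930'
expected = ['755224', '287082', '359152', '969429', '338314']
for counter, otp in enumerate(expected):
    assert hotp(key, counter) == otp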
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def totp(key, format='dec6', period=30, t=None, hash=hashlib.sha1):
    '''
    Compute a TOTP value as prescribed by the OATH specifications.

    :param key: the TOTP key given as an hexadecimal string
    :param format: the output format, can be:
       - hex, for a variable length hexadecimal format,
       - hex-notrunc, for a 40 characters hexadecimal non-truncated format,
       - dec4, for a 4 characters decimal format,
       - dec6,
       - dec7, or
       - dec8
       it defaults to dec6.
    :param period: a positive integer giving the period between changes of
       the OTP value, as seconds, it defaults to 30.
    :param t: a positive integer giving the current time as seconds since
       EPOCH (1st January 1970 at 00:00 GMT), if None we use time.time();
       it defaults to None.
    :param hash: the hash module (usually from the hashlib package) to use,
       it defaults to hashlib.sha1.
    :returns: a string representation of the OTP value (as instructed by
       the format parameter).
    :type: str
    '''
    if t is None:
        t = int(time.time())
    else:
        if isinstance(t, datetime.datetime):
            t = calendar.timegm(t.utctimetuple())
        else:
            t = int(t)
    T = int(t / period)
    return hotp(key, T, format=format, hash=hash)
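Likewise, the SHA-1 vectors from RFC 6238 validate totp with the same test secret as above:

key = '3132333435363738393031323334353637383930'
# At t=59 s with a 30 s period the time counter is 1.
assert totp(key, format='dec8', t=59) == '94287082'
assert totp(key, format='dec8', t=1111111109) == '07081804'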
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def from_b32key(b32_key, state=None):
    '''Some phone apps directly accept a partial base32 encoding; we try to
       emulate that.'''
    try:
        lenient_b32decode(b32_key)
    except (TypeError, ValueError):
        # base64.b32decode raises binascii.Error (a ValueError subclass)
        # on Python 3 and TypeError on Python 2, so catch both.
        raise ValueError('invalid base32 value')
    return GoogleAuthenticator(
        'otpauth://totp/xxx?%s' % urlencode({'secret': b32_key}),
        state=state)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _fix_slice(self, inputs, new_attr): """ONNX slice provides slicing over multiple axes; add one MXNet slice_axis operator per axis."""
begin = new_attr.get('begin')
end = new_attr.get('end')
axes = new_attr.get('axis', tuple(range(len(begin))))
slice_op = mx.sym.slice_axis(inputs[0], axis=axes[0], begin=begin[0], end=end[0])
if len(axes) > 1:
    # axis 0 has already been sliced above, so start from the second axis
    for i, axis in enumerate(axes[1:], start=1):
        slice_op = mx.sym.slice_axis(slice_op, axis=axis, begin=begin[i], end=end[i])
return slice_op
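A sketch of why chaining slice_axis works (assumes mxnet and numpy are installed; the imperative NDArray API is used instead of symbols so the result is easy to check):

import mxnet as mx
import numpy as np

x = mx.nd.array(np.arange(24).reshape(2, 3, 4))
out = mx.nd.slice_axis(x, axis=0, begin=0, end=1)
out = mx.nd.slice_axis(out, axis=1, begin=1, end=3)
# equivalent to a single multi-axis slice in numpy
np.testing.assert_array_equal(out.asnumpy(),
                              x.asnumpy()[0:1, 1:3, :])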
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _fix_outputs(self, op, outputs): """A workaround to handle Dropout and similar operators that have more than one output in ONNX. """
if op == 'Dropout':
    assert len(outputs) == 2, "ONNX has two outputs for the Dropout layer."
    outputs = outputs[:-1]
return outputs
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def run(self, inputs, **kwargs):
        """Run model inference and return the result.

        Parameters
        ----------
        inputs : numpy array
            input to run a layer on

        Returns
        -------
        params : numpy array
            result obtained after running the inference on mxnet
        """
input_data = np.asarray(inputs[0], dtype='f')

# create module, passing cpu context
if self.device == 'CPU':
    ctx = mx.cpu()
else:
    raise NotImplementedError("Only CPU context is supported for now")

mod = mx.mod.Module(symbol=self.symbol, data_names=['input_0'], context=ctx,
                    label_names=None)
mod.bind(for_training=False, data_shapes=[('input_0', input_data.shape)],
         label_shapes=None)
mod.set_params(arg_params=self.params, aux_params=None)

# run inference
batch = namedtuple('Batch', ['data'])
mod.forward(batch([mx.nd.array(input_data)]))
result = mod.get_outputs()[0].asnumpy()
return [result]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _parse_default(self, target): """Helper function to parse default values."""
if not isinstance(target, (list, tuple)):
    k, v, t = target, None, lambda x: x
elif len(target) == 1:
    k, v, t = target[0], None, lambda x: x
elif len(target) == 2:
    k, v, t = target[0], target[1], lambda x: x
elif len(target) > 2:
    k, v, t = target[0], target[1], target[2]
else:
    # empty list/tuple: leave k as None so the check below raises
    k, v, t = None, None, None
if not isinstance(k, string_types):
    msg = "{} is not a valid target, (name, default) expected.".format(target)
    raise ValueError(msg)
return k, v, t
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _parse_bool(self, value): """Helper function to parse default boolean values."""
if isinstance(value, string_types):
    return value.strip().lower() in ['true', '1', 't', 'y', 'yes']
return bool(value)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _required_attr(self, attr, key): """Wrapper for getting required attributes."""
assert isinstance(attr, dict)
if key not in attr:
    raise AttributeError("Required attribute {} not found.".format(key))
return attr[key]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def make_graph(node, inputs): """Create an ONNX GraphProto from a node."""
initializer = []
tensor_input_info = []
tensor_output_info = []

# Adding input tensor info.
for index in range(len(node.input)):
    tensor_input_info.append(
        helper.make_tensor_value_info(str(node.input[index]),
                                      TensorProto.FLOAT, [1]))

    # Creating an initializer for weight params.
    # Assumes that the weight param is named 'W'.
    # TODO: Handle multiple weight params.
    # TODO: Add for "bias" if needed
    if node.input[index] == 'W':
        dim = inputs[index].shape
        param_tensor = helper.make_tensor(
            name=node.input[index],
            data_type=TensorProto.FLOAT,
            dims=dim,
            vals=inputs[index].flatten())
        initializer.append(param_tensor)

# Adding output tensor info.
for index in range(len(node.output)):
    tensor_output_info.append(
        helper.make_tensor_value_info(str(node.output[index]),
                                      TensorProto.FLOAT, [1]))

# creating graph proto object.
graph_proto = helper.make_graph(
    [node],
    "test",
    tensor_input_info,
    tensor_output_info,
    initializer=initializer)

return graph_proto
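A minimal sketch of wrapping a single node with make_graph (assumes the onnx package is installed and make_graph above is in scope; Relu is just an arbitrary single-input operator chosen for illustration):

import numpy as np
from onnx import helper

node = helper.make_node('Relu', inputs=['X'], outputs=['Y'])
graph = make_graph(node, [np.random.rand(1, 3).astype('float32')])
print(graph.name)              # 'test'
print(len(graph.initializer))  # 0, since no input is named 'W'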
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def run_node(cls, node, inputs, device='CPU'): # pylint: disable=arguments-differ
        """Run individual node inference on the mxnet engine and return the
        result to the onnx test infrastructure.

        Parameters
        ----------
        node : onnx node object
            loaded onnx node (individual layer)
        inputs : numpy array
            input to run a node on
        device : 'CPU'
            device to run a node on

        Returns
        -------
        params : numpy array
            result obtained after running the operator
        """
graph = GraphProto()
sym, _ = graph.from_onnx(MXNetBackend.make_graph(node, inputs))

data_names = [i for i in sym.get_internals().list_inputs()]
data_shapes = []
reduce_op_types = set(['ReduceMin', 'ReduceMax', 'ReduceMean',
                       'ReduceProd', 'ReduceSum', 'Slice', 'Pad',
                       'Squeeze', 'Upsample', 'Reshape', 'Conv',
                       'ConvTranspose'])

# Adding an extra dimension of batch_size 1 if the batch_size differs
# across multiple inputs.
for idx, input_name in enumerate(data_names):
    batch_size = 1
    if len(inputs[idx].shape) < 4 and len(inputs) > 1 and \
            len(set(x.shape[0] for x in inputs)) != 1:
        tuples = ((batch_size,), inputs[idx].shape)
        new_shape = sum(tuples, ())
        data_shapes.append((input_name, new_shape))
    else:
        data_shapes.append((input_name, inputs[idx].shape))

# create module, passing cpu context
if device == 'CPU':
    ctx = mx.cpu()
else:
    raise NotImplementedError("Only CPU context is supported for now")

# create a module
mod = mx.mod.Module(symbol=sym, data_names=data_names, context=ctx,
                    label_names=None)
mod.bind(for_training=False, data_shapes=data_shapes, label_shapes=None)

# initializing parameters for calculating result of each individual node
mod.init_params()

data_forward = []
for idx, input_name in enumerate(data_names):
    # slice and pad operator tests need one less dimension in the
    # forward pass, otherwise they throw an error.
    # for the squeeze operator, the input shape must be retained as
    # provided
    val = inputs[idx]
    if node.op_type in reduce_op_types:
        data_forward.append(mx.nd.array(val))
    else:
        data_forward.append(mx.nd.array([val]))

mod.forward(mx.io.DataBatch(data_forward))
result = mod.get_outputs()[0].asnumpy()
if node.op_type in reduce_op_types:
    return [result]
return result
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _revert_caffe2_pad(attr): """Removing extra padding from Caffe2."""
if len(attr) == 4:
    attr = attr[:2]
elif len(attr) == 2:
    pass
else:
    raise ValueError("Invalid caffe2 type padding: {}".format(attr))
return attr
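A behavior sketch for _revert_caffe2_pad (pure Python, assuming the function above is in scope):

assert _revert_caffe2_pad((1, 2, 1, 2)) == (1, 2)  # 4 pads -> first 2 kept
assert _revert_caffe2_pad((1, 2)) == (1, 2)        # already 2, unchanged
try:
    _revert_caffe2_pad((1, 2, 3))
except ValueError as e:
    print(e)  # Invalid caffe2 type padding: (1, 2, 3)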
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def import_model(model_file):
        """Imports the supplied ONNX model file into MXNet symbol and parameters.

        Parameters
        ----------
        model_file : str
            ONNX model file name

        Returns
        -------
        sym : mx.symbol
            Compatible mxnet symbol
        params : dict of str to mx.ndarray
            Dict of converted parameters stored in mx.ndarray format
        """
graph = GraphProto()

# loads model file and returns ONNX protobuf object
model_proto = onnx.load(model_file)
sym, params = graph.from_onnx(model_proto.graph)
return sym, params
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def generate_hash(filepath): """Public function that reads a local file and generates a SHA256 hash digest for it"""
fr = FileReader(filepath)
data = fr.read_bin()
return _calculate_sha256(data)
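For reference, the whole pipeline collapses to a few lines of stdlib hashlib; a hypothetical standalone equivalent (FileReader and _calculate_sha256 are this library's own helpers):

import hashlib

def sha256_of_file(filepath):
    # read the file in binary mode and hash the raw bytes,
    # mirroring FileReader.read_bin() + _calculate_sha256()
    with open(filepath, 'rb') as f:
        return hashlib.sha256(f.read()).hexdigest()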
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def generate_tar_files(directory_list): """Public function that reads a list of local directories and generates tar archives from them"""
tar_file_list = []

for directory in directory_list:
    if dir_exists(directory):
        _generate_tar(directory)  # create the tar archive
        # append the tar archive filename to the returned tar_file_list
        tar_file_list.append(directory + '.tar')
    else:
        stderr("The directory '" + directory + "' does not exist and a tar archive could not be created from it.", exit=1)

return tar_file_list
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def remove_tar_files(file_list): """Public function that removes temporary tar archive files in a local directory"""
for f in file_list:
    if file_exists(f) and f.endswith('.tar'):
        os.remove(f)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _generate_tar(dir_path): """Private function that reads a local directory and generates a tar archive from it"""
try:
    with tarfile.open(dir_path + '.tar', 'w') as tar:
        tar.add(dir_path)
except tarfile.TarError as e:
    stderr("Error: tar archive creation failed [" + str(e) + "]", exit=1)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def encrypt_file(self, inpath, force_nocompress=False, force_compress=False, armored=False, checksum=False): """public method for single file encryption with optional compression, ASCII armored formatting, and file hash digest generation"""
if armored:
    if force_compress:
        command_stub = self.command_maxcompress_armored
    elif force_nocompress:
        command_stub = self.command_nocompress_armored
    else:
        if self._is_compress_filetype(inpath):
            command_stub = self.command_default_armored
        else:
            command_stub = self.command_nocompress_armored
else:
    if force_compress:
        command_stub = self.command_maxcompress
    elif force_nocompress:
        command_stub = self.command_nocompress
    else:
        if self._is_compress_filetype(inpath):
            command_stub = self.command_default
        else:
            command_stub = self.command_nocompress

encrypted_outpath = self._create_outfilepath(inpath)
system_command = command_stub + encrypted_outpath + " --passphrase " + quote(self.passphrase) + " --symmetric " + quote(inpath)

try:
    response = muterun(system_command)
    # check returned status code
    if response.exitcode == 0:
        stdout(encrypted_outpath + " was generated from " + inpath)
        if checksum:
            # add a SHA256 hash digest of the encrypted file -
            # requested by the user's --hash flag in the command
            from crypto.library import hash
            encrypted_file_hash = hash.generate_hash(encrypted_outpath)
            if len(encrypted_file_hash) == 64:
                stdout("SHA256 hash digest for " + encrypted_outpath + " :")
                stdout(encrypted_file_hash)
            else:
                stdout("Unable to generate a SHA256 hash digest for the file " + encrypted_outpath)
    else:
        stderr(response.stderr, 0)
        stderr("Encryption failed")
        sys.exit(1)
except Exception as e:
    stderr("There was a problem with the execution of gpg. Encryption failed. Error: [" + str(e) + "]")
    sys.exit(1)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def encrypt_files(self, file_list, force_nocompress=False, force_compress=False, armored=False, checksum=False): """public method for multiple file encryption with optional compression, ASCII armored formatting, and file hash digest generation"""
for the_file in file_list:
    self.encrypt_file(the_file, force_nocompress, force_compress, armored, checksum)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _is_compress_filetype(self, inpath): """private method that performs magic number and size check on file to determine whether to compress the file"""
# check for common file type suffixes in order to avoid the need for
# file reads to check magic number for binary vs. text file
if self._is_common_binary(inpath):
    return False
elif self._is_common_text(inpath):
    return True
else:
    # files > 10kB get checked for compression (arbitrary decision to
    # skip compression on small files)
    the_file_size = file_size(inpath)
    if the_file_size > 10240:
        if the_file_size > 512000:
            # there seems to be a break point at ~500kB where the
            # compression gain is offset by the additional file read,
            # so limit the mime-type test to files > 500kB
            try:
                system_command = "file --mime-type -b " + quote(inpath)
                response = muterun(system_command)
                # check for a text file mime type
                if response.stdout[0:5] == "text/":
                    return True   # appropriate size, appropriate mime type
                else:
                    return False  # appropriate size, inappropriate mime type
            except Exception:
                return False
        else:
            # if the file size is < 500kB, skip the additional file
            # read and just go with compression
            return True
    else:
        return False
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _is_common_binary(self, inpath): """private method to compare file path mime type to common binary file types"""
# make local variables for the available char numbers in the suffix
# types to be tested
two_suffix = inpath[-3:]
three_suffix = inpath[-4:]
four_suffix = inpath[-5:]

# test for inclusion in the instance variable common_binaries
# (defined in __init__)
if two_suffix in self.common_binaries:
    return True
elif three_suffix in self.common_binaries:
    return True
elif four_suffix in self.common_binaries:
    return True
else:
    return False
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _is_common_text(self, inpath): """private method to compare file path mime type to common text file types"""
# make local variables for the available char numbers in the suffix
# types to be tested
one_suffix = inpath[-2:]
two_suffix = inpath[-3:]
three_suffix = inpath[-4:]
four_suffix = inpath[-5:]

# test for inclusion in the instance variable common_text
# (defined in __init__)
if one_suffix in self.common_text:
    return True
elif two_suffix in self.common_text:
    return True
elif three_suffix in self.common_text:
    return True
elif four_suffix in self.common_text:
    return True
else:
    return False
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def knn_impute_few_observed(
        X, missing_mask, k,
        verbose=False, print_interval=100):
        """
        Seems to be the fastest kNN implementation. Pre-sorts each row's
        neighbors and then filters those sorted indices using each column's
        mask of observed values.

        Important detail: if k observed values are not available, then fewer
        than k neighboring rows are used.

        Parameters
        ----------
        X : np.ndarray
            Matrix to fill of shape (n_samples, n_features)
        missing_mask : np.ndarray
            Boolean array of same shape as X
        k : int
        verbose : bool
        """
start_t = time.time()
n_rows, n_cols = X.shape
# put the missing mask in column major order since it's accessed
# one column at a time
missing_mask_column_major = np.asarray(missing_mask, order="F")
observed_mask_column_major = ~missing_mask_column_major
X_column_major = X.copy(order="F")

X_row_major, D, effective_infinity = \
    knn_initialize(X, missing_mask, verbose=verbose)
# knn_initialize already replaced infinities with a very large finite
# number, so sorting and inverting D below stay well defined
D_sorted = np.argsort(D, axis=1)
inv_D = 1.0 / D
D_valid_mask = D < effective_infinity
valid_distances_per_row = D_valid_mask.sum(axis=1)

# trim the number of other rows we consider to exclude those
# with infinite distances
D_sorted = [
    D_sorted[i, :count]
    for i, count in enumerate(valid_distances_per_row)
]

dot = np.dot
for i in range(n_rows):
    missing_row = missing_mask[i, :]
    missing_indices = np.where(missing_row)[0]
    row_weights = inv_D[i, :]
    if verbose and i % print_interval == 0:
        print(
            "Imputing row %d/%d with %d missing, elapsed time: %0.3f" % (
                i + 1,
                n_rows,
                len(missing_indices),
                time.time() - start_t))
    candidate_neighbor_indices = D_sorted[i]

    for j in missing_indices:
        observed = observed_mask_column_major[:, j]
        sorted_observed = observed[candidate_neighbor_indices]
        observed_neighbor_indices = candidate_neighbor_indices[sorted_observed]
        k_nearest_indices = observed_neighbor_indices[:k]
        weights = row_weights[k_nearest_indices]
        weight_sum = weights.sum()
        if weight_sum > 0:
            column = X_column_major[:, j]
            values = column[k_nearest_indices]
            X_row_major[i, j] = dot(values, weights) / weight_sum
return X_row_major
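A small end-to-end usage sketch (assumes numpy and the helpers in this module are importable):

import numpy as np

X = np.array([[1.0, 2.0, np.nan],
              [1.1, 2.1, 3.1],
              [0.9, 1.9, 2.9]])
missing = np.isnan(X)
X_filled = knn_impute_few_observed(X.copy(), missing, k=2)
assert not np.isnan(X_filled).any()
print(X_filled)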
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def knn_initialize( X, missing_mask, verbose=False, min_dist=1e-6, max_dist_multiplier=1e6): """ Fill X with NaN values if necessary, construct the n_samples x n_samples distance matrix and set the self-distance of each row to infinity. Returns contents of X laid out in row-major, the distance matrix, and an "effective infinity" which is larger than any entry of the distance matrix. """
X_row_major = X.copy("C")
if missing_mask.sum() != np.isnan(X_row_major).sum():
    # if the missing values have already been zero-filled, we need to
    # put NaNs back in the data matrix for the distances function
    X_row_major[missing_mask] = np.nan
D = all_pairs_normalized_distances(X_row_major)
D_finite_flat = D[np.isfinite(D)]
if len(D_finite_flat) > 0:
    max_dist = max_dist_multiplier * max(1, D_finite_flat.max())
else:
    max_dist = max_dist_multiplier
# set diagonal of distance matrix to a large value since we don't want
# points considering themselves as neighbors
np.fill_diagonal(D, max_dist)
D[D < min_dist] = min_dist  # prevents 0s
D[D > max_dist] = max_dist  # prevents infinities
return X_row_major, D, max_dist
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def all_pairs_normalized_distances_reference(X): """ Reference implementation of normalized all-pairs distance, used for testing the more efficient implementation above for equivalence. """
n_samples, n_cols = X.shape
# matrix of mean squared difference between samples
D = np.ones((n_samples, n_samples), dtype="float32") * np.inf
for i in range(n_samples):
    diffs = X - X[i, :].reshape((1, n_cols))
    missing_diffs = np.isnan(diffs)
    missing_counts_per_row = missing_diffs.sum(axis=1)
    valid_rows = missing_counts_per_row < n_cols
    D[i, valid_rows] = np.nanmean(
        diffs[valid_rows, :] ** 2,
        axis=1)
return D
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def knn_impute_reference( X, missing_mask, k, verbose=False, print_interval=100): """ Reference implementation of kNN imputation logic. """
n_rows, n_cols = X.shape
X_result, D, effective_infinity = \
    knn_initialize(X, missing_mask, verbose=verbose)
for i in range(n_rows):
    for j in np.where(missing_mask[i, :])[0]:
        distances = D[i, :].copy()

        # any rows that don't have the value we're currently trying
        # to impute are set to infinite distances
        distances[missing_mask[:, j]] = effective_infinity
        neighbor_indices = np.argsort(distances)
        neighbor_distances = distances[neighbor_indices]

        # get rid of any infinite distance neighbors in the top k
        valid_distances = neighbor_distances < effective_infinity
        neighbor_distances = neighbor_distances[valid_distances][:k]
        neighbor_indices = neighbor_indices[valid_distances][:k]

        weights = 1.0 / neighbor_distances
        weight_sum = weights.sum()
        if weight_sum > 0:
            column = X[:, j]
            values = column[neighbor_indices]
            X_result[i, j] = np.dot(values, weights) / weight_sum
return X_result
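A consistency sketch: on random data without degenerate distance ties, one would expect the reference and optimized implementations to agree up to floating-point tolerance (assumes both functions above are in scope):

import numpy as np

rng = np.random.RandomState(0)
X = rng.rand(20, 5)
X[rng.rand(20, 5) < 0.2] = np.nan
mask = np.isnan(X)

fast = knn_impute_few_observed(X.copy(), mask, k=3)
ref = knn_impute_reference(X.copy(), mask, k=3)
np.testing.assert_allclose(fast[mask], ref[mask], rtol=1e-4)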
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def dashboard(request):
    "Counts, aggregations and more!"
    end_time = now()
    start_time = end_time - timedelta(days=7)
    defaults = {'start': start_time, 'end': end_time}

    form = DashboardForm(data=request.GET or defaults)
    if form.is_valid():
        start_time = form.cleaned_data['start']
        end_time = form.cleaned_data['end']

    # determine when tracking began
    try:
        obj = Visitor.objects.order_by('start_time')[0]
        track_start_time = obj.start_time
    except (IndexError, Visitor.DoesNotExist):
        track_start_time = now()

    # If the start_date is before tracking began, warn about incomplete data
    warn_incomplete = (start_time < track_start_time)

    # queries take `date` objects (for now)
    user_stats = Visitor.objects.user_stats(start_time, end_time)
    visitor_stats = Visitor.objects.stats(start_time, end_time)
    if TRACK_PAGEVIEWS:
        pageview_stats = Pageview.objects.stats(start_time, end_time)
    else:
        pageview_stats = None

    context = {
        'form': form,
        'track_start_time': track_start_time,
        'warn_incomplete': warn_incomplete,
        'user_stats': user_stats,
        'visitor_stats': visitor_stats,
        'pageview_stats': pageview_stats,
    }
    return render(request, 'tracking/dashboard.html', context)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def geoip_data(self): """Attempt to retrieve MaxMind GeoIP data based on visitor's IP."""
if not HAS_GEOIP or not TRACK_USING_GEOIP:
    return

if not hasattr(self, '_geoip_data'):
    self._geoip_data = None
    try:
        gip = GeoIP(cache=GEOIP_CACHE_TYPE)
        self._geoip_data = gip.city(self.ip_address)
    except GeoIPException:
        msg = 'Error getting GeoIP data for IP "{0}"'.format(
            self.ip_address)
        log.exception(msg)

return self._geoip_data
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _make_obj(obj):
        """Takes an object and returns a corresponding API class.

        The names and values of the data will match exactly with those found
        in the online docs at https://pokeapi.co/docsv2/ . In some cases, the
        data may be of a standard type, such as an integer or string. For
        those cases, the input value is simply returned, unchanged.

        :param obj: the object to be converted
        :return: either the same value, if it does not need to be converted,
            or an APIResource or APIMetadata instance, depending on the data
            inputted.
        """
if isinstance(obj, dict):
    if 'url' in obj:
        url = obj['url']
        id_ = int(url.split('/')[-2])  # ID of the data.
        endpoint = url.split('/')[-3]  # Where the data is located.
        return APIResource(endpoint, id_, lazy_load=True)
    return APIMetadata(obj)
return obj
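A behavior sketch for _make_obj (no network call is made in the resource case, because lazy_load=True defers fetching until an attribute is accessed):

res = _make_obj({'url': 'https://pokeapi.co/api/v2/pokemon/1/'})
# -> APIResource('pokemon', 1, lazy_load=True)

meta = _make_obj({'name': 'bulbasaur'})  # -> APIMetadata wrapper
plain = _make_obj('bulbasaur')           # returned unchanged
assert plain == 'bulbasaur'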
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _load(self):
        """Function to collect reference data and connect it to the instance
        as attributes.

        Internal function; does not usually need to be called by the user, as
        it is called automatically when an attribute is requested.

        :return: None
        """
data = get_data(self.endpoint, self.id_, force_lookup=self.__force_lookup)

# Make our custom objects from the data.
for key, val in data.items():
    if key == 'location_area_encounters' and self.endpoint == 'pokemon':
        params = val.split('/')[-3:]
        ep, id_, subr = params
        encounters = get_data(ep, int(id_), subr)
        data[key] = [_make_obj(enc) for enc in encounters]
        continue

    if isinstance(val, dict):
        data[key] = _make_obj(val)
    elif isinstance(val, list):
        data[key] = [_make_obj(i) for i in val]

self.__dict__.update(data)
return None
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def safe_make_dirs(path, mode=0o777): """Create a leaf directory and all intermediate ones in a safe way. A wrapper to os.makedirs() that handles existing leaf directories while avoiding os.path.exists() race conditions. :param path: relative or absolute directory tree to create :param mode: directory permissions in octal :return: The newly-created path """
try:
    os.makedirs(path, mode)
except OSError as error:
    if error.errno != 17:  # 17 == errno.EEXIST, "File exists"
        raise
return path
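Usage sketch: repeated calls are safe, which is the point of the wrapper (standard library only; assumes safe_make_dirs above is in scope):

import os
import tempfile

base = tempfile.mkdtemp()
path = os.path.join(base, 'a', 'b', 'c')
assert safe_make_dirs(path) == path
assert safe_make_dirs(path) == path  # second call: EEXIST is swallowed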
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_default_cache(): """Get the default cache location. Adheres to the XDG Base Directory specification, as described in https://standards.freedesktop.org/basedir-spec/basedir-spec-latest.html :return: the default cache directory absolute path """
xdg_cache_home = os.environ.get('XDG_CACHE_HOME') or \
    os.path.join(os.path.expanduser('~'), '.cache')
return os.path.join(xdg_cache_home, 'pokebase')
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def set_cache(new_path=None): """Simple function to change the cache location. `new_path` can be an absolute or relative path. If the directory does not exist yet, this function will create it. If None it will set the cache to the default cache directory. If you are going to change the cache directory, this function should be called at the top of your script, before you make any calls to the API. This is to avoid duplicate files and excess API calls. :param new_path: relative or absolute path to the desired new cache directory :return: str, str """
global CACHE_DIR, API_CACHE, SPRITE_CACHE

if new_path is None:
    new_path = get_default_cache()

CACHE_DIR = safe_make_dirs(os.path.abspath(new_path))
API_CACHE = os.path.join(CACHE_DIR, 'api.cache')
SPRITE_CACHE = safe_make_dirs(os.path.join(CACHE_DIR, 'sprite'))

return CACHE_DIR, API_CACHE, SPRITE_CACHE
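Usage sketch (set_cache creates directories as a side effect, so a temp dir is used here):

import tempfile

cache_dir, api_cache, sprite_cache = set_cache(tempfile.mkdtemp())
print(api_cache)  # <tmpdir>/api.cache

set_cache()  # restore the XDG default location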
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def attach(self, lun_or_snap, skip_hlu_0=False): """ Attaches lun, snap or member snap of cg snap to host. Don't pass cg snapshot in as `lun_or_snap`. :param lun_or_snap: the lun, snap, or a member snap of cg snap :param skip_hlu_0: whether to skip hlu 0 :return: the hlu number """
# The `UnityResourceAlreadyAttachedError` check was removed because the
# Cinder driver keeps a host cache. If the lun was attached to the host
# and the info was stored in the cache, a wrong hlu would be returned.
# Attaching a lun to a host twice succeeds, so if a Cinder retry
# triggers another attachment of the same lun to the host, the cost is
# one more rest request of `modifyLun` and one host instance query.
try:
    return self._attach_with_retry(lun_or_snap, skip_hlu_0)
except ex.SystemAPINotSupported:
    # Attaching snap to host is not supported before 4.1.
    raise
except ex.UnityAttachExceedLimitError:
    # The number of luns exceeds the system limit.
    raise
except:  # noqa
    # other attach error, remove this lun if already attached
    self.detach(lun_or_snap)
    raise
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def has_hlu(self, lun_or_snap, cg_member=None): """Returns True if `lun_or_snap` is attached to the host. :param lun_or_snap: can be lun, lun snap, cg snap or a member snap of cg snap. :param cg_member: the member lun of cg if `lun_or_snap` is cg snap. :return: True - if `lun_or_snap` is attached, otherwise False. """
hlu = self.get_hlu(lun_or_snap, cg_member=cg_member)
return hlu is not None