Dataset columns:
  _id               string, 2-7 characters
  title             string, 1-88 characters
  partition         string, 3 distinct values
  text              string, 31-13.1k characters
  language          string, 1 distinct value
  meta_information  dict
q276300
ConnectorDB.reset_apikey
test
def reset_apikey(self): """invalidates the device's current api key, and generates a new one. Resets current auth to use the new apikey, since the change would otherwise cause future queries to fail
python
{ "resource": "" }
q276301
ConnectorDB.users
test
def users(self): """Returns the list of users in the database""" result = self.db.read("", {"q": "ls"}) if result is None or result.json() is None: return [] users = []
python
{ "resource": "" }
q276302
run_bwa_index
test
def run_bwa_index(job, ref_id): """ Use BWA to create reference index files :param JobFunctionWrappingJob job: passed automatically by Toil :param str ref_id: FileStoreID for the reference genome :return: FileStoreIDs for BWA index files :rtype: tuple(str, str, str, str, str) """ job.fileStore.logToMaster('Created BWA index files') work_dir = job.fileStore.getLocalTempDir() job.fileStore.readGlobalFile(ref_id, os.path.join(work_dir, 'ref.fa')) command = ['index', '/data/ref.fa'] dockerCall(job=job, workDir=work_dir, parameters=command,
python
{ "resource": "" }
q276303
Logger.connectordb
test
def connectordb(self): """Returns the ConnectorDB object that the logger uses. Raises an error if Logger isn't able to connect""" if self.__cdb is None:
python
{ "resource": "" }
q276304
Logger.addStream
test
def addStream(self, streamname, schema=None, **kwargs): """Adds the given stream to the logger. Requires an active connection to the ConnectorDB database. If a schema is not specified, loads the stream from the database. If a schema is specified, and the stream does not exist, creates the stream. You can
python
{ "resource": "" }
q276305
Logger.addStream_force
test
def addStream_force(self, streamname, schema=None): """This function adds the given stream to the logger, but does not check with a ConnectorDB database to make sure
python
{ "resource": "" }
q276306
Logger.insert
test
def insert(self, streamname, value): """Insert the datapoint into the logger for the given stream name. The logger caches the datapoint and eventually synchronizes it with ConnectorDB""" if streamname not in self.streams: raise Exception("The stream '%s' was not found" % (streamname, )) # Validate the schema validate(value, self.streams[streamname])
python
{ "resource": "" }
q276307
Logger.sync
test
def sync(self): """Attempt to sync with the ConnectorDB server""" logging.debug("Logger: Syncing...") failed = False try: # Get the connectordb object cdb = self.connectordb # Ping the database - most connection errors will happen here cdb.ping() with self.synclock: c = self.database.cursor() for stream in self.streams: s = cdb[stream] c.execute( "SELECT * FROM cache WHERE stream=? ORDER BY timestamp ASC;", (stream, )) datapointArray = [] for dp in c.fetchall(): datapointArray.append( {"t": dp[1], "d": json.loads(dp[2])}) # First, check if the data already inserted has newer timestamps, # and in that case, assume that there was an error, and remove the datapoints # with an older timestamp, so that we don't have an error when syncing if len(s) > 0: newtime = s[-1]["t"] while (len(datapointArray) > 0 and datapointArray[0]["t"] < newtime): logging.debug("Datapoint exists with older timestamp. Removing the datapoint.") datapointArray = datapointArray[1:] if len(datapointArray) > 0: logging.debug("%s: syncing %i datapoints" % (stream, len(datapointArray))) while (len(datapointArray) > DATAPOINT_INSERT_LIMIT): # We insert datapoints in chunks of a couple # thousand so that they fit in the insert size # limit of ConnectorDB
python
{ "resource": "" }
q276308
Logger.start
test
def start(self): """Start the logger background synchronization service. This allows you to not need to worry about syncing with ConnectorDB - you just insert into the Logger, and the Logger will by
python
{ "resource": "" }
q276309
Logger.stop
test
def stop(self): """Stops the background synchronization thread""" with self.synclock: if self.syncthread is not None:
python
{ "resource": "" }
q276310
download_url_job
test
def download_url_job(job, url, name=None, s3_key_path=None, cghub_key_path=None): """Job version of `download_url`""" work_dir = job.fileStore.getLocalTempDir() fpath = download_url(job=job, url=url, work_dir=work_dir, name=name,
python
{ "resource": "" }
q276311
s3am_upload_job
test
def s3am_upload_job(job, file_id, file_name, s3_dir, s3_key_path=None): """Job version of s3am_upload""" work_dir = job.fileStore.getLocalTempDir()
python
{ "resource": "" }
q276312
labels
test
def labels(ontology, output, ols_base): """Output the names to the given file""" for label in get_labels(ontology=ontology,
python
{ "resource": "" }
q276313
tree
test
def tree(ontology, output, ols_base): """Output the parent-child relations to the given file""" for parent, child in get_hierarchy(ontology=ontology,
python
{ "resource": "" }
q276314
get_mean_insert_size
test
def get_mean_insert_size(work_dir, bam_name): """Function taken from MC3 Pipeline""" cmd = "docker run --log-driver=none --rm -v {}:/data quay.io/ucsc_cgl/samtools " \ "view -f66 {}".format(work_dir, os.path.join(work_dir, bam_name)) process = subprocess.Popen(args=cmd, shell=True, stdout=subprocess.PIPE) b_sum = 0.0 b_count = 0.0 while True: line = process.stdout.readline() if not line: break tmp = line.split("\t")
python
{ "resource": "" }
q276315
current_docker_container_id
test
def current_docker_container_id(): """ Returns a string that represents the container ID of the current Docker container. If this function is invoked outside of a container a NotInsideContainerError is raised. >>> import subprocess >>> import sys >>> a = subprocess.check_output(['docker', 'run', '-v', ... sys.modules[__name__].__file__ + ':/foo.py', ...
python
{ "resource": "" }
q276316
run_star
test
def run_star(job, r1_id, r2_id, star_index_url, wiggle=False, sort=True): """ Performs alignment of fastqs to bam via STAR --limitBAMsortRAM step added to deal with memory explosion when sorting certain samples. The value was chosen to complement the recommended amount of memory to have when running STAR (60G) :param JobFunctionWrappingJob job: passed automatically by Toil :param str r1_id: FileStoreID of fastq (pair 1) :param str r2_id: FileStoreID of fastq (pair 2 if applicable, else pass None) :param str star_index_url: STAR index tarball :param bool wiggle: If True, will output a wiggle file and return it :return: FileStoreID from RSEM :rtype: str """ work_dir = job.fileStore.getLocalTempDir() download_url(job, url=star_index_url, name='starIndex.tar.gz', work_dir=work_dir) subprocess.check_call(['tar', '-xvf', os.path.join(work_dir, 'starIndex.tar.gz'), '-C', work_dir]) os.remove(os.path.join(work_dir, 'starIndex.tar.gz')) # Determine tarball structure - star index contains are either in a subdir or in the tarball itself star_index = os.path.join('/data', os.listdir(work_dir)[0]) if len(os.listdir(work_dir)) == 1 else '/data' # Parameter handling for paired / single-end data parameters = ['--runThreadN', str(job.cores), '--genomeDir', star_index, '--outFileNamePrefix', 'rna', '--outSAMunmapped', 'Within', '--quantMode', 'TranscriptomeSAM', '--outSAMattributes', 'NH', 'HI', 'AS', 'NM', 'MD', '--outFilterType', 'BySJout', '--outFilterMultimapNmax', '20', '--outFilterMismatchNmax', '999', '--outFilterMismatchNoverReadLmax', '0.04', '--alignIntronMin', '20', '--alignIntronMax', '1000000', '--alignMatesGapMax', '1000000', '--alignSJoverhangMin', '8', '--alignSJDBoverhangMin', '1', '--sjdbScore', '1', '--limitBAMsortRAM', '49268954168'] # Modify paramaters based on function arguments if sort: parameters.extend(['--outSAMtype', 'BAM', 'SortedByCoordinate']) aligned_bam = 'rnaAligned.sortedByCoord.out.bam' else: parameters.extend(['--outSAMtype', 'BAM', 'Unsorted']) aligned_bam = 'rnaAligned.out.bam' if wiggle: parameters.extend(['--outWigType', 'bedGraph',
python
{ "resource": "" }
q276317
Stream.create
test
def create(self, schema="{}", **kwargs): """Creates a stream given an optional JSON schema encoded as a python dict. You can also add other properties of the stream, such as the icon, datatype or description. Create accepts both a string schema and
python
{ "resource": "" }
q276318
Stream.export
test
def export(self, directory): """Exports the stream to the given directory. The directory can't exist. You can later import this device by running import_stream on a device. """ if os.path.exists(directory): raise FileExistsError( "The stream export directory already exists") os.mkdir(directory) # Write the stream's info with open(os.path.join(directory, "stream.json"), "w") as f: json.dump(self.data, f) # Now write the stream's data # We sort it first, since older versions of ConnectorDB had a bug
python
{ "resource": "" }
q276319
Stream.device
test
def device(self): """returns the device which owns the given stream""" splitted_path = self.path.split("/")
python
{ "resource": "" }
q276320
get_labels
test
def get_labels(ontology, ols_base=None): """Iterates over the labels of terms in the ontology :param str ontology: The name of the ontology :param str ols_base: An optional, custom OLS base url
python
{ "resource": "" }
q276321
get_hierarchy
test
def get_hierarchy(ontology, ols_base=None): """Iterates over the parent-child relationships in an ontology :param str ontology: The name of the ontology :param str ols_base: An optional, custom OLS base url
python
{ "resource": "" }
q276322
AbstractPipelineWrapper.run
test
def run(cls, name, desc): """ Prepares and runs the pipeline. Note this method must be invoked both from inside a Docker container and while the docker daemon is reachable. :param str name: The name of the command to start the workflow. :param str desc: The description of the workflow. """ wrapper = cls(name, desc) mount_path = wrapper._get_mount_path() # prepare parser arg_parser = wrapper._create_argument_parser() wrapper._extend_argument_parser(arg_parser) # prepare config file empty_config = wrapper.__get_empty_config() config_yaml = ruamel.yaml.load(empty_config) wrapper.__populate_parser_from_config(arg_parser, config_yaml) args = arg_parser.parse_args() for k,v in vars(args).items(): k = k.replace('_', '-') if k in config_yaml: config_yaml[k] = v config_path = wrapper._get_config_path() with open(config_path, 'w') as writable: ruamel.yaml.dump(config_yaml, stream=writable) # prepare workdir workdir_path = os.path.join(mount_path, 'Toil-' + wrapper._name) if os.path.exists(workdir_path): if args.restart: log.info('Reusing temporary directory: %s', workdir_path) else: raise UserError('Temporary directory {} already exists. Run with --restart '
python
{ "resource": "" }
q276323
AbstractPipelineWrapper.__populate_parser_from_config
test
def __populate_parser_from_config(self, arg_parser, config_data, prefix=''): """ Populates an ArgumentParser object with arguments where each argument is a key from the given config_data dictionary. :param str prefix: Prepends the key with this prefix delimited by a single '.' character. :param argparse.ArgumentParser arg_parser: :param dict config_data: The parsed yaml data from the config. >>> pw = AbstractPipelineWrapper('test', 'this is a test') >>> parser = argparse.ArgumentParser() >>> pw._PipelineWrapperBuilder__populate_parser_from_config(parser, {'a':None, 'b':2}) >>> vars(parser.parse_args(['--a', '1'])) {'a': '1', 'b': 2} >>> vars(parser.parse_args(['--b', '3'])) {'a': None, 'b': '3'} >>> parser = argparse.ArgumentParser() >>> pw._PipelineWrapperBuilder__populate_parser_from_config(parser, {}) >>> vars(parser.parse_args([])) {} >>> parser = argparse.ArgumentParser() >>> pw._PipelineWrapperBuilder__populate_parser_from_config(parser, ...
python
{ "resource": "" }
q276324
AbstractPipelineWrapper.__get_empty_config
test
def __get_empty_config(self): """ Returns the config file contents as a string. The config file is generated and then deleted. """ self._generate_config() path = self._get_config_path()
python
{ "resource": "" }
q276325
AbstractPipelineWrapper._get_mount_path
test
def _get_mount_path(self): """ Returns the path of the mount point of the current container. If this method is invoked outside of a Docker container a NotInsideContainerError is raised. Likewise if the docker daemon is unreachable from inside the container a UserError is raised. This method is idempotent. """ if self._mount_path is None: name = current_docker_container_id() if dockerd_is_reachable(): # Get name of mounted volume blob = json.loads(subprocess.check_output(['docker', 'inspect', name])) mounts = blob[0]['Mounts'] # Ensure docker.sock is mounted correctly sock_mnt = [x['Source'] == x['Destination'] for x in mounts if 'docker.sock' in x['Source']] require(len(sock_mnt) == 1, 'Missing socket mount. Requires the following: ' 'docker run -v /var/run/docker.sock:/var/run/docker.sock') # Ensure formatting of command for 2 mount points if len(mounts) == 2: require(all(x['Source'] == x['Destination'] for x in mounts), 'Docker Src/Dst mount points, invoked with the -v argument, ' 'must be the same if only using one mount point aside from the docker ' 'socket.') work_mount = [x['Source'] for x in mounts if 'docker.sock' not in x['Source']] else: # Ensure only one mirror mount exists aside from docker.sock
python
{ "resource": "" }
q276326
AbstractPipelineWrapper._add_option
test
def _add_option(self, arg_parser, name, *args, **kwargs): """ Add an argument to the given arg_parser with the given name. :param argparse.ArgumentParser arg_parser:
python
{ "resource": "" }
q276327
AbstractPipelineWrapper._create_argument_parser
test
def _create_argument_parser(self): """ Creates and returns an ArgumentParser object prepopulated with 'no clean', 'cores' and 'restart' arguments. """ parser = argparse.ArgumentParser(description=self._desc, formatter_class=argparse.RawTextHelpFormatter) parser.add_argument('--no-clean', action='store_true', help='If this flag is used, temporary work directory is not cleaned.') parser.add_argument('--restart', action='store_true',
python
{ "resource": "" }
q276328
AbstractPipelineWrapper._create_pipeline_command
test
def _create_pipeline_command(self, args, workdir_path, config_path): """ Creates and returns a list that represents a command for running the pipeline. """ return ([self._name, 'run', os.path.join(workdir_path, 'jobStore'),
python
{ "resource": "" }
q276329
DatabaseConnection.setauth
test
def setauth(self, user_or_apikey=None, user_password=None): """ setauth sets the authentication header for use in the session. It is for use when apikey is updated or something of the sort, such that there is a seamless experience. """ auth = None if user_or_apikey is not None: # ConnectorDB allows login using both basic auth or an apikey url param. # The python client uses basic auth for all logins if user_password is None: # Login by api key - the basic auth
python
{ "resource": "" }
q276330
DatabaseConnection.handleresult
test
def handleresult(self, r): """Handles HTTP error codes for the given request Raises: AuthenticationError on the appropriate 4** errors ServerError if the response is not an ok (2**) Arguments: r -- The request result """ if r.status_code >= 400 and r.status_code < 500: msg = r.json()
python
{ "resource": "" }
q276331
DatabaseConnection.ping
test
def ping(self): """Attempts to ping the server using current credentials, and responds with the path of the currently authenticated device"""
python
{ "resource": "" }
q276332
DatabaseConnection.create
test
def create(self, path, data=None): """Send a POST CRUD API request to the given path using the given data which will be converted to
python
{ "resource": "" }
q276333
DatabaseConnection.update
test
def update(self, path, data=None): """Send an update request to the given path of the CRUD API, with the given data dict, which will be converted into json""" return self.handleresult(self.r.put(urljoin(self.url + CRUD_PATH,
python
{ "resource": "" }
q276334
DatabaseConnection.delete
test
def delete(self, path): """Send a delete request to the given path of the CRUD API. This deletes the object. Or at least tries to."""
python
{ "resource": "" }
q276335
DatabaseConnection.subscribe
test
def subscribe(self, stream, callback, transform=""): """Subscribe to
python
{ "resource": "" }
q276336
User.create
test
def create(self, email, password, role="user", public=True, **kwargs): """Creates the given user - using the passed in email and password. You can also set other default properties by passing in the relevant information:: usr.create("my@email","mypass",description="I like trains.") Furthermore, ConnectorDB permits immediate initialization of an entire user tree, so that you can create all relevant devices and streams in one go:: usr.create("my@email","mypass",devices={ "device1": { "nickname": "My train", "streams": { "stream1": { "schema": "{\"type\":\"string\"}",
python
{ "resource": "" }
q276337
User.devices
test
def devices(self): """Returns the list of devices that belong to the user""" result = self.db.read(self.path, {"q": "ls"}) if result is None or result.json() is None: return [] devices = []
python
{ "resource": "" }
q276338
run_cutadapt
test
def run_cutadapt(job, r1_id, r2_id, fwd_3pr_adapter, rev_3pr_adapter): """ Adapter trimming for RNA-seq data :param JobFunctionWrappingJob job: passed automatically by Toil :param str r1_id: FileStoreID of fastq read 1 :param str r2_id: FileStoreID of fastq read 2 (if paired data) :param str fwd_3pr_adapter: Adapter sequence for the forward 3' adapter :param str rev_3pr_adapter: Adapter sequence for the reverse 3' adapter (second fastq pair) :return: R1 and R2 FileStoreIDs :rtype: tuple """ work_dir = job.fileStore.getLocalTempDir() if r2_id: require(rev_3pr_adapter, "Paired end data requires a reverse 3' adapter sequence.") # Retrieve files parameters = ['-a', fwd_3pr_adapter, '-m', '35'] if r1_id and r2_id: job.fileStore.readGlobalFile(r1_id, os.path.join(work_dir, 'R1.fastq')) job.fileStore.readGlobalFile(r2_id, os.path.join(work_dir, 'R2.fastq')) parameters.extend(['-A', rev_3pr_adapter, '-o', '/data/R1_cutadapt.fastq', '-p', '/data/R2_cutadapt.fastq', '/data/R1.fastq', '/data/R2.fastq']) else: job.fileStore.readGlobalFile(r1_id, os.path.join(work_dir, 'R1.fastq'))
python
{ "resource": "" }
q276339
run_samtools_faidx
test
def run_samtools_faidx(job, ref_id): """ Use SAMtools to create reference index file :param JobFunctionWrappingJob job: passed automatically by Toil :param str ref_id: FileStoreID for the reference genome :return: FileStoreID for reference index :rtype: str """ job.fileStore.logToMaster('Created reference index') work_dir = job.fileStore.getLocalTempDir() job.fileStore.readGlobalFile(ref_id, os.path.join(work_dir, 'ref.fasta'))
python
{ "resource": "" }
q276340
run_samtools_index
test
def run_samtools_index(job, bam): """ Runs SAMtools index to create a BAM index file :param JobFunctionWrappingJob job: passed automatically by Toil :param str bam: FileStoreID of the BAM file :return: FileStoreID for BAM index file :rtype: str """ work_dir = job.fileStore.getLocalTempDir() job.fileStore.readGlobalFile(bam, os.path.join(work_dir, 'sample.bam')) # Call: index the bam
python
{ "resource": "" }
q276341
run_sambamba_markdup
test
def run_sambamba_markdup(job, bam): """ Marks reads as PCR duplicates using Sambamba :param JobFunctionWrappingJob job: passed automatically by Toil :param str bam: FileStoreID for BAM file :return: FileStoreID for sorted BAM file :rtype: str """ work_dir = job.fileStore.getLocalTempDir() job.fileStore.readGlobalFile(bam, os.path.join(work_dir, 'input.bam')) command = ['/usr/local/bin/sambamba', 'markdup', '-t', str(int(job.cores)), '/data/input.bam', '/data/output.bam']
python
{ "resource": "" }
q276342
run_samblaster
test
def run_samblaster(job, sam): """ Marks reads as PCR duplicates using SAMBLASTER :param JobFunctionWrappingJob job: passed automatically by Toil :param str sam: FileStoreID for SAM file :return: FileStoreID for deduped SAM file :rtype: str """ work_dir = job.fileStore.getLocalTempDir() job.fileStore.readGlobalFile(sam, os.path.join(work_dir, 'input.sam'))
python
{ "resource": "" }
q276343
picard_mark_duplicates
test
def picard_mark_duplicates(job, bam, bai, validation_stringency='LENIENT'): """ Runs Picard MarkDuplicates on a BAM file. Requires that the BAM file be coordinate sorted. :param JobFunctionWrappingJob job: passed automatically by Toil :param str bam: FileStoreID for BAM file :param str bai: FileStoreID for BAM index file :param str validation_stringency: BAM file validation stringency, default is LENIENT :return: FileStoreIDs for BAM and BAI files :rtype: tuple """ work_dir = job.fileStore.getLocalTempDir() # Retrieve file path job.fileStore.readGlobalFile(bam, os.path.join(work_dir, 'sorted.bam')) job.fileStore.readGlobalFile(bai, os.path.join(work_dir, 'sorted.bai')) # Call: picardtools command = ['MarkDuplicates', 'INPUT=sorted.bam', 'OUTPUT=mkdups.bam', 'METRICS_FILE=metrics.txt', 'ASSUME_SORTED=true', 'CREATE_INDEX=true', 'VALIDATION_STRINGENCY=%s' % validation_stringency.upper()] # picard-tools container doesn't have JAVA_OPTS variable # Set TMPDIR to /data to prevent writing temporary files to /tmp docker_parameters = ['--rm',
python
{ "resource": "" }
q276344
run_picard_sort
test
def run_picard_sort(job, bam, sort_by_name=False): """ Sorts BAM file using Picard SortSam :param JobFunctionWrappingJob job: passed automatically by Toil :param str bam: FileStoreID for BAM file :param boolean sort_by_name: If true, sorts by read name instead of coordinate. :return: FileStoreID for sorted BAM file :rtype: str """ work_dir = job.fileStore.getLocalTempDir() job.fileStore.readGlobalFile(bam, os.path.join(work_dir, 'input.bam')) command = ['SortSam', 'O=/data/output.bam', 'I=/data/input.bam'] # picard-tools container doesn't have JAVA_OPTS variable
python
{ "resource": "" }
q276345
run_base_recalibration
test
def run_base_recalibration(job, bam, bai, ref, ref_dict, fai, dbsnp, mills, unsafe=False): """ Creates recalibration table for Base Quality Score Recalibration :param JobFunctionWrappingJob job: passed automatically by Toil :param str bam: FileStoreID for BAM file :param str bai: FileStoreID for BAM index file :param str ref: FileStoreID for reference genome fasta file :param str ref_dict: FileStoreID for reference genome sequence dictionary file :param str fai: FileStoreID for reference genome fasta index file :param str dbsnp: FileStoreID for dbSNP VCF file :param str mills: FileStoreID for Mills VCF file :param bool unsafe: If True, runs GATK in UNSAFE mode: "-U ALLOW_SEQ_DICT_INCOMPATIBILITY" :return: FileStoreID for the recalibration table file :rtype: str """ inputs = {'ref.fasta': ref, 'ref.fasta.fai': fai, 'ref.dict': ref_dict, 'input.bam': bam, 'input.bai': bai, 'dbsnp.vcf': dbsnp, 'mills.vcf': mills} work_dir = job.fileStore.getLocalTempDir() for name, file_store_id in inputs.iteritems(): job.fileStore.readGlobalFile(file_store_id, os.path.join(work_dir, name)) # Call: GATK -- BaseRecalibrator parameters = ['-T', 'BaseRecalibrator', '-nct', str(int(job.cores)), '-R', '/data/ref.fasta', '-I', '/data/input.bam', # Recommended known sites:
python
{ "resource": "" }
q276346
run_kallisto
test
def run_kallisto(job, r1_id, r2_id, kallisto_index_url): """ RNA quantification via Kallisto :param JobFunctionWrappingJob job: passed automatically by Toil :param str r1_id: FileStoreID of fastq (pair 1) :param str r2_id: FileStoreID of fastq (pair 2 if applicable, otherwise pass None for single-end) :param str kallisto_index_url: FileStoreID for Kallisto index file :return: FileStoreID from Kallisto output :rtype: str """ work_dir = job.fileStore.getLocalTempDir() download_url(job, url=kallisto_index_url, name='kallisto_hg38.idx', work_dir=work_dir) # Retrieve files parameters = ['quant', '-i', '/data/kallisto_hg38.idx', '-t', str(job.cores),
python
{ "resource": "" }
q276347
run_rsem
test
def run_rsem(job, bam_id, rsem_ref_url, paired=True): """ RNA quantification with RSEM :param JobFunctionWrappingJob job: Passed automatically by Toil :param str bam_id: FileStoreID of transcriptome bam for quantification :param str rsem_ref_url: URL of RSEM reference (tarball) :param bool paired: If True, uses parameters for paired end data :return: FileStoreIDs for RSEM's gene and isoform output :rtype: str """ work_dir = job.fileStore.getLocalTempDir() download_url(job, url=rsem_ref_url, name='rsem_ref.tar.gz', work_dir=work_dir) subprocess.check_call(['tar', '-xvf', os.path.join(work_dir, 'rsem_ref.tar.gz'), '-C', work_dir]) os.remove(os.path.join(work_dir, 'rsem_ref.tar.gz')) # Determine tarball structure - based on it, ascertain folder name and rsem reference prefix rsem_files = [] for root, directories, files in os.walk(work_dir): rsem_files.extend([os.path.join(root, x) for x in files]) # "grp" is a required RSEM extension that should exist in the RSEM reference ref_prefix = [os.path.basename(os.path.splitext(x)[0]) for x in rsem_files if 'grp' in x][0] ref_folder = os.path.join('/data', os.listdir(work_dir)[0]) if len(os.listdir(work_dir)) == 1 else '/data' # I/O job.fileStore.readGlobalFile(bam_id, os.path.join(work_dir, 'transcriptome.bam')) output_prefix = 'rsem' # Call: RSEM parameters = ['--quiet', '--no-qualities', '-p', str(job.cores),
python
{ "resource": "" }
q276348
SARPlus.get_user_affinity
test
def get_user_affinity(self, test): """Prepare test set for C++ SAR prediction code. Find all items the test users have seen in the past. Arguments: test (pySpark.DataFrame): input dataframe which contains test users. """ test.createOrReplaceTempView(self.f("{prefix}df_test")) query = self.f( "SELECT DISTINCT {col_user} FROM {prefix}df_test CLUSTER BY {col_user}" ) df_test_users = self.spark.sql(query) df_test_users.write.mode("overwrite").saveAsTable( self.f("{prefix}df_test_users") ) query = self.f(
python
{ "resource": "" }
q276349
WebsocketHandler.send
test
def send(self, cmd): """Send the given command thru the websocket"""
python
{ "resource": "" }
q276350
WebsocketHandler.subscribe
test
def subscribe(self, stream, callback, transform=""): """Given a stream, a callback and an optional transform, sets up the subscription""" if self.status == "disconnected" or self.status == "disconnecting" or self.status == "connecting": self.connect() if self.status != "connected":
python
{ "resource": "" }
q276351
WebsocketHandler.connect
test
def connect(self): """Attempt to connect to the websocket - and returns either True or False depending on if the connection was successful or not""" # Wait for the lock to be available (ie, the websocket is not being used (yet)) self.ws_openlock.acquire() self.ws_openlock.release() if self.status == "connected": return True # Already connected if self.status == "disconnecting": # If currently disconnecting, wait a moment, and retry connect time.sleep(0.1) return self.connect() if self.status == "disconnected" or self.status == "reconnecting": self.ws = websocket.WebSocketApp(self.ws_url, header=self.headers, on_message=self.__on_message,
python
{ "resource": "" }
q276352
WebsocketHandler.__reconnect
test
def __reconnect(self): """This is called when a connection is lost - it attempts to reconnect to the server""" self.status = "reconnecting" # Reset the disconnect time after 15 minutes if self.disconnected_time - self.connected_time > 15 * 60: self.reconnect_time = self.reconnect_time_starting_seconds else: self.reconnect_time *= self.reconnect_time_backoff_multiplier if self.reconnect_time > self.reconnect_time_max_seconds: self.reconnect_time = self.reconnect_time_max_seconds # We want to add some randomness to the reconnect rate - necessary so that we don't pound the server # if it goes down self.reconnect_time *= 1 + random.uniform(-0.2, 0.2)
python
{ "resource": "" }
q276353
WebsocketHandler.__resubscribe
test
def __resubscribe(self): """Send subscribe command for all existing subscriptions. This allows a connection that was closed to be resumed""" with self.subscription_lock: for sub in self.subscriptions: logging.debug("Resubscribing to %s", sub)
python
{ "resource": "" }
q276354
WebsocketHandler.__on_open
test
def __on_open(self, ws): """Called when the websocket is opened""" logging.debug("ConnectorDB: Websocket opened") # Connection success - decrease the wait time for next connection self.reconnect_time /= self.reconnect_time_backoff_multiplier self.status = "connected" self.lastpingtime = time.time()
python
{ "resource": "" }
q276355
WebsocketHandler.__on_close
test
def __on_close(self, ws): """Called when the websocket is closed""" if self.status == "disconnected": return # This can be double-called on disconnect logging.debug("ConnectorDB:WS: Websocket closed") # Turn off the ping timer if self.pingtimer is not None:
python
{ "resource": "" }
q276356
WebsocketHandler.__on_error
test
def __on_error(self, ws, err): """Called when there is an error in the websocket""" logging.debug("ConnectorDB:WS: Connection Error")
python
{ "resource": "" }
q276357
WebsocketHandler.__on_message
test
def __on_message(self, ws, msg): """This function is called whenever there is a message received from the server""" msg = json.loads(msg) logging.debug("ConnectorDB:WS: Msg '%s'", msg["stream"]) # Build the subcription key stream_key = msg["stream"] + ":" if "transform" in msg: stream_key += msg["transform"] self.subscription_lock.acquire() if stream_key in self.subscriptions: subscription_function = self.subscriptions[stream_key] self.subscription_lock.release() fresult = subscription_function(msg["stream"], msg["data"]) if fresult is True: # This is a special result - if the subscription function of a downlink returns True, # then the datapoint is acknowledged automatically (ie, reinserted in non-downlink stream) fresult = msg["data"] if fresult is not False and fresult is not None and msg["stream"].endswith( "/downlink") and msg["stream"].count("/") ==
python
{ "resource": "" }
q276358
WebsocketHandler.__ensure_ping
test
def __ensure_ping(self): """Each time the server sends a ping message, we record the timestamp. If we haven't received a ping within the given interval, then we assume that the connection was lost, close the websocket and attempt to reconnect""" logging.debug("ConnectorDB:WS: pingcheck") if (time.time() - self.lastpingtime > self.connection_ping_timeout): logging.warn("ConnectorDB:WS: Websocket ping timed out!") if self.ws is not None: self.ws.close() self.__on_close(self.ws)
python
{ "resource": "" }
q276359
gatk_select_variants
test
def gatk_select_variants(job, mode, vcf_id, ref_fasta, ref_fai, ref_dict): """ Isolates a particular variant type from a VCF file using GATK SelectVariants :param JobFunctionWrappingJob job: passed automatically by Toil :param str mode: variant type (i.e. SNP or INDEL) :param str vcf_id: FileStoreID for input VCF file :param str ref_fasta: FileStoreID for reference genome fasta :param str ref_fai: FileStoreID for reference genome index file :param str ref_dict: FileStoreID for reference genome sequence dictionary file :return: FileStoreID for filtered VCF :rtype: str """ job.fileStore.logToMaster('Running GATK SelectVariants to select %ss' % mode) inputs = {'genome.fa': ref_fasta, 'genome.fa.fai': ref_fai, 'genome.dict': ref_dict, 'input.vcf': vcf_id} work_dir = job.fileStore.getLocalTempDir() for name, file_store_id in inputs.iteritems(): job.fileStore.readGlobalFile(file_store_id, os.path.join(work_dir, name)) command = ['-T', 'SelectVariants', '-R', 'genome.fa', '-V', 'input.vcf',
python
{ "resource": "" }
q276360
gatk_variant_filtration
test
def gatk_variant_filtration(job, vcf_id, filter_name, filter_expression, ref_fasta, ref_fai, ref_dict): """ Filters VCF file using GATK VariantFiltration. Fixes extra pair of quotation marks in VCF header that may interfere with other VCF tools. :param JobFunctionWrappingJob job: passed automatically by Toil :param str vcf_id: FileStoreID for input VCF file :param str filter_name: Name of filter for VCF header :param str filter_expression: JEXL filter expression :param str ref_fasta: FileStoreID for reference genome fasta :param str ref_fai: FileStoreID for reference genome index file :param str ref_dict: FileStoreID for reference genome sequence dictionary file :return: FileStoreID for filtered VCF file :rtype: str """ inputs = {'genome.fa': ref_fasta, 'genome.fa.fai': ref_fai, 'genome.dict': ref_dict, 'input.vcf': vcf_id} work_dir = job.fileStore.getLocalTempDir() for name, file_store_id in inputs.iteritems(): job.fileStore.readGlobalFile(file_store_id, os.path.join(work_dir, name)) command = ['-T', 'VariantFiltration', '-R', 'genome.fa', '-V', 'input.vcf', '--filterName', filter_name, # Documents filter name in header '--filterExpression', filter_expression, '-o', 'filtered_variants.vcf'] job.fileStore.logToMaster('Running GATK VariantFiltration using {name}: '
python
{ "resource": "" }
q276361
gatk_variant_recalibrator
test
def gatk_variant_recalibrator(job, mode, vcf, ref_fasta, ref_fai, ref_dict, annotations, hapmap=None, omni=None, phase=None, dbsnp=None, mills=None, max_gaussians=4, unsafe_mode=False): """ Runs either SNP or INDEL variant quality score recalibration using GATK VariantRecalibrator. Because the VQSR method models SNPs and INDELs differently, VQSR must be run separately for these variant types. :param JobFunctionWrappingJob job: passed automatically by Toil :param str mode: Determines variant recalibration mode (SNP or INDEL) :param str vcf: FileStoreID for input VCF file :param str ref_fasta: FileStoreID for reference genome fasta :param str ref_fai: FileStoreID for reference genome index file :param str ref_dict: FileStoreID for reference genome sequence dictionary file :param list[str] annotations: List of GATK variant annotations to filter on :param str hapmap: FileStoreID for HapMap resource file, required for SNP VQSR :param str omni: FileStoreID for Omni resource file, required for SNP VQSR :param str phase: FileStoreID for 1000G resource file, required for SNP VQSR :param str dbsnp: FilesStoreID for dbSNP resource file, required for SNP and INDEL VQSR :param str mills: FileStoreID for Mills resource file, required for INDEL VQSR :param int max_gaussians: Number of Gaussians used during training, default is 4 :param bool unsafe_mode: If True, runs gatk UNSAFE mode: "-U ALLOW_SEQ_DICT_INCOMPATIBILITY" :return: FileStoreID for the variant recalibration table, tranche file, and plots file :rtype: tuple """ mode = mode.upper() inputs = {'genome.fa': ref_fasta, 'genome.fa.fai': ref_fai, 'genome.dict': ref_dict, 'input.vcf': vcf} # Refer to GATK documentation for description of recommended parameters: # https://software.broadinstitute.org/gatk/documentation/article?id=1259 # https://software.broadinstitute.org/gatk/documentation/article?id=2805 # This base command includes parameters for both INDEL and SNP VQSR. command = ['-T', 'VariantRecalibrator', '-R', 'genome.fa', '-input', 'input.vcf', '-tranche', '100.0', '-tranche', '99.9', '-tranche', '99.0', '-tranche', '90.0', '--maxGaussians', str(max_gaussians), '-recalFile', 'output.recal', '-tranchesFile', 'output.tranches', '-rscriptFile', 'output.plots.R'] # Parameters and resource files for SNP VQSR. if mode == 'SNP': command.extend( ['-resource:hapmap,known=false,training=true,truth=true,prior=15.0', 'hapmap.vcf', '-resource:omni,known=false,training=true,truth=true,prior=12.0', 'omni.vcf', '-resource:dbsnp,known=true,training=false,truth=false,prior=2.0', 'dbsnp.vcf',
python
{ "resource": "" }
q276362
gatk_apply_variant_recalibration
test
def gatk_apply_variant_recalibration(job, mode, vcf, recal_table, tranches, ref_fasta, ref_fai, ref_dict, ts_filter_level=99.0, unsafe_mode=False): """ Applies variant quality score recalibration to VCF file using GATK ApplyRecalibration :param JobFunctionWrappingJob job: passed automatically by Toil :param str mode: Determines variant recalibration mode (SNP or INDEL) :param str vcf: FileStoreID for input VCF file :param str recal_table: FileStoreID for recalibration table file :param str tranches: FileStoreID for tranches file :param str ref_fasta: FileStoreID for reference genome fasta :param str ref_fai: FileStoreID for reference genome index file :param str ref_dict: FileStoreID for reference genome sequence dictionary file :param float ts_filter_level: Sensitivity expressed as a percentage, default is 99.0 :param bool unsafe_mode: If True, runs gatk UNSAFE mode: "-U ALLOW_SEQ_DICT_INCOMPATIBILITY" :return: FileStoreID for recalibrated VCF file :rtype: str
python
{ "resource": "" }
q276363
gatk_combine_variants
test
def gatk_combine_variants(job, vcfs, ref_fasta, ref_fai, ref_dict, merge_option='UNIQUIFY'): """ Merges VCF files using GATK CombineVariants :param JobFunctionWrappingJob job: Toil Job instance :param dict vcfs: Dictionary of VCF FileStoreIDs {sample identifier: FileStoreID} :param str ref_fasta: FileStoreID for reference genome fasta :param str ref_fai: FileStoreID for reference genome index file :param str ref_dict: FileStoreID for reference genome sequence dictionary file :param str merge_option: Value for --genotypemergeoption flag (Default: 'UNIQUIFY') 'UNIQUIFY': Multiple variants at a single site are merged into a single variant record. 'UNSORTED': Used to merge VCFs from the same sample :return: FileStoreID for merged VCF file :rtype: str """ job.fileStore.logToMaster('Running GATK CombineVariants') inputs = {'genome.fa': ref_fasta, 'genome.fa.fai': ref_fai, 'genome.dict': ref_dict} inputs.update(vcfs) work_dir = job.fileStore.getLocalTempDir() for name, file_store_id in inputs.iteritems(): job.fileStore.readGlobalFile(file_store_id, os.path.join(work_dir, name)) command = ['-T', 'CombineVariants', '-R', '/data/genome.fa',
python
{ "resource": "" }
q276364
bam_quickcheck
test
def bam_quickcheck(bam_path): """ Perform a quick check on a BAM via `samtools quickcheck`. This will detect obvious BAM errors such as truncation. :param str bam_path: path to BAM file to be checked :rtype: boolean :return: True if the BAM is valid, False if the BAM is invalid or something related to the call went wrong """ directory, bam_name = os.path.split(bam_path) exit_code = subprocess.call(['docker', 'run', '-v', directory + ':/data',
python
{ "resource": "" }
q276365
load_handlers
test
def load_handlers(handler_mapping): """ Given a dictionary mapping which looks like the following, import the objects based on the dotted path and yield the packet type and handler as pairs. If the special string '*' is passed, don't process that, pass it on as it is a wildcard. If a non-string object is given for either packet or handler (key or value) assume these are the objects to use and yield them. :: { 'rfxcom.protocol.Status': 'home.collect.logging_handler', 'rfxcom.protocol.Elec': 'home.collect.elec_handler', 'rfxcom.protocol.TempHumidity': 'home.collect.temp_humidity_handler', '*': 'home.collect.logging_handler' } """ handlers = {} for packet_type, handler in handler_mapping.items(): if packet_type ==
python
{ "resource": "" }
q276366
write_config
test
def write_config(configuration): """Helper to write the JSON configuration to
python
{ "resource": "" }
q276367
get_config
test
def get_config(): """Gets the configuration for this project from the default JSON file, or writes one if it doesn't exist :rtype: dict """
python
{ "resource": "" }
q276368
OlsClient.get_term
test
def get_term(self, ontology, iri): """Gets the data for a given term :param str ontology: The name of the ontology :param str iri: The IRI of a term :rtype: dict """
python
{ "resource": "" }
q276369
OlsClient.search
test
def search(self, name, query_fields=None): """Searches the OLS with the given term :param str name: :param list[str] query_fields: Fields to query :return: dict """ params = {'q': name} if query_fields is not None:
python
{ "resource": "" }
q276370
OlsClient.suggest
test
def suggest(self, name, ontology=None): """Suggest terms from an optional list of ontologies :param str name: :param list[str] ontology: :rtype: dict .. seealso:: https://www.ebi.ac.uk/ols/docs/api#_suggest_term """ params = {'q': name}
python
{ "resource": "" }
q276371
OlsClient.iter_descendants
test
def iter_descendants(self, ontology, iri, size=None, sleep=None): """Iterates over the descendants of a given term :param str ontology: The name of the ontology :param str iri: The IRI of a term :param int size: The size of each page. Defaults to 500, which is the maximum allowed by the EBI. :param int sleep: The amount of time to sleep between pages. Defaults to 0 seconds.
python
{ "resource": "" }
q276372
OlsClient.iter_descendants_labels
test
def iter_descendants_labels(self, ontology, iri, size=None, sleep=None): """Iterates over the labels for the descendants of a given term :param str ontology: The name of the ontology :param str iri: The IRI of a term :param int size: The size of each page. Defaults to 500, which is the maximum allowed by the EBI. :param int
python
{ "resource": "" }
q276373
OlsClient.iter_labels
test
def iter_labels(self, ontology, size=None, sleep=None): """Iterates over the labels of terms in the ontology. Automatically wraps the pager returned by the OLS. :param str ontology: The name of the ontology :param int size: The size of each page. Defaults to 500, which is the maximum allowed by the EBI. :param int sleep: The amount of time to sleep between
python
{ "resource": "" }
q276374
OlsClient.iter_hierarchy
test
def iter_hierarchy(self, ontology, size=None, sleep=None): """Iterates over parent-child relations :param str ontology: The name of the ontology :param int size: The size of each page. Defaults to 500, which is the maximum allowed by the EBI. :param int sleep: The amount of time to sleep between pages. Defaults to 0 seconds. :rtype: iter[tuple[str,str]] """ for term in self.iter_terms(ontology=ontology, size=size, sleep=sleep): try: hierarchy_children_link = term['_links'][HIERARCHICAL_CHILDREN]['href']
python
{ "resource": "" }
q276375
run_fastqc
test
def run_fastqc(job, r1_id, r2_id): """ Run Fastqc on the input reads :param JobFunctionWrappingJob job: passed automatically by Toil :param str r1_id: FileStoreID of fastq read 1 :param str r2_id: FileStoreID of fastq read 2 :return: FileStoreID of fastQC output (tarball) :rtype: str """ work_dir = job.fileStore.getLocalTempDir() job.fileStore.readGlobalFile(r1_id, os.path.join(work_dir, 'R1.fastq')) parameters = ['/data/R1.fastq'] output_names = ['R1_fastqc.html', 'R1_fastqc.zip'] if r2_id: job.fileStore.readGlobalFile(r2_id, os.path.join(work_dir, 'R2.fastq')) parameters.extend(['-t', '2',
python
{ "resource": "" }
q276376
Merge.addStream
test
def addStream(self, stream, t1=None, t2=None, limit=None, i1=None, i2=None, transform=None): """Adds the given stream to the query construction. The function supports both stream names and Stream objects.""" params = query_maker(t1, t2, limit, i1, i2, transform)
python
{ "resource": "" }
q276377
create_app
test
def create_app(config=None): """ This needs some tidying up. To avoid circular imports we import everything here but it makes this method a bit more gross. """ # Initialise the app from home.config import TEMPLATE_FOLDER, STATIC_FOLDER app = Flask(__name__, static_folder=STATIC_FOLDER, template_folder=TEMPLATE_FOLDER) app.config['SECRET_KEY'] = 'ssh, its a secret.' # Load the default config, the specified config file and then any # overwrites that are manually passed in. app.config.from_object('home.config') if 'HOME_SETTINGS' in environ: app.config.from_envvar('HOME_SETTINGS') app.config.from_object(config) # Register the web front end and the API. from home.dash.web import web from home.dash.api import api app.register_blueprint(web) app.register_blueprint(api, url_prefix='/api') login_manager.init_app(app) login_manager.login_view = 'Dashboard Web.login'
python
{ "resource": "" }
q276378
SparkService.start
test
def start(self, job): """ Start spark and hdfs master containers :param job: The underlying job. """ if self.hostname is None: self.hostname = subprocess.check_output(["hostname", "-f",])[:-1] _log.info("Started Spark master container.") self.sparkContainerID = dockerCheckOutput(job=job, defer=STOP, workDir=os.getcwd(), tool="quay.io/ucsc_cgl/apache-spark-master:1.5.2", dockerParameters=["--net=host", "-d", "-v", "/mnt/ephemeral/:/ephemeral/:rw", "-e", "SPARK_MASTER_IP=" + self.hostname, "-e", "SPARK_LOCAL_DIRS=/ephemeral/spark/local", "-e", "SPARK_WORKER_DIR=/ephemeral/spark/work"],
python
{ "resource": "" }
q276379
WorkerService.start
test
def start(self, job): """ Start spark and hdfs worker containers :param job: The underlying job. """ # start spark and our datanode self.sparkContainerID = dockerCheckOutput(job=job, defer=STOP, workDir=os.getcwd(), tool="quay.io/ucsc_cgl/apache-spark-worker:1.5.2", dockerParameters=["--net=host", "-d", "-v", "/mnt/ephemeral/:/ephemeral/:rw", "-e", "\"SPARK_MASTER_IP=" + self.masterIP + ":" + _SPARK_MASTER_PORT + "\"", "-e", "SPARK_LOCAL_DIRS=/ephemeral/spark/local", "-e", "SPARK_WORKER_DIR=/ephemeral/spark/work"], parameters=[self.masterIP + ":" + _SPARK_MASTER_PORT])[:-1] self.__start_datanode(job) # fake do/while to check if HDFS is up hdfs_down = True retries = 0 while hdfs_down and (retries < 5):
python
{ "resource": "" }
q276380
WorkerService.__start_datanode
test
def __start_datanode(self, job): """ Launches the Hadoop datanode. :param job: The underlying job. """ self.hdfsContainerID = dockerCheckOutput(job=job, defer=STOP, workDir=os.getcwd(),
python
{ "resource": "" }
q276381
WorkerService.stop
test
def stop(self, fileStore): """ Stop spark and hdfs worker containers :param job: The underlying job. """ subprocess.call(["docker", "exec", self.sparkContainerID, "rm", "-r", "/ephemeral/spark"]) subprocess.call(["docker", "stop", self.sparkContainerID]) subprocess.call(["docker", "rm", self.sparkContainerID]) _log.info("Stopped Spark worker.") subprocess.call(["docker", "exec", self.hdfsContainerID, "rm",
python
{ "resource": "" }
q276382
WorkerService.check
test
def check(self): """ Checks to see if Spark worker and HDFS datanode are still running. """ status = _checkContainerStatus(self.sparkContainerID, self.hdfsContainerID,
python
{ "resource": "" }
q276383
base_tokenizer
test
def base_tokenizer(fp): 'Tokenizer. Generates a token stream from text' if isinstance(fp, StringIO): template_file = fp size = template_file.len else: #empty file check if os.fstat(fp.fileno()).st_size == 0: yield TOKEN_EOF, 'EOF', 0, 0 return template_file = mmap.mmap(fp.fileno(), 0, access=mmap.ACCESS_READ) size = template_file.size() lineno = 0 while 1: lineno += 1 pos = 1 # end of file if template_file.tell() == size: yield TOKEN_EOF, 'EOF', lineno, 0 break # now we tokenize line by line line = template_file.readline().decode('utf-8') line = line.replace('\r\n', '') line = line.replace('\n', '') # ignoring non XML comments
python
{ "resource": "" }
q276384
lookup_zone
test
def lookup_zone(conn, zone): """Look up a zone ID for a zone string. Args: conn: boto.route53.Route53Connection zone: string eg. foursquare.com Returns: zone ID eg. ZE2DYFZDWGSL4. Raises: ZoneNotFoundError if zone not found.""" all_zones = conn.get_all_hosted_zones() for resp in all_zones['ListHostedZonesResponse']['HostedZones']:
python
{ "resource": "" }
q276385
fetch_config
test
def fetch_config(zone, conn): """Fetch all pieces of a Route 53 config from Amazon. Args: zone: string, hosted zone id. conn: boto.route53.Route53Connection Returns: list of ElementTrees, one for each piece of config.""" more_to_fetch = True cfg_chunks = [] next_name = None next_type = None next_identifier = None while more_to_fetch == True: more_to_fetch = False getstr = '/%s/hostedzone/%s/rrset' % (R53_API_VERSION, zone) if next_name is not None: getstr += '?name=%s&type=%s' % (next_name, next_type) if next_identifier is not None: getstr += '&identifier=%s' % next_identifier log.debug('requesting %s' % getstr) resp = conn.make_request('GET', getstr) etree = lxml.etree.parse(resp) cfg_chunks.append(etree) root = etree.getroot() truncated = root.find('{%s}IsTruncated' % R53_XMLNS)
python
{ "resource": "" }
q276386
merge_config
test
def merge_config(cfg_chunks): """Merge a set of fetched Route 53 config Etrees into a canonical form. Args: cfg_chunks: [ lxml.etree.ETree ] Returns: lxml.etree.Element""" root = lxml.etree.XML('<ResourceRecordSets xmlns="%s"></ResourceRecordSets>' % R53_XMLNS, parser=XML_PARSER)
python
{ "resource": "" }
q276387
validate_changeset
test
def validate_changeset(changeset): """Validate a changeset is compatible with Amazon's API spec. Args: changeset: lxml.etree.Element (<ChangeResourceRecordSetsRequest>) Returns: [ errors ] list of error strings or [].""" errors = [] changes = changeset.findall('.//{%s}Change' % R53_XMLNS) num_changes = len(changes) if num_changes == 0: errors.append('changeset must have at least one <Change> element') if num_changes > 100: errors.append('changeset has %d <Change> elements: max is 100' % num_changes) rrs = changeset.findall('.//{%s}ResourceRecord' % R53_XMLNS) num_rrs =
python
{ "resource": "" }
q276388
minimize_best_n
test
def minimize_best_n(Members): ''' Orders population members from lowest fitness to highest fitness Args: Members (list): list of PyGenetics Member objects Returns: list: ordered
python
{ "resource": "" }
q276389
Population.fitness
test
def fitness(self): '''Population fitness == average member fitness score''' if len(self.__members) != 0: if self.__num_processes > 1: members = [m.get() for m in self.__members]
python
{ "resource": "" }
q276390
Population.ave_cost_fn_val
test
def ave_cost_fn_val(self): '''Returns average cost function return value for all members''' if len(self.__members) != 0: if self.__num_processes > 1: members = [m.get() for m in self.__members]
python
{ "resource": "" }
q276391
Population.med_cost_fn_val
test
def med_cost_fn_val(self): '''Returns median cost function return value for all members''' if len(self.__members) != 0: if self.__num_processes > 1: members = [m.get() for m in self.__members]
python
{ "resource": "" }
q276392
Population.parameters
test
def parameters(self): '''Population parameter vals == average member parameter vals''' if len(self.__members) != 0: if self.__num_processes > 1: members = [m.get() for m in self.__members] else: members = self.__members params = {} for p in self.__parameters:
python
{ "resource": "" }
q276393
Population.members
test
def members(self): '''Returns Member objects of population''' if self.__num_processes > 1: return [m.get() for m
python
{ "resource": "" }
q276394
Population.add_parameter
test
def add_parameter(self, name, min_val, max_val): '''Adds a parameter to the Population Args: name (str): name of the parameter min_val (int or float): minimum value for the parameter max_val (int or
python
{ "resource": "" }
q276395
Population.next_generation
test
def next_generation(self, mut_rate=0, max_mut_amt=0, log_base=10): '''Generates the next population from a previously evaluated generation Args: mut_rate (float): mutation rate for new members (0.0 - 1.0) max_mut_amt (float): how much the member is allowed to mutate (0.0 - 1.0, proportion change of mutated parameter) log_base (int): the higher this number, the more likely the first Members (chosen with supplied selection function) are chosen as parents for the next generation ''' if self.__num_processes > 1: process_pool = Pool(processes=self.__num_processes) members = [m.get() for m in self.__members] else: members = self.__members if len(members) == 0: raise Exception( 'Generation 0 not found: use generate_population() first' ) selected_members = self.__select_fn(members) reproduction_probs = list(reversed(logspace(0.0, 1.0, num=len(selected_members), base=log_base))) reproduction_probs = reproduction_probs / sum(reproduction_probs) self.__members = [] for _ in range(self.__pop_size): parent_1 = nrandom.choice(selected_members, p=reproduction_probs) parent_2 = nrandom.choice(selected_members, p=reproduction_probs) feed_dict = {} for param in self.__parameters:
python
{ "resource": "" }
q276396
ConfigOptionParser.normalize_keys
test
def normalize_keys(self, items): """Return a config dictionary with normalized keys regardless of whether the keys were specified in environment variables or in config
python
{ "resource": "" }
q276397
ConfigOptionParser.get_environ_vars
test
def get_environ_vars(self): """Returns a generator with all environmental vars with prefix PIP_""" for key, val in os.environ.items():
python
{ "resource": "" }
q276398
throws_exception
test
def throws_exception(callable, *exceptions): """ Return True if the callable throws the specified exception >>> throws_exception(lambda: int('3')) False
python
{ "resource": "" }
q276399
transform_hits
test
def transform_hits(hits): """ The list from pypi is really a list of versions. We want a list of packages with the list of versions stored inline. This converts the list from pypi into one we can use. """ packages = {} for hit in hits: name = hit['name'] summary = hit['summary'] version = hit['version'] score = hit['_pypi_ordering'] if score is None: score = 0 if name not in packages.keys(): packages[name] = { 'name': name, 'summary': summary, 'versions': [version], 'score': score, } else: packages[name]['versions'].append(version) # if this is the highest version, replace summary and score
python
{ "resource": "" }