text_prompt: string (lengths 157–13.1k)
code_prompt: string (lengths 7–19.8k)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def exon_count(job, job_vars): """ Produces exon counts job_vars: tuple Tuple of dictionaries: input_args and ids """
    input_args, ids = job_vars
    work_dir = job.fileStore.getLocalTempDir()
    uuid = input_args['uuid']
    sudo = input_args['sudo']
    # I/O
    sort_by_ref, normalize_pl, composite_bed = return_input_paths(job, work_dir, ids, 'sort_by_ref.bam',
                                                                  'normalize.pl', 'composite_exons.bed')
    # Command
    tool = 'jvivian/bedtools'
    cmd_1 = ['coverage', '-split',
             '-abam', docker_path(sort_by_ref),
             '-b', docker_path(composite_bed)]
    cmd_2 = ['perl', os.path.join(work_dir, 'normalize.pl'), sort_by_ref, composite_bed]
    popen_docker = ['docker', 'run', '-v', '{}:/data'.format(work_dir), tool]
    if sudo:
        popen_docker = ['sudo'] + popen_docker
    p = subprocess.Popen(popen_docker + cmd_1, stdout=subprocess.PIPE)
    with open(os.path.join(work_dir, 'exon_quant'), 'w') as f:
        subprocess.check_call(cmd_2, stdin=p.stdout, stdout=f)
    p1 = subprocess.Popen(['cat', os.path.join(work_dir, 'exon_quant')], stdout=subprocess.PIPE)
    p2 = subprocess.Popen(['tr', '":"', '"\t"'], stdin=p1.stdout, stdout=subprocess.PIPE)
    p3 = subprocess.Popen(['tr', '"-"', '"\t"'], stdin=p2.stdout, stdout=subprocess.PIPE)
    with open(os.path.join(work_dir, 'exon_quant.bed'), 'w') as f:
        subprocess.check_call(['cut', '-f1-4'], stdin=p3.stdout, stdout=f)
    # Create zip, upload to fileStore, and move to output_dir as a backup
    output_files = ['exon_quant.bed', 'exon_quant']
    tarball_files(work_dir, tar_name='exon.tar.gz', uuid=uuid, files=output_files)
    return job.fileStore.writeGlobalFile(os.path.join(work_dir, 'exon.tar.gz'))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def transcriptome(job, job_vars): """ Creates a bam of just the transcriptome job_vars: tuple Tuple of dictionaries: input_args and ids """
    input_args, ids = job_vars
    work_dir = job.fileStore.getLocalTempDir()
    sudo = input_args['sudo']
    # I/O
    sort_by_ref, bed, hg19_fa = return_input_paths(job, work_dir, ids, 'sort_by_ref.bam',
                                                   'unc.bed', 'hg19.transcripts.fa')
    output = os.path.join(work_dir, 'transcriptome.bam')
    # Command
    parameters = ['sam-xlate',
                  '--bed', docker_path(bed),
                  '--in', docker_path(sort_by_ref),
                  '--order', docker_path(hg19_fa),
                  '--out', docker_path(output),
                  '--xgtag', '--reverse']
    docker_call(tool='quay.io/ucsc_cgl/ubu:1.2--02806964cdf74bf5c39411b236b4c4e36d026843',
                tool_parameters=parameters, work_dir=work_dir, java_opts='-Xmx30g', sudo=sudo)
    # Write to FileStore
    ids['transcriptome.bam'] = job.fileStore.writeGlobalFile(output)
    # Run child job
    return job.addChildJobFn(filter_bam, job_vars, memory='30G', disk='30G').rv()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def filter_bam(job, job_vars): """ Performs filtering on the transcriptome bam job_vars: tuple Tuple of dictionaries: input_args and ids """
    input_args, ids = job_vars
    work_dir = job.fileStore.getLocalTempDir()
    cores = input_args['cpu_count']
    sudo = input_args['sudo']
    # I/O
    transcriptome_bam = return_input_paths(job, work_dir, ids, 'transcriptome.bam')
    output = os.path.join(work_dir, 'filtered.bam')
    # Command
    parameters = ['sam-filter', '--strip-indels',
                  '--max-insert', '1000',
                  '--mapq', '1',
                  '--in', docker_path(transcriptome_bam),
                  '--out', docker_path(output)]
    docker_call(tool='quay.io/ucsc_cgl/ubu:1.2--02806964cdf74bf5c39411b236b4c4e36d026843',
                tool_parameters=parameters, work_dir=os.path.dirname(output),
                java_opts='-Xmx30g', sudo=sudo)
    # Write to FileStore
    ids['filtered.bam'] = job.fileStore.writeGlobalFile(output)
    # Run child job
    return job.addChildJobFn(rsem, job_vars, cores=cores, disk='30 G').rv()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def rsem(job, job_vars): """ Runs RSEM to produce counts job_vars: tuple Tuple of dictionaries: input_args and ids """
    input_args, ids = job_vars
    work_dir = job.fileStore.getLocalTempDir()
    cpus = input_args['cpu_count']
    sudo = input_args['sudo']
    single_end_reads = input_args['single_end_reads']
    # I/O
    filtered_bam, rsem_ref = return_input_paths(job, work_dir, ids, 'filtered.bam', 'rsem_ref.zip')
    subprocess.check_call(['unzip', '-o', os.path.join(work_dir, 'rsem_ref.zip'), '-d', work_dir])
    output_prefix = 'rsem'
    # Make tool call to Docker
    parameters = ['--quiet', '--no-qualities',
                  '-p', str(cpus),
                  '--forward-prob', '0.5',
                  '--seed-length', '25',
                  '--fragment-length-mean', '-1.0',
                  '--bam', docker_path(filtered_bam)]
    if not single_end_reads:
        parameters.extend(['--paired-end'])
    parameters.extend(['/data/rsem_ref/hg19_M_rCRS_ref', output_prefix])
    docker_call(tool='quay.io/ucsc_cgl/rsem:1.2.25--4e8d1b31d4028f464b3409c6558fb9dfcad73f88',
                tool_parameters=parameters, work_dir=work_dir, sudo=sudo)
    os.rename(os.path.join(work_dir, output_prefix + '.genes.results'),
              os.path.join(work_dir, 'rsem_gene.tab'))
    os.rename(os.path.join(work_dir, output_prefix + '.isoforms.results'),
              os.path.join(work_dir, 'rsem_isoform.tab'))
    # Write to FileStore
    ids['rsem_gene.tab'] = job.fileStore.writeGlobalFile(os.path.join(work_dir, 'rsem_gene.tab'))
    ids['rsem_isoform.tab'] = job.fileStore.writeGlobalFile(os.path.join(work_dir, 'rsem_isoform.tab'))
    # Run child jobs
    return job.addChildJobFn(rsem_postprocess, job_vars).rv()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def consolidate_output(job, job_vars, output_ids): """ Combine the contents of separate zipped outputs into one via streaming job_vars: tuple Tuple of dictionaries: input_args and ids output_ids: tuple Nested tuple of all the output fileStore IDs """
    input_args, ids = job_vars
    work_dir = job.fileStore.getLocalTempDir()
    uuid = input_args['uuid']
    # Retrieve IDs
    rseq_id, exon_id, rsem_id = flatten(output_ids)
    # Retrieve output file paths to consolidate
    # map_tar = job.fileStore.readGlobalFile(map_id, os.path.join(work_dir, 'map.tar.gz'))
    qc_tar = job.fileStore.readGlobalFile(rseq_id, os.path.join(work_dir, 'qc.tar.gz'))
    exon_tar = job.fileStore.readGlobalFile(exon_id, os.path.join(work_dir, 'exon.tar.gz'))
    rsem_tar = job.fileStore.readGlobalFile(rsem_id, os.path.join(work_dir, 'rsem.tar.gz'))
    # I/O
    out_tar = os.path.join(work_dir, uuid + '.tar.gz')
    # Consolidate separate tarballs
    with tarfile.open(os.path.join(work_dir, out_tar), 'w:gz') as f_out:
        for tar in [rsem_tar, exon_tar, qc_tar]:
            with tarfile.open(tar, 'r') as f_in:
                for tarinfo in f_in:
                    with closing(f_in.extractfile(tarinfo)) as f_in_file:
                        if tar == qc_tar:
                            tarinfo.name = os.path.join(uuid, 'rseq_qc', os.path.basename(tarinfo.name))
                        else:
                            tarinfo.name = os.path.join(uuid, os.path.basename(tarinfo.name))
                        f_out.addfile(tarinfo, fileobj=f_in_file)
    # Move to output directory if selected
    if input_args['output_dir']:
        output_dir = input_args['output_dir']
        mkdir_p(output_dir)
        copy_to_output_dir(work_dir, output_dir, uuid=None, files=[uuid + '.tar.gz'])
    # Write output file to fileStore
    ids['uuid.tar.gz'] = job.fileStore.writeGlobalFile(out_tar)
    # If S3 bucket argument specified, upload to S3
    if input_args['s3_dir']:
        job.addChildJobFn(upload_output_to_s3, job_vars)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def main(): """ This is a Toil pipeline for the UNC best practice RNA-Seq analysis. RNA-seq fastqs are combined, aligned, sorted, filtered, and quantified. Please read the README.md located in the same directory. """
    # Define Parser object and add to toil
    parser = build_parser()
    Job.Runner.addToilOptions(parser)
    args = parser.parse_args()
    # Store inputs from argparse
    inputs = {'config': args.config,
              'config_fastq': args.config_fastq,
              'input': args.input,
              'unc.bed': args.unc,
              'hg19.transcripts.fa': args.fasta,
              'composite_exons.bed': args.composite_exons,
              'normalize.pl': args.normalize,
              'output_dir': args.output_dir,
              'rsem_ref.zip': args.rsem_ref,
              'chromosomes.zip': args.chromosomes,
              'ebwt.zip': args.ebwt,
              'ssec': args.ssec,
              's3_dir': args.s3_dir,
              'sudo': args.sudo,
              'single_end_reads': args.single_end_reads,
              'upload_bam_to_s3': args.upload_bam_to_s3,
              'uuid': None,
              'sample.tar': None,
              'cpu_count': None}
    # Launch jobs
    Job.Runner.startToil(Job.wrapJobFn(download_shared_files, inputs), args)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def remove_file(master_ip, filename, spark_on_toil): """ Remove the given file from hdfs with master at the given IP address :type masterIP: MasterAddress """
    master_ip = master_ip.actual
    ssh_call = ['ssh', '-o', 'StrictHostKeyChecking=no', master_ip]

    if spark_on_toil:
        output = check_output(ssh_call + ['docker', 'ps'])
        container_id = next(line.split()[0] for line in output.splitlines()
                            if 'apache-hadoop-master' in line)
        ssh_call += ['docker', 'exec', container_id]

    try:
        check_call(ssh_call + ['hdfs', 'dfs', '-rm', '-r', '/' + filename])
    except:
        pass
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def download_data(job, master_ip, inputs, known_snps, bam, hdfs_snps, hdfs_bam): """ Downloads input data files from S3. :type masterIP: MasterAddress """
log.info("Downloading known sites file %s to %s.", known_snps, hdfs_snps) call_conductor(job, master_ip, known_snps, hdfs_snps, memory=inputs.memory) log.info("Downloading input BAM %s to %s.", bam, hdfs_bam) call_conductor(job, master_ip, bam, hdfs_bam, memory=inputs.memory)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def upload_data(job, master_ip, inputs, hdfs_name, upload_name, spark_on_toil): """ Upload file hdfsName from hdfs to s3 """
    if mock_mode():
        truncate_file(master_ip, hdfs_name, spark_on_toil)

    log.info("Uploading output BAM %s to %s.", hdfs_name, upload_name)
    call_conductor(job, master_ip, hdfs_name, upload_name, memory=inputs.memory)
    remove_file(master_ip, hdfs_name, spark_on_toil)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def static_adam_preprocessing_dag(job, inputs, sample, output_dir, suffix=''): """ A Toil job function performing ADAM preprocessing on a single sample """
    inputs.sample = sample
    inputs.output_dir = output_dir
    inputs.suffix = suffix

    if inputs.master_ip is not None or inputs.run_local:
        if not inputs.run_local and inputs.master_ip == 'auto':
            # Static, standalone Spark cluster managed by uberscript
            spark_on_toil = False
            scale_up = job.wrapJobFn(scale_external_spark_cluster, 1)
            job.addChild(scale_up)
            spark_work = job.wrapJobFn(download_run_and_upload,
                                       inputs.master_ip, inputs, spark_on_toil)
            scale_up.addChild(spark_work)
            scale_down = job.wrapJobFn(scale_external_spark_cluster, -1)
            spark_work.addChild(scale_down)
        else:
            # Static, external Spark cluster
            spark_on_toil = False
            spark_work = job.wrapJobFn(download_run_and_upload,
                                       inputs.master_ip, inputs, spark_on_toil)
            job.addChild(spark_work)
    else:
        # Dynamic subclusters, i.e. Spark-on-Toil
        spark_on_toil = True
        cores = multiprocessing.cpu_count()
        master_ip = spawn_spark_cluster(job,
                                        False,  # Sudo
                                        inputs.num_nodes - 1,
                                        cores=cores,
                                        memory=inputs.memory)
        spark_work = job.wrapJobFn(download_run_and_upload,
                                   master_ip, inputs, spark_on_toil)
        job.addChild(spark_work)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def hard_filter_pipeline(job, uuid, vcf_id, config): """ Runs GATK Hard Filtering on a Genomic VCF file and uploads the results. 0: Start 0 --> 1 --> 3 --> 5 --> 6 1: Select SNPs | | 2: Select INDELs +-> 2 --> 4 + 3: Apply SNP Filter 4: Apply INDEL Filter 5: Merge SNP and INDEL VCFs 6: Write filtered VCF to output directory :param JobFunctionWrappingJob job: passed automatically by Toil :param str uuid: Unique sample identifier :param str vcf_id: VCF FileStoreID :param Namespace config: Pipeline configuration options and shared files Requires the following config attributes: config.genome_fasta FilesStoreID for reference genome fasta file config.genome_fai FilesStoreID for reference genome fasta index file config.genome_dict FilesStoreID for reference genome sequence dictionary file config.snp_filter_name Name of SNP filter for VCF header config.snp_filter_expression SNP JEXL filter expression config.indel_filter_name Name of INDEL filter for VCF header config.indel_filter_expression INDEL JEXL filter expression config.xmx Java heap size in bytes config.suffix Suffix added to output filename config.output_dir URL or local path to output directory config.ssec Path to key file for SSE-C encryption :return: SNP and INDEL FileStoreIDs :rtype: tuple """
    job.fileStore.logToMaster('Running Hard Filter on {}'.format(uuid))

    # Get the total size of the genome reference
    genome_ref_size = config.genome_fasta.size + config.genome_fai.size + config.genome_dict.size

    # The SelectVariants disk requirement depends on the input VCF, the genome reference files,
    # and the output VCF. The output VCF is smaller than the input VCF. The disk requirement
    # is identical for SNPs and INDELs.
    select_variants_disk = PromisedRequirement(lambda vcf, ref_size: 2 * vcf.size + ref_size,
                                               vcf_id, genome_ref_size)
    select_snps = job.wrapJobFn(gatk_select_variants, 'SNP', vcf_id,
                                config.genome_fasta, config.genome_fai, config.genome_dict,
                                memory=config.xmx, disk=select_variants_disk)

    # The VariantFiltration disk requirement depends on the input VCF, the genome reference files,
    # and the output VCF. The filtered VCF is smaller than the input VCF.
    snp_filter_disk = PromisedRequirement(lambda vcf, ref_size: 2 * vcf.size + ref_size,
                                          select_snps.rv(), genome_ref_size)
    snp_filter = job.wrapJobFn(gatk_variant_filtration, select_snps.rv(),
                               config.snp_filter_name, config.snp_filter_expression,
                               config.genome_fasta, config.genome_fai, config.genome_dict,
                               memory=config.xmx, disk=snp_filter_disk)

    select_indels = job.wrapJobFn(gatk_select_variants, 'INDEL', vcf_id,
                                  config.genome_fasta, config.genome_fai, config.genome_dict,
                                  memory=config.xmx, disk=select_variants_disk)

    indel_filter_disk = PromisedRequirement(lambda vcf, ref_size: 2 * vcf.size + ref_size,
                                            select_indels.rv(), genome_ref_size)
    indel_filter = job.wrapJobFn(gatk_variant_filtration, select_indels.rv(),
                                 config.indel_filter_name, config.indel_filter_expression,
                                 config.genome_fasta, config.genome_fai, config.genome_dict,
                                 memory=config.xmx, disk=indel_filter_disk)

    # The CombineVariants disk requirement depends on the SNP and INDEL input VCFs and the
    # genome reference files. The combined VCF is approximately the same size as the input files.
    combine_vcfs_disk = PromisedRequirement(lambda vcf1, vcf2, ref_size: 2 * (vcf1.size + vcf2.size) + ref_size,
                                            indel_filter.rv(), snp_filter.rv(), genome_ref_size)
    combine_vcfs = job.wrapJobFn(gatk_combine_variants,
                                 {'SNPs': snp_filter.rv(), 'INDELs': indel_filter.rv()},
                                 config.genome_fasta, config.genome_fai, config.genome_dict,
                                 merge_option='UNSORTED',  # Merges variants from a single sample
                                 memory=config.xmx, disk=combine_vcfs_disk)

    job.addChild(select_snps)
    job.addChild(select_indels)
    select_snps.addChild(snp_filter)
    snp_filter.addChild(combine_vcfs)
    select_indels.addChild(indel_filter)
    indel_filter.addChild(combine_vcfs)

    # Output the hard filtered VCF
    output_dir = os.path.join(config.output_dir, uuid)
    output_filename = '%s.hard_filter%s.vcf' % (uuid, config.suffix)
    output_vcf = job.wrapJobFn(output_file_job, output_filename, combine_vcfs.rv(), output_dir,
                               s3_key_path=config.ssec,
                               disk=PromisedRequirement(lambda x: x.size, combine_vcfs.rv()))
    combine_vcfs.addChild(output_vcf)
    return combine_vcfs.rv()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def download_and_transfer_sample(job, sample, inputs): """ Downloads a sample from CGHub via GeneTorrent, then uses S3AM to transfer it to S3 input_args: dict Dictionary of input arguments analysis_id: str An analysis ID for a sample in CGHub """
    analysis_id = sample[0]
    work_dir = job.fileStore.getLocalTempDir()
    folder_path = os.path.join(work_dir, os.path.basename(analysis_id))
    # Acquire genetorrent key and download sample
    shutil.copy(inputs['genetorrent_key'], os.path.join(work_dir, 'cghub.key'))
    parameters = ['-vv', '-c', 'cghub.key', '-d', analysis_id]
    docker_call(job=job, tool='quay.io/ucsc_cgl/genetorrent:3.8.7--9911761265b6f08bc3ef09f53af05f56848d805b',
                work_dir=work_dir, parameters=parameters)
    try:
        sample = glob.glob(os.path.join(folder_path, '*tar*'))[0]
    except IndexError as e:
        # Indexing an empty glob result raises IndexError
        print 'No tarfile found inside of folder: {}'.format(e)
        raise
    # Upload sample to S3AM
    key_path = inputs['ssec']
    if sample.endswith('gz'):
        sample_name = analysis_id + '.tar.gz'
        shutil.move(sample, os.path.join(work_dir, sample_name))
    else:
        sample_name = analysis_id + '.tar'
        shutil.move(sample, os.path.join(work_dir, sample_name))
    # Parse s3_dir to get bucket and s3 path
    s3_dir = inputs['s3_dir']
    bucket_name = s3_dir.lstrip('/').split('/')[0]
    base_url = 'https://s3-us-west-2.amazonaws.com/'
    url = os.path.join(base_url, bucket_name, sample_name)
    # Generate keyfile for upload
    with open(os.path.join(work_dir, 'temp.key'), 'wb') as f_out:
        f_out.write(generate_unique_key(key_path, url))
    # Upload to S3 via S3AM
    s3am_command = ['s3am', 'upload',
                    '--sse-key-file', os.path.join(work_dir, 'temp.key'),
                    'file://{}'.format(os.path.join(work_dir, sample_name)),
                    's3://' + bucket_name + '/']
    subprocess.check_call(s3am_command)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def main(): """ This is a Toil pipeline to transfer TCGA data into an S3 Bucket Data is pulled down with Genetorrent and transferred to S3 via S3AM. """
    # Define Parser object and add to toil
    parser = build_parser()
    Job.Runner.addToilOptions(parser)
    args = parser.parse_args()
    # Store inputs from argparse
    inputs = {'genetorrent': args.genetorrent,
              'genetorrent_key': args.genetorrent_key,
              'ssec': args.ssec,
              's3_dir': args.s3_dir}
    # Sanity checks
    if args.ssec:
        assert os.path.isfile(args.ssec)
    if args.genetorrent:
        assert os.path.isfile(args.genetorrent)
    if args.genetorrent_key:
        assert os.path.isfile(args.genetorrent_key)
    samples = parse_genetorrent(args.genetorrent)
    # Start pipeline
    # map_job accepts a function, an iterable, and *args. The function is launched as a child
    # process with one element from the iterable and *args, which in turn spawns a tree of child jobs.
    Job.Runner.startToil(Job.wrapJobFn(map_job, download_and_transfer_sample, samples, inputs), args)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def validate_ip(s): """Validate a hexidecimal IPv6 ip address. True True True True True True True True False False False Traceback (most recent call last): TypeError: expected string or buffer True :param s: String to validate as a hexidecimal IPv6 ip address. :type s: str :returns: ``True`` if a valid hexidecimal IPv6 ip address, ``False`` otherwise. :raises: TypeError """
    if _HEX_RE.match(s):
        # Pure hextet form; at most one '::' is allowed
        return len(s.split('::')) <= 2
    if _DOTTED_QUAD_RE.match(s):
        # Mixed form with an embedded dotted-quad IPv4 suffix
        halves = s.split('::')
        if len(halves) > 2:
            return False
        hextets = s.split(':')
        quads = hextets[-1].split('.')
        for q in quads:
            if int(q) > 255:
                return False
        return True
    return False
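The doctest inputs for this docstring did not survive in this dump; the calls below are an illustrative sketch of the validator's behaviour, assuming it is exposed as an iptools-style ipv6 module (the import path is an assumption, not stated in the source):

# Illustrative only -- assumes this validate_ip is importable as iptools.ipv6.validate_ip
from iptools import ipv6

print(ipv6.validate_ip('::1'))                 # True: hextet form with a single '::'
print(ipv6.validate_ip('::ffff:192.168.0.1'))  # True: embedded dotted-quad IPv4 suffix
print(ipv6.validate_ip('1::2::3'))             # False: more than one '::'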
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def ip2long(ip): """Convert a hexidecimal IPv6 address to a network byte order 128-bit integer. True True True True True True True True True True :param ip: Hexidecimal IPv6 address :type ip: str :returns: Network byte order 128-bit integer or ``None`` if ip is invalid. """
    if not validate_ip(ip):
        return None

    if '.' in ip:
        # convert IPv4 suffix to hex
        chunks = ip.split(':')
        v4_int = ipv4.ip2long(chunks.pop())
        if v4_int is None:
            return None
        chunks.append('%x' % ((v4_int >> 16) & 0xffff))
        chunks.append('%x' % (v4_int & 0xffff))
        ip = ':'.join(chunks)

    halves = ip.split('::')
    hextets = halves[0].split(':')
    if len(halves) == 2:
        h2 = halves[1].split(':')
        for z in range(8 - (len(hextets) + len(h2))):
            hextets.append('0')
        for h in h2:
            hextets.append(h)
    # end if

    lngip = 0
    for h in hextets:
        if '' == h:
            h = '0'
        lngip = (lngip << 16) | int(h, 16)
    return lngip
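A small sketch of the conversion, including the IPv4-suffix path where the dotted-quad tail becomes the low 32 bits (again assuming the iptools-style module layout):

from iptools import ipv6

print(ipv6.ip2long('::1'))               # 1
print(ipv6.ip2long('::ffff:127.0.0.1'))  # 0xffff7f000001 == 281472812449793
print(ipv6.ip2long('not-an-ip'))         # None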
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def long2ip(l, rfc1924=False): """Convert a network byte order 128-bit integer to a canonical IPv6 address. '::7f00:1' '2001:db8::1:0:0:1' '::' 'ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff' Traceback (most recent call last): TypeError: unsupported operand type(s) for >>: 'NoneType' and 'int' Traceback (most recent call last): TypeError: expected int between 0 and <really big int> inclusive Traceback (most recent call last): TypeError: expected int between 0 and <really big int> inclusive '4)+k&C#VzJ4br>0wv%Yp' '00000000000000000000' :param l: Network byte order 128-bit integer. :type l: int :param rfc1924: Encode in RFC 1924 notation (base 85) :type rfc1924: bool :returns: Canonical IPv6 address (eg. '::1'). :raises: TypeError """
    if MAX_IP < l or l < MIN_IP:
        raise TypeError(
            "expected int between %d and %d inclusive" % (MIN_IP, MAX_IP))

    if rfc1924:
        return long2rfc1924(l)

    # format as one big hex value
    hex_str = '%032x' % l
    # split into double octet chunks without padding zeros
    hextets = ['%x' % int(hex_str[x:x + 4], 16) for x in range(0, 32, 4)]

    # find and remove left most longest run of zeros
    dc_start, dc_len = (-1, 0)
    run_start, run_len = (-1, 0)
    for idx, hextet in enumerate(hextets):
        if '0' == hextet:
            run_len += 1
            if -1 == run_start:
                run_start = idx
            if run_len > dc_len:
                dc_len, dc_start = (run_len, run_start)
        else:
            run_len, run_start = (0, -1)
    # end for

    if dc_len > 1:
        dc_end = dc_start + dc_len
        if dc_end == len(hextets):
            hextets += ['']
        hextets[dc_start:dc_end] = ['']
        if dc_start == 0:
            hextets = [''] + hextets
    # end if

    return ':'.join(hextets)
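The values below are taken from the outputs listed in the docstring above, paired with inputs that reproduce them (module path assumed, as before):

from iptools import ipv6

print(ipv6.long2ip(2130706433))          # '::7f00:1'
print(ipv6.long2ip(0))                   # '::'
print(ipv6.long2ip(0, rfc1924=True))     # '00000000000000000000'
print(ipv6.long2ip(2 ** 128 - 1))        # 'ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff'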
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def long2rfc1924(l): """Convert a network byte order 128-bit integer to an rfc1924 IPv6 address. '4)+k&C#VzJ4br>0wv%Yp' '00000000000000000000' '=r54lj&NUUO~Hi%c2ym0' :param l: Network byte order 128-bit integer. :type l: int :returns: RFC 1924 IPv6 address :raises: TypeError """
    if MAX_IP < l or l < MIN_IP:
        raise TypeError(
            "expected int between %d and %d inclusive" % (MIN_IP, MAX_IP))
    o = []
    r = l
    while r > 85:
        o.append(_RFC1924_ALPHABET[r % 85])
        r = r // 85
    o.append(_RFC1924_ALPHABET[r])
    return ''.join(reversed(o)).zfill(20)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def rfc19242long(s): """Convert an RFC 1924 IPv6 address to a network byte order 128-bit integer. True True True True True :param ip: RFC 1924 IPv6 address :type ip: str :returns: Network byte order 128-bit integer or ``None`` if ip is invalid. """
    global _RFC1924_REV
    if not _RFC1924_RE.match(s):
        return None
    if _RFC1924_REV is None:
        _RFC1924_REV = {v: k for k, v in enumerate(_RFC1924_ALPHABET)}
    x = 0
    for c in s:
        x = x * 85 + _RFC1924_REV[c]
    if x > MAX_IP:
        return None
    return x
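A round-trip sketch through the base-85 encoder and this decoder, using the RFC 1924 worked example that also appears among the long2rfc1924 docstring outputs (import path assumed):

from iptools import ipv6

n = ipv6.ip2long('1080::8:800:200c:417a')                 # the RFC 1924 example address
print(ipv6.long2rfc1924(n))                               # '4)+k&C#VzJ4br>0wv%Yp'
print(ipv6.rfc19242long('4)+k&C#VzJ4br>0wv%Yp') == n)     # True
print(ipv6.rfc19242long('not a valid string'))            # None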
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def validate_cidr(s): """Validate a CIDR notation ip address. The string is considered a valid CIDR address if it consists of a valid IPv6 address in hextet format followed by a forward slash (/) and a bit mask length (0-128). True True True True False False Traceback (most recent call last): TypeError: expected string or buffer :param s: String to validate as a CIDR notation ip address. :type s: str :returns: ``True`` if a valid CIDR address, ``False`` otherwise. :raises: TypeError """
    if _CIDR_RE.match(s):
        ip, mask = s.split('/')
        if validate_ip(ip):
            if int(mask) > 128:
                return False
        else:
            return False
        return True
    return False
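Illustrative checks (the original doctest inputs were stripped; the addresses are assumptions chosen to exercise each branch):

from iptools import ipv6

print(ipv6.validate_cidr('::1/128'))        # True
print(ipv6.validate_cidr('2001:db8::/32'))  # True
print(ipv6.validate_cidr('::1/129'))        # False: mask may not exceed 128
print(ipv6.validate_cidr('::1'))            # False: no mask given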
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def download_sample(job, sample, inputs): """ Download the input sample :param JobFunctionWrappingJob job: passed by Toil automatically :param tuple sample: Tuple containing (UUID,URL) of a sample :param Namespace inputs: Stores input arguments (see main) """
    uuid, url = sample
    job.fileStore.logToMaster('Downloading sample: {}'.format(uuid))
    # Download sample
    tar_id = job.addChildJobFn(download_url_job, url, s3_key_path=inputs.ssec, disk='30G').rv()
    # Create copy of inputs for each sample
    sample_inputs = argparse.Namespace(**vars(inputs))
    sample_inputs.uuid = uuid
    sample_inputs.cores = multiprocessing.cpu_count()
    # Call children and follow-on jobs
    job.addFollowOnJobFn(process_sample, sample_inputs, tar_id, cores=2, disk='60G')
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def cutadapt(job, inputs, r1_id, r2_id): """ Filters out adapters that may be left in the RNA-seq files :param JobFunctionWrappingJob job: passed by Toil automatically :param Namespace inputs: Stores input arguments (see main) :param str r1_id: FileStore ID of read 1 fastq :param str r2_id: FileStore ID of read 2 fastq """
    job.fileStore.logToMaster('Running CutAdapt: {}'.format(inputs.uuid))
    work_dir = job.fileStore.getLocalTempDir()
    inputs.improper_pair = None
    # Retrieve files
    job.fileStore.readGlobalFile(r1_id, os.path.join(work_dir, 'R1.fastq'))
    job.fileStore.readGlobalFile(r2_id, os.path.join(work_dir, 'R2.fastq'))
    # Cutadapt parameters
    parameters = ['-a', inputs.fwd_3pr_adapter,
                  '-m', '35',
                  '-A', inputs.rev_3pr_adapter,
                  '-o', '/data/R1_cutadapt.fastq',
                  '-p', '/data/R2_cutadapt.fastq',
                  '/data/R1.fastq', '/data/R2.fastq']
    # Call: CutAdapt
    base_docker_call = 'docker run --log-driver=none --rm -v {}:/data'.format(work_dir).split()
    if inputs.sudo:
        base_docker_call = ['sudo'] + base_docker_call
    tool = 'quay.io/ucsc_cgl/cutadapt:1.9--6bd44edd2b8f8f17e25c5a268fedaab65fa851d2'
    p = subprocess.Popen(base_docker_call + [tool] + parameters,
                         stderr=subprocess.PIPE, stdout=subprocess.PIPE)
    stdout, stderr = p.communicate()
    if p.returncode != 0:
        if 'improperly paired' in stderr:
            inputs.improper_pair = True
            shutil.move(os.path.join(work_dir, 'R1.fastq'), os.path.join(work_dir, 'R1_cutadapt.fastq'))
            shutil.move(os.path.join(work_dir, 'R2.fastq'), os.path.join(work_dir, 'R2_cutadapt.fastq'))
    # Write to fileStore
    if inputs.improper_pair:
        r1_cutadapt = r1_id
        r2_cutadapt = r2_id
    else:
        r1_cutadapt = job.fileStore.writeGlobalFile(os.path.join(work_dir, 'R1_cutadapt.fastq'))
        r2_cutadapt = job.fileStore.writeGlobalFile(os.path.join(work_dir, 'R2_cutadapt.fastq'))
        job.fileStore.deleteGlobalFile(r1_id)
        job.fileStore.deleteGlobalFile(r2_id)
    # start STAR
    cores = min(inputs.cores, 16)
    job.addChildJobFn(star, inputs, r1_cutadapt, r2_cutadapt,
                      cores=cores, disk='100G', memory='40G').rv()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def star(job, inputs, r1_cutadapt, r2_cutadapt): """ Performs alignment of fastqs to BAM via STAR :param JobFunctionWrappingJob job: passed by Toil automatically :param Namespace inputs: Stores input arguments (see main) :param str r1_cutadapt: FileStore ID of read 1 fastq :param str r2_cutadapt: FileStore ID of read 2 fastq """
    job.fileStore.logToMaster('Aligning with STAR: {}'.format(inputs.uuid))
    work_dir = job.fileStore.getLocalTempDir()
    cores = min(inputs.cores, 16)
    # Retrieve files
    job.fileStore.readGlobalFile(r1_cutadapt, os.path.join(work_dir, 'R1_cutadapt.fastq'))
    job.fileStore.readGlobalFile(r2_cutadapt, os.path.join(work_dir, 'R2_cutadapt.fastq'))
    # Get starIndex
    download_url(job=job, url=inputs.star_index, work_dir=work_dir, name='starIndex.tar.gz')
    subprocess.check_call(['tar', '-xvf', os.path.join(work_dir, 'starIndex.tar.gz'), '-C', work_dir])
    # Parameters
    parameters = ['--runThreadN', str(cores),
                  '--genomeDir', '/data/starIndex',
                  '--outFileNamePrefix', 'rna',
                  '--outSAMtype', 'BAM', 'SortedByCoordinate',
                  '--outSAMunmapped', 'Within',
                  '--quantMode', 'TranscriptomeSAM',
                  '--outSAMattributes', 'NH', 'HI', 'AS', 'NM', 'MD',
                  '--outFilterType', 'BySJout',
                  '--outFilterMultimapNmax', '20',
                  '--outFilterMismatchNmax', '999',
                  '--outFilterMismatchNoverReadLmax', '0.04',
                  '--alignIntronMin', '20',
                  '--alignIntronMax', '1000000',
                  '--alignMatesGapMax', '1000000',
                  '--alignSJoverhangMin', '8',
                  '--alignSJDBoverhangMin', '1',
                  '--sjdbScore', '1',
                  '--readFilesIn', '/data/R1_cutadapt.fastq', '/data/R2_cutadapt.fastq']
    # Call: STAR Map
    docker_call(job=job, tool='quay.io/ucsc_cgl/star:2.4.2a--bcbd5122b69ff6ac4ef61958e47bde94001cfe80',
                work_dir=work_dir, parameters=parameters)
    # Call Samtools Index
    index_command = ['index', '/data/rnaAligned.sortedByCoord.out.bam']
    docker_call(job=job, work_dir=work_dir, parameters=index_command,
                tool='quay.io/ucsc_cgl/samtools:1.3--256539928ea162949d8a65ca5c79a72ef557ce7c')
    # fileStore
    bam_id = job.fileStore.writeGlobalFile(os.path.join(work_dir, 'rnaAligned.sortedByCoord.out.bam'))
    bai_id = job.fileStore.writeGlobalFile(os.path.join(work_dir, 'rnaAligned.sortedByCoord.out.bam.bai'))
    job.fileStore.deleteGlobalFile(r1_cutadapt)
    job.fileStore.deleteGlobalFile(r2_cutadapt)
    # Launch children and follow-on
    vcqc_id = job.addChildJobFn(variant_calling_and_qc, inputs, bam_id, bai_id, cores=2, disk='30G').rv()
    spladder_id = job.addChildJobFn(spladder, inputs, bam_id, bai_id, disk='30G').rv()
    job.addFollowOnJobFn(consolidate_output_tarballs, inputs, vcqc_id, spladder_id, disk='30G')
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def variant_calling_and_qc(job, inputs, bam_id, bai_id): """ Perform variant calling with samtools and QC with CheckBias :param JobFunctionWrappingJob job: passed by Toil automatically :param Namespace inputs: Stores input arguments (see main) :param str bam_id: FileStore ID of bam :param str bai_id: FileStore ID of bam index file :return: FileStore ID of qc tarball :rtype: str """
    job.fileStore.logToMaster('Variant calling and QC: {}'.format(inputs.uuid))
    work_dir = job.fileStore.getLocalTempDir()
    # Pull in alignment.bam from fileStore
    job.fileStore.readGlobalFile(bam_id, os.path.join(work_dir, 'alignment.bam'))
    job.fileStore.readGlobalFile(bai_id, os.path.join(work_dir, 'alignment.bam.bai'))
    # Download input files
    input_info = [(inputs.genome, 'genome.fa'), (inputs.positions, 'positions.tsv'),
                  (inputs.genome_index, 'genome.fa.fai'), (inputs.gtf, 'annotation.gtf'),
                  (inputs.gtf_m53, 'annotation.m53')]
    for url, fname in input_info:
        download_url(job=job, url=url, work_dir=work_dir, name=fname)

    # Part 1: Variant Calling
    variant_command = ['mpileup',
                       '-f', 'genome.fa',
                       '-l', 'positions.tsv',
                       '-v', 'alignment.bam',
                       '-t', 'DP,SP,INFO/AD,INFO/ADF,INFO/ADR,INFO/DPR,SP',
                       '-o', '/data/output.vcf.gz']
    docker_call(job=job, work_dir=work_dir, parameters=variant_command,
                tool='quay.io/ucsc_cgl/samtools:1.3--256539928ea162949d8a65ca5c79a72ef557ce7c')

    # Part 2: QC
    qc_command = ['-o', 'qc',
                  '-n', 'alignment.bam',
                  '-a', 'annotation.gtf',
                  '-m', 'annotation.m53']
    docker_call(job=job, work_dir=work_dir, parameters=qc_command,
                tool='jvivian/checkbias:612f129--b08a1fb6526a620bbb0304b08356f2ae7c3c0ec3')

    # Write output to fileStore and return ids
    output_tsv = glob(os.path.join(work_dir, '*counts.tsv*'))[0]
    output_vcf = os.path.join(work_dir, 'output.vcf.gz')
    tarball_files('vcqc.tar.gz', file_paths=[output_tsv, output_vcf], output_dir=work_dir)
    return job.fileStore.writeGlobalFile(os.path.join(work_dir, 'vcqc.tar.gz'))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def spladder(job, inputs, bam_id, bai_id): """ Run SplAdder to detect and quantify alternative splicing events :param JobFunctionWrappingJob job: passed by Toil automatically :param Namespace inputs: Stores input arguments (see main) :param str bam_id: FileStore ID of bam :param str bai_id: FileStore ID of bam index file :return: FileStore ID of SplAdder tarball :rtype: str """
    job.fileStore.logToMaster('SplAdder: {}'.format(inputs.uuid))
    work_dir = job.fileStore.getLocalTempDir()
    # Pull in alignment.bam from fileStore
    job.fileStore.readGlobalFile(bam_id, os.path.join(work_dir, 'alignment.bam'))
    job.fileStore.readGlobalFile(bai_id, os.path.join(work_dir, 'alignment.bam.bai'))
    # Download input file
    download_url(job=job, url=inputs.gtf, work_dir=work_dir, name='annotation.gtf')
    download_url(job=job, url=inputs.gtf_pickle, work_dir=work_dir, name='annotation.gtf.pickle')
    # Call Spladder
    command = ['--insert_ir=y', '--insert_es=y', '--insert_ni=y',
               '--remove_se=n', '--validate_sg=n',
               '-b', 'alignment.bam',
               '-o ', '/data',
               '-a', 'annotation.gtf',
               '-v', 'y',
               '-c', '3',
               '-M', 'single',
               '-T', 'n',
               '-n', '50',
               '-P', 'y',
               '-p', 'n',
               '--sparse_bam', 'y']
    docker_call(job=job, work_dir=work_dir, parameters=command, sudo=inputs.sudo, tool='jvivian/spladder:1.0')
    # Write output to fileStore and return ids
    output_pickle = os.path.join(work_dir, ' ', 'spladder', 'genes_graph_conf3.alignment.pickle')
    if not os.path.exists(output_pickle):
        matches = []
        for root, dirnames, filenames in os.walk(work_dir):
            for filename in fnmatch.filter(filenames, '*genes_graph*'):
                matches.append(os.path.join(root, filename))
        if matches:
            output_pickle = matches[0]
        else:
            raise RuntimeError("Couldn't find genes file!")
    output_filt = os.path.join(work_dir, 'alignment.filt.hdf5')
    output = os.path.join(work_dir, 'alignment.hdf5')
    print os.listdir(work_dir)
    tarball_files('spladder.tar.gz', file_paths=[output_pickle, output_filt, output], output_dir=work_dir)
    return job.fileStore.writeGlobalFile(os.path.join(work_dir, 'spladder.tar.gz'))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def consolidate_output_tarballs(job, inputs, vcqc_id, spladder_id): """ Combine the contents of separate tarballs into one. :param JobFunctionWrappingJob job: passed by Toil automatically :param Namespace inputs: Stores input arguments (see main) :param str vcqc_id: FileStore ID of variant calling and QC tarball :param str spladder_id: FileStore ID of spladder tarball """
    job.fileStore.logToMaster('Consolidating files and uploading: {}'.format(inputs.uuid))
    work_dir = job.fileStore.getLocalTempDir()
    # Retrieve IDs
    uuid = inputs.uuid
    # Unpack IDs
    # Retrieve output file paths to consolidate
    vcqc_tar = job.fileStore.readGlobalFile(vcqc_id, os.path.join(work_dir, 'vcqc.tar.gz'))
    spladder_tar = job.fileStore.readGlobalFile(spladder_id, os.path.join(work_dir, 'spladder.tar.gz'))
    # I/O
    fname = uuid + '.tar.gz' if not inputs.improper_pair else 'IMPROPER_PAIR' + uuid + '.tar.gz'
    out_tar = os.path.join(work_dir, fname)
    # Consolidate separate tarballs into one
    with tarfile.open(os.path.join(work_dir, out_tar), 'w:gz') as f_out:
        for tar in [vcqc_tar, spladder_tar]:
            with tarfile.open(tar, 'r') as f_in:
                for tarinfo in f_in:
                    with closing(f_in.extractfile(tarinfo)) as f_in_file:
                        if tar == vcqc_tar:
                            tarinfo.name = os.path.join(uuid, 'variants_and_qc', os.path.basename(tarinfo.name))
                        else:
                            tarinfo.name = os.path.join(uuid, 'spladder', os.path.basename(tarinfo.name))
                        f_out.addfile(tarinfo, fileobj=f_in_file)
    # Move to output directory
    if inputs.output_dir:
        mkdir_p(inputs.output_dir)
        shutil.copy(out_tar, os.path.join(inputs.output_dir, os.path.basename(out_tar)))
    # Upload to S3
    if inputs.output_s3_dir:
        out_id = job.fileStore.writeGlobalFile(out_tar)
        job.addChildJobFn(s3am_upload_job, file_id=out_id, s3_dir=inputs.output_s3_dir,
                          file_name=fname, key_path=inputs.ssec, cores=inputs.cores)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def main(): """ This Toil pipeline aligns reads and performs alternative splicing analysis. Please read the README.md located in the same directory for run instructions. """
    # Define Parser object and add to toil
    url_prefix = 'https://s3-us-west-2.amazonaws.com/cgl-pipeline-inputs/'
    parser = argparse.ArgumentParser(description=main.__doc__, formatter_class=argparse.RawTextHelpFormatter)
    parser.add_argument('--config', required=True,
                        help='Path to configuration file for samples, one per line. UUID,URL_to_bamfile. '
                             'The URL may be a standard "http://", a "file://<abs_path>", or "s3://<bucket>/<key>"')
    parser.add_argument('--gtf', help='URL to annotation GTF file',
                        default=url_prefix + 'rnaseq_cgl/gencode.v23.annotation.gtf')
    parser.add_argument('--gtf-pickle', help='Pickled GTF file',
                        default=url_prefix + 'spladder/gencode.v23.annotation.gtf.pickle')
    parser.add_argument('--gtf-m53', help='M53 preprocessing annotation table',
                        default=url_prefix + 'spladder/gencode.v23.annotation.gtf.m53')
    parser.add_argument('--positions', help='URL to SNP positions over genes file (TSV)',
                        default=url_prefix + 'spladder/positions_fixed.tsv')
    parser.add_argument('--genome', help='URL to Genome fasta',
                        default=url_prefix + 'rnaseq_cgl/hg38_no_alt.fa')
    parser.add_argument('--genome-index', help='Index file (fai) of genome',
                        default=url_prefix + 'spladder/hg38_no_alt.fa.fai')
    parser.add_argument('--ssec', default=None, help='Path to master key used for downloading encrypted files.')
    parser.add_argument('--output-s3-dir', default=None, help='S3 Directory of the form: s3://bucket/directory')
    parser.add_argument('--output-dir', default=None, help='full path where final results will be output')
    parser.add_argument('--sudo', action='store_true', default=False,
                        help='Set flag if sudo is required to run Docker.')
    parser.add_argument('--star-index', help='URL to download STAR Index built from HG38/gencodev23 annotation.',
                        default=url_prefix + 'rnaseq_cgl/starIndex_hg38_no_alt.tar.gz')
    parser.add_argument('--fwd-3pr-adapter', help="Sequence for the FWD 3' Read Adapter.", default='AGATCGGAAGAG')
    parser.add_argument('--rev-3pr-adapter', help="Sequence for the REV 3' Read Adapter.", default='AGATCGGAAGAG')
    Job.Runner.addToilOptions(parser)
    args = parser.parse_args()
    # Sanity Checks
    if args.config:
        assert os.path.isfile(args.config), 'Config not found at: {}'.format(args.config)
    if args.ssec:
        assert os.path.isfile(args.ssec), 'Encryption key not found at: {}'.format(args.ssec)
    if args.output_s3_dir:
        assert args.output_s3_dir.startswith('s3://'), 'Wrong format for output s3 directory'
    # Program checks
    for program in ['curl', 'docker']:
        assert which(program), 'Program "{}" must be installed on every node.'.format(program)

    Job.Runner.startToil(Job.wrapJobFn(parse_input_samples, args), args)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def validate_ip(s): """Validate a dotted-quad ip address. The string is considered a valid dotted-quad address if it consists of one to four octets (0-255) seperated by periods (.). True True False True Traceback (most recent call last): TypeError: expected string or buffer :param s: String to validate as a dotted-quad ip address. :type s: str :returns: ``True`` if a valid dotted-quad ip address, ``False`` otherwise. :raises: TypeError """
    if _DOTTED_QUAD_RE.match(s):
        quads = s.split('.')
        for q in quads:
            if int(q) > 255:
                return False
        return True
    return False
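Illustrative calls (the original doctest inputs were lost in this dump; the module path is assumed to be the iptools-style ipv4 module these helpers appear to come from):

from iptools import ipv4

print(ipv4.validate_ip('127.0.0.1'))  # True
print(ipv4.validate_ip('127.1'))      # True: partial one-to-four-octet forms are allowed
print(ipv4.validate_ip('300.1.1.1'))  # False: octet out of range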
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def validate_netmask(s): """Validate that a dotted-quad ip address is a valid netmask. True True True True True False False False :param s: String to validate as a dotted-quad notation netmask. :type s: str :returns: ``True`` if a valid netmask, ``False`` otherwise. :raises: TypeError """
    if validate_ip(s):
        # Convert to binary string, strip '0b' prefix, 0 pad to 32 bits
        mask = bin(ip2network(s))[2:].zfill(32)
        # all left most bits must be 1, all right most must be 0
        seen0 = False
        for c in mask:
            if '1' == c:
                if seen0:
                    return False
            else:
                seen0 = True
        return True
    else:
        return False
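A quick sketch of the left-packed-ones rule in practice (masks chosen for illustration; import path assumed as above):

from iptools import ipv4

print(ipv4.validate_netmask('255.255.255.0'))  # True: 24 one-bits followed by 8 zero-bits
print(ipv4.validate_netmask('0.0.0.0'))        # True: no one-bits at all is allowed
print(ipv4.validate_netmask('255.0.255.0'))    # False: a 1 bit appears after a 0 bit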
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def validate_subnet(s): """Validate a dotted-quad ip address including a netmask. The string is considered a valid dotted-quad address with netmask if it consists of one to four octets (0-255) seperated by periods (.) followed by a forward slash (/) and a subnet bitmask which is expressed in dotted-quad format. True True True False False False Traceback (most recent call last): TypeError: expected string or unicode :param s: String to validate as a dotted-quad ip address with netmask. :type s: str :returns: ``True`` if a valid dotted-quad ip address with netmask, ``False`` otherwise. :raises: TypeError """
    if isinstance(s, basestring):
        if '/' in s:
            start, mask = s.split('/', 2)
            return validate_ip(start) and validate_netmask(mask)
        else:
            return False
    raise TypeError("expected string or unicode")
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def ip2long(ip): """Convert a dotted-quad ip address to a network byte order 32-bit integer. 2130706433 2130706433 2130706432 True :param ip: Dotted-quad ip address (eg. '127.0.0.1'). :type ip: str :returns: Network byte order 32-bit integer or ``None`` if ip is invalid. """
    if not validate_ip(ip):
        return None
    quads = ip.split('.')
    if len(quads) == 1:
        # only a network quad
        quads = quads + [0, 0, 0]
    elif len(quads) < 4:
        # partial form, last supplied quad is host address, rest is network
        host = quads[-1:]
        quads = quads[:-1] + [0, ] * (4 - len(quads)) + host

    lngip = 0
    for q in quads:
        lngip = (lngip << 8) | int(q)
    return lngip
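The integers below are the ones listed in the docstring, paired with inputs that produce them; '127.1' expands network-plus-host style to 127.0.0.1 (import path assumed):

from iptools import ipv4

print(ipv4.ip2long('127.0.0.1'))  # 2130706433
print(ipv4.ip2long('127.1'))      # 2130706433  (127.0.0.1)
print(ipv4.ip2long('127'))        # 2130706432  (127.0.0.0)
print(ipv4.ip2long('bogus'))      # None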
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def ip2network(ip): """Convert a dotted-quad ip to base network number. This differs from :func:`ip2long` in that partial addresses are treated as all network instead of network plus host (eg. '127.1' expands to '127.1.0.0') :param ip: dotted-quad ip address (eg. '127.0.0.1'). :type ip: str :returns: Network byte order 32-bit integer or `None` if ip is invalid. """
    if not validate_ip(ip):
        return None
    quads = ip.split('.')
    netw = 0
    for i in range(4):
        netw = (netw << 8) | int(len(quads) > i and quads[i] or 0)
    return netw
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def long2ip(l): """Convert a network byte order 32-bit integer to a dotted quad ip address. '127.0.0.1' '0.0.0.0' '255.255.255.255' Traceback (most recent call last): TypeError: unsupported operand type(s) for >>: 'NoneType' and 'int' Traceback (most recent call last): TypeError: expected int between 0 and 4294967295 inclusive Traceback (most recent call last): TypeError: expected int between 0 and 4294967295 inclusive Traceback (most recent call last): TypeError: expected int between 0 and 4294967295 inclusive :param l: Network byte order 32-bit integer. :type l: int :returns: Dotted-quad ip address (eg. '127.0.0.1'). :raises: TypeError """
    if MAX_IP < l or l < MIN_IP:
        raise TypeError(
            "expected int between %d and %d inclusive" % (MIN_IP, MAX_IP))
    return '%d.%d.%d.%d' % (l >> 24 & 255, l >> 16 & 255, l >> 8 & 255, l & 255)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def subnet2block(subnet): """Convert a dotted-quad ip address including a netmask into a tuple containing the network block start and end addresses. ('127.0.0.1', '127.0.0.1') ('127.0.0.0', '127.255.255.255') ('127.0.0.0', '127.0.255.255') ('127.1.0.0', '127.1.0.255') ('127.0.0.0', '127.0.0.7') ('0.0.0.0', '255.255.255.255') :param subnet: dotted-quad ip address with netmask (eg. '127.0.0.1/255.0.0.0'). :type subnet: str :returns: Tuple of block (start, end) or ``None`` if invalid. :raises: TypeError """
    if not validate_subnet(subnet):
        return None

    ip, netmask = subnet.split('/')
    prefix = netmask2prefix(netmask)

    # convert dotted-quad ip to base network number
    network = ip2network(ip)

    return _block_from_ip_and_prefix(network, prefix)
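Block expansion in practice; the first two results match tuples listed in the docstring, and the inputs shown are assumptions consistent with them (import path assumed):

from iptools import ipv4

print(ipv4.subnet2block('127.0.0.1/255.255.255.255'))  # ('127.0.0.1', '127.0.0.1')
print(ipv4.subnet2block('127.0.0.1/255.0.0.0'))        # ('127.0.0.0', '127.255.255.255')
print(ipv4.subnet2block('127.0.0.1/255.0.255.0'))      # None: not a valid netmask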
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def download_shared_files(job, samples, config): """ Downloads files shared by all samples in the pipeline :param JobFunctionWrappingJob job: passed automatically by Toil :param Namespace config: Argparse Namespace object containing argument inputs :param list[list] samples: A nested list of samples containing sample information """
    job.fileStore.logToMaster('Downloaded shared files')
    file_names = ['reference', 'phase', 'mills', 'dbsnp', 'cosmic']
    urls = [config.reference, config.phase, config.mills, config.dbsnp, config.cosmic]
    for name, url in zip(file_names, urls):
        if url:
            vars(config)[name] = job.addChildJobFn(download_url_job, url=url).rv()
    job.addFollowOnJobFn(reference_preprocessing, samples, config)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def reference_preprocessing(job, samples, config): """ Spawn the jobs that create index and dict file for reference :param JobFunctionWrappingJob job: passed automatically by Toil :param Namespace config: Argparse Namespace object containing argument inputs :param list[list] samples: A nested list of samples containing sample information """
    job.fileStore.logToMaster('Processed reference files')
    config.fai = job.addChildJobFn(run_samtools_faidx, config.reference).rv()
    config.dict = job.addChildJobFn(run_picard_create_sequence_dictionary, config.reference).rv()
    job.addFollowOnJobFn(map_job, download_sample, samples, config)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def download_sample(job, sample, config): """ Download sample and store sample specific attributes :param JobFunctionWrappingJob job: passed automatically by Toil :param list sample: Contains uuid, normal URL, and tumor URL :param Namespace config: Argparse Namespace object containing argument inputs """
    # Create copy of config that is sample specific
    config = argparse.Namespace(**vars(config))
    uuid, normal_url, tumor_url = sample
    job.fileStore.logToMaster('Downloaded sample: ' + uuid)
    config.uuid = uuid
    config.normal = normal_url
    config.tumor = tumor_url
    config.cores = min(config.maxCores, int(multiprocessing.cpu_count()))
    disk = '1G' if config.ci_test else '20G'
    # Download sample bams and launch pipeline
    config.normal_bam = job.addChildJobFn(download_url_job, url=config.normal, s3_key_path=config.ssec,
                                          cghub_key_path=config.gtkey, disk=disk).rv()
    config.tumor_bam = job.addChildJobFn(download_url_job, url=config.tumor, s3_key_path=config.ssec,
                                         cghub_key_path=config.gtkey, disk=disk).rv()
    job.addFollowOnJobFn(index_bams, config)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def index_bams(job, config): """ Convenience job for handling bam indexing to make the workflow declaration cleaner :param JobFunctionWrappingJob job: passed automatically by Toil :param Namespace config: Argparse Namespace object containing argument inputs """
    job.fileStore.logToMaster('Indexed sample BAMS: ' + config.uuid)
    disk = '1G' if config.ci_test else '20G'
    config.normal_bai = job.addChildJobFn(run_samtools_index, config.normal_bam, cores=1, disk=disk).rv()
    config.tumor_bai = job.addChildJobFn(run_samtools_index, config.tumor_bam, cores=1, disk=disk).rv()
    job.addFollowOnJobFn(preprocessing_declaration, config)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def preprocessing_declaration(job, config): """ Declare jobs related to preprocessing :param JobFunctionWrappingJob job: passed automatically by Toil :param Namespace config: Argparse Namespace object containing argument inputs """
    if config.preprocessing:
        job.fileStore.logToMaster('Ran preprocessing: ' + config.uuid)
        disk = '1G' if config.ci_test else '20G'
        mem = '2G' if config.ci_test else '10G'
        processed_normal = job.wrapJobFn(run_gatk_preprocessing, config.normal_bam, config.normal_bai,
                                         config.reference, config.dict, config.fai, config.phase,
                                         config.mills, config.dbsnp, mem, cores=1, memory=mem, disk=disk)
        processed_tumor = job.wrapJobFn(run_gatk_preprocessing, config.tumor_bam, config.tumor_bai,
                                        config.reference, config.dict, config.fai, config.phase,
                                        config.mills, config.dbsnp, mem, cores=1, memory=mem, disk=disk)
        static_workflow = job.wrapJobFn(static_workflow_declaration, config,
                                        processed_normal.rv(0), processed_normal.rv(1),
                                        processed_tumor.rv(0), processed_tumor.rv(1))
        job.addChild(processed_normal)
        job.addChild(processed_tumor)
        job.addFollowOn(static_workflow)
    else:
        job.addFollowOnJobFn(static_workflow_declaration, config,
                             config.normal_bam, config.normal_bai,
                             config.tumor_bam, config.tumor_bai)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def static_workflow_declaration(job, config, normal_bam, normal_bai, tumor_bam, tumor_bai): """ Statically declare workflow so sections can be modularly repurposed :param JobFunctionWrappingJob job: passed automatically by Toil :param Namespace config: Argparse Namespace object containing argument inputs :param str normal_bam: Normal BAM FileStoreID :param str normal_bai: Normal BAM index FileStoreID :param str tumor_bam: Tumor BAM FileStoreID :param str tumor_bai: Tumor BAM Index FileStoreID """
    # Mutation and indel tool wiring
    memory = '1G' if config.ci_test else '10G'
    disk = '1G' if config.ci_test else '75G'
    mutect_results, pindel_results, muse_results = None, None, None
    if config.run_mutect:
        mutect_results = job.addChildJobFn(run_mutect, normal_bam, normal_bai, tumor_bam, tumor_bai,
                                           config.reference, config.dict, config.fai, config.cosmic,
                                           config.dbsnp, cores=1, memory=memory, disk=disk).rv()
    if config.run_pindel:
        pindel_results = job.addChildJobFn(run_pindel, normal_bam, normal_bai, tumor_bam, tumor_bai,
                                           config.reference, config.fai,
                                           cores=config.cores, memory=memory, disk=disk).rv()
    if config.run_muse:
        muse_results = job.addChildJobFn(run_muse, normal_bam, normal_bai, tumor_bam, tumor_bai,
                                         config.reference, config.dict, config.fai, config.dbsnp,
                                         cores=config.cores, memory=memory, disk=disk).rv()
    # Pass tool results (whether None or a promised return value) to consolidation step
    consolidation = job.wrapJobFn(consolidate_output, config, mutect_results, pindel_results, muse_results)
    job.addFollowOn(consolidation)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def consolidate_output(job, config, mutect, pindel, muse): """ Combine the contents of separate tarball outputs into one via streaming :param JobFunctionWrappingJob job: passed automatically by Toil :param Namespace config: Argparse Namespace object containing argument inputs :param str mutect: MuTect tarball FileStoreID :param str pindel: Pindel tarball FileStoreID :param str muse: MuSe tarball FileStoreID """
    work_dir = job.fileStore.getLocalTempDir()
    mutect_tar, pindel_tar, muse_tar = None, None, None
    if mutect:
        mutect_tar = job.fileStore.readGlobalFile(mutect, os.path.join(work_dir, 'mutect.tar.gz'))
    if pindel:
        pindel_tar = job.fileStore.readGlobalFile(pindel, os.path.join(work_dir, 'pindel.tar.gz'))
    if muse:
        muse_tar = job.fileStore.readGlobalFile(muse, os.path.join(work_dir, 'muse.tar.gz'))
    out_tar = os.path.join(work_dir, config.uuid + '.tar.gz')
    # Consolidate separate tarballs into one as streams (avoids unnecessary untaring)
    tar_list = [x for x in [mutect_tar, pindel_tar, muse_tar] if x is not None]
    with tarfile.open(os.path.join(work_dir, out_tar), 'w:gz') as f_out:
        for tar in tar_list:
            with tarfile.open(tar, 'r') as f_in:
                for tarinfo in f_in:
                    with closing(f_in.extractfile(tarinfo)) as f_in_file:
                        if tar is mutect_tar:
                            tarinfo.name = os.path.join(config.uuid, 'mutect', os.path.basename(tarinfo.name))
                        elif tar is pindel_tar:
                            tarinfo.name = os.path.join(config.uuid, 'pindel', os.path.basename(tarinfo.name))
                        else:
                            tarinfo.name = os.path.join(config.uuid, 'muse', os.path.basename(tarinfo.name))
                        f_out.addfile(tarinfo, fileobj=f_in_file)
    # Move to output location
    if urlparse(config.output_dir).scheme == 's3':
        job.fileStore.logToMaster('Uploading {} to S3: {}'.format(config.uuid, config.output_dir))
        s3am_upload(job=job, fpath=out_tar, s3_dir=config.output_dir, num_cores=config.cores)
    else:
        job.fileStore.logToMaster('Moving {} to output dir: {}'.format(config.uuid, config.output_dir))
        mkdir_p(config.output_dir)
        copy_files(file_paths=[out_tar], output_dir=config.output_dir)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def download_reference_files(job, inputs, samples): """ Downloads shared files that are used by all samples for alignment, or generates them if they were not provided. :param JobFunctionWrappingJob job: passed automatically by Toil :param Namespace inputs: Input arguments (see main) :param list[list[str, list[str, str]]] samples: Samples in the format [UUID, [URL1, URL2]] """
    # Create dictionary to store FileStoreIDs of shared input files
    shared_ids = {}
    urls = [('amb', inputs.amb), ('ann', inputs.ann), ('bwt', inputs.bwt),
            ('pac', inputs.pac), ('sa', inputs.sa)]
    # Alt file is optional and can only be provided, not generated
    if inputs.alt:
        urls.append(('alt', inputs.alt))
    # Download reference
    download_ref = job.wrapJobFn(download_url_job, inputs.ref, disk='3G')  # Human genomes are typically ~3G
    job.addChild(download_ref)
    shared_ids['ref'] = download_ref.rv()
    # If FAI is provided, download it. Otherwise, generate it
    if inputs.fai:
        shared_ids['fai'] = job.addChildJobFn(download_url_job, inputs.fai).rv()
    else:
        faidx = job.wrapJobFn(run_samtools_faidx, download_ref.rv())
        shared_ids['fai'] = download_ref.addChild(faidx).rv()
    # If all BWA index files are provided, download them. Otherwise, generate them
    if all(x[1] for x in urls):
        for name, url in urls:
            shared_ids[name] = job.addChildJobFn(download_url_job, url).rv()
    else:
        job.fileStore.logToMaster('BWA index files not provided, creating now')
        bwa_index = job.wrapJobFn(run_bwa_index, download_ref.rv())
        download_ref.addChild(bwa_index)
        for x, name in enumerate(['amb', 'ann', 'bwt', 'pac', 'sa']):
            shared_ids[name] = bwa_index.rv(x)
    # map_job distributes one sample in samples to the download_sample_and_align function
    job.addFollowOnJobFn(map_job, download_sample_and_align, samples, inputs, shared_ids)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def download_sample_and_align(job, sample, inputs, ids): """ Downloads the sample and runs BWA-kit :param JobFunctionWrappingJob job: Passed by Toil automatically :param tuple(str, list) sample: UUID and URLS for sample :param Namespace inputs: Contains input arguments :param dict ids: FileStore IDs for shared inputs """
uuid, urls = sample
r1_url, r2_url = urls if len(urls) == 2 else (urls[0], None)
job.fileStore.logToMaster('Downloaded sample: {0}. R1 {1}\nR2 {2}\nStarting BWA Run'.format(uuid, r1_url, r2_url))
# Download fastq reads into the file store
ids['r1'] = job.addChildJobFn(download_url_job, r1_url, s3_key_path=inputs.ssec, disk=inputs.file_size).rv()
if r2_url:
    ids['r2'] = job.addChildJobFn(download_url_job, r2_url, s3_key_path=inputs.ssec, disk=inputs.file_size).rv()
else:
    ids['r2'] = None
# Create config for bwakit
inputs.cores = min(inputs.maxCores, multiprocessing.cpu_count())
inputs.uuid = uuid
config = dict(**vars(inputs))  # Create config as a copy of inputs since it has values we want
config.update(ids)  # Overwrite attributes with the FileStoreIDs from ids
config = argparse.Namespace(**config)
# Define and wire job functions
bam_id = job.wrapJobFn(run_bwakit, config, sort=inputs.sort, trim=inputs.trim,
                       disk=inputs.file_size, cores=inputs.cores)
job.addFollowOn(bam_id)
output_name = uuid + '.bam' + str(inputs.suffix) if inputs.suffix else uuid + '.bam'
if urlparse(inputs.output_dir).scheme == 's3':
    bam_id.addChildJobFn(s3am_upload_job, file_id=bam_id.rv(), file_name=output_name,
                         s3_dir=inputs.output_dir, s3_key_path=inputs.ssec,
                         cores=inputs.cores, disk=inputs.file_size)
else:
    mkdir_p(inputs.output_dir)
    bam_id.addChildJobFn(copy_file_job, name=output_name, file_id=bam_id.rv(),
                         output_dir=inputs.output_dir, disk=inputs.file_size)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def parse_manifest(manifest_path): """ Parse manifest file :param str manifest_path: Path to manifest file :return: samples :rtype: list[str, list] """
samples = []
with open(manifest_path, 'r') as f:
    for line in f:
        if not line.isspace() and not line.startswith('#'):
            sample = line.strip().split('\t')
            require(2 <= len(sample) <= 3, 'Bad manifest format! '
                    'Expected UUID\tURL1\t[URL2] (tab separated), got: {}'.format(sample))
            uuid = sample[0]
            urls = sample[1:]
            for url in urls:
                require(urlparse(url).scheme, 'Invalid URL passed for {}'.format(url))
            samples.append([uuid, urls])
return samples
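A minimal usage sketch of the parser above; the manifest path and URLs here are hypothetical examples, not real sample data:
# Write a tiny example manifest and parse it with the function above.
manifest = 'manifest.tsv'  # hypothetical path
with open(manifest, 'w') as f:
    f.write('# UUID, URL1, optional URL2 (tab separated)\n')
    f.write('sample-1\thttp://example.com/reads_1.fq.gz\thttp://example.com/reads_2.fq.gz\n')
    f.write('sample-2\thttp://example.com/interleaved.fq.gz\n')

samples = parse_manifest(manifest)
# [['sample-1', ['http://example.com/reads_1.fq.gz', 'http://example.com/reads_2.fq.gz']],
#  ['sample-2', ['http://example.com/interleaved.fq.gz']]]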
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _address2long(address): """ Convert an address string to a long. """
parsed = ipv4.ip2long(address) if parsed is None: parsed = ipv6.ip2long(address) return parsed
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def index(self, item): """ Return the 0-based position of `item` in this IpRange. The first address in the range has index 0; an address outside the range raises ``ValueError`` (e.g. ``ValueError: 10.0.0.1 is not in range``). :param item: Dotted-quad ip address. :type item: str :returns: Index of ip address in range """
item = self._cast(item) offset = item - self.startIp if offset >= 0 and offset < self._len: return offset raise ValueError('%s is not in range' % self._ipver.long2ip(item))
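The offset arithmetic used here is easy to reproduce with Python 3's standard ipaddress module; the following is only an illustrative sketch of the same idea, not this library's own API:
import ipaddress

def index_in_range(item, start, end):
    """Return the 0-based offset of `item` within [start, end], else raise ValueError."""
    item_l = int(ipaddress.ip_address(item))
    start_l = int(ipaddress.ip_address(start))
    end_l = int(ipaddress.ip_address(end))
    offset = item_l - start_l
    if 0 <= offset <= end_l - start_l:
        return offset
    raise ValueError('%s is not in range' % item)

print(index_in_range('192.168.0.5', '192.168.0.0', '192.168.0.255'))  # 5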
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _detach_process(): """ Detach daemon process. Forks the current process into a parent and a detached child. The child process resides in its own process group, has no controlling terminal attached and is cleaned up by the init process. Returns ``True`` for the parent and ``False`` for the child. """
# To detach from our process group we need to call ``setsid``. We # can only do that if we aren't a process group leader. Therefore # we fork once, which makes sure that the new child process is not # a process group leader. pid = os.fork() if pid > 0: # Parent process # Use waitpid to "collect" the child process and avoid Zombies os.waitpid(pid, 0) return True os.setsid() # We now fork a second time and let the second's fork parent exit. # This makes the second fork's child process an orphan. Orphans are # cleaned up by the init process, so we won't end up with a zombie. # In addition, the second fork's child is no longer a session # leader and can therefore never acquire a controlling terminal. pid = os.fork() if pid > 0: os._exit(os.EX_OK) return False
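The double-fork idiom above is the standard Unix daemonization recipe; a stripped-down sketch of the same steps (minus the DaemonContext details used elsewhere in this module):
import os

def daemonize():
    """Return True in the original (parent) process, False in the detached daemon."""
    pid = os.fork()            # first fork: child is guaranteed not to be a group leader
    if pid > 0:
        os.waitpid(pid, 0)     # reap the first child so it does not become a zombie
        return True            # parent keeps running
    os.setsid()                # new session: no controlling terminal
    if os.fork() > 0:          # second fork: this fork's parent exits immediately...
        os._exit(0)            # ...so the grandchild is re-parented to init
    return False               # daemon process continues here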
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _block(predicate, timeout): """ Block until a predicate becomes true. ``predicate`` is a function taking no arguments. The call to ``_block`` blocks until ``predicate`` returns a true value. This is done by polling ``predicate``. ``timeout`` is either ``True`` (block indefinitely) or a timeout in seconds. The return value is the value of the predicate after the timeout. """
if timeout: if timeout is True: timeout = float('Inf') timeout = time.time() + timeout while not predicate() and time.time() < timeout: time.sleep(0.1) return predicate()
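For example, a caller can poll for a (hypothetical) readiness flag with a five second timeout:
import os

# Wait at most 5 seconds for a hypothetical ready-file to appear.
ready = _block(lambda: os.path.exists('/tmp/service.ready'), 5)

# Passing True instead of a number blocks until the predicate holds.
# _block(lambda: os.path.exists('/tmp/service.ready'), True)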
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def read_pid(self): """ Return the PID of the process owning the lock. Returns ``None`` if no lock is present. """
try: with open(self._path, 'r') as f: s = f.read().strip() if not s: return None return int(s) except IOError as e: if e.errno == errno.ENOENT: return None raise
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _get_logger_file_handles(self): """ Find the file handles used by our logger's handlers. """
handles = [] for handler in self.logger.handlers: # The following code works for logging's SysLogHandler, # StreamHandler, SocketHandler, and their subclasses. for attr in ['sock', 'socket', 'stream']: try: handle = getattr(handler, attr) if handle: handles.append(handle) break except AttributeError: continue return handles
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def is_running(self): """ Check if the daemon is running. """
pid = self.get_pid() if pid is None: return False # The PID file may still exist even if the daemon isn't running, # for example if it has crashed. try: os.kill(pid, 0) except OSError as e: if e.errno == errno.ESRCH: # In this case the PID file shouldn't have existed in # the first place, so we remove it self.pid_file.release() return False # We may also get an exception if we're not allowed to use # kill on the process, but that means that the process does # exist, which is all we care about here. return True
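The ``os.kill(pid, 0)`` trick used above is a common liveness probe: signal 0 delivers nothing but still checks that the target process exists. In isolation it looks like this:
import errno
import os

def pid_exists(pid):
    """Check whether a process with the given PID exists."""
    try:
        os.kill(pid, 0)
    except OSError as e:
        if e.errno == errno.ESRCH:   # no such process
            return False
        if e.errno == errno.EPERM:   # process exists but we lack permission to signal it
            return True
        raise
    return True

print(pid_exists(os.getpid()))  # True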
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _get_signal_event(self, s): """ Get the event for a signal. Checks if the signal has been enabled and raises a ``ValueError`` if not. """
try:
    return self._signal_events[int(s)]
except KeyError:
    raise ValueError('Signal {} has not been enabled'.format(s))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def send_signal(self, s): """ Send a signal to the daemon process. The signal must have been enabled using the ``signals`` parameter of :py:meth:`Service.__init__`. Otherwise, a ``ValueError`` is raised. """
self._get_signal_event(s) # Check if signal has been enabled pid = self.get_pid() if not pid: raise ValueError('Daemon is not running.') os.kill(pid, s)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def stop(self, block=False): """ Tell the daemon process to stop. Sends the SIGTERM signal to the daemon process, requesting it to terminate. If ``block`` is true then the call blocks until the daemon process has exited. This may take some time since the daemon process will complete its on-going backup activities before shutting down. ``block`` can either be ``True`` (in which case it blocks indefinitely) or a timeout in seconds. The return value is ``True`` if the daemon process has been stopped and ``False`` otherwise. .. versionadded:: 0.3 The ``block`` parameter """
self.send_signal(signal.SIGTERM) return _block(lambda: not self.is_running(), block)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def kill(self, block=False): """ Kill the daemon process. Sends the SIGKILL signal to the daemon process, killing it. You probably want to try :py:meth:`stop` first. If ``block`` is true then the call blocks until the daemon process has exited. ``block`` can either be ``True`` (in which case it blocks indefinitely) or a timeout in seconds. Returns ``True`` if the daemon process has (already) exited and ``False`` otherwise. The PID file is always removed, whether the process has already exited or not. Note that this means that subsequent calls to :py:meth:`is_running` and :py:meth:`get_pid` will behave as if the process has exited. If you need to be sure that the process has already exited, set ``block`` to ``True``. .. versionadded:: 0.5.1 The ``block`` parameter """
pid = self.get_pid() if not pid: raise ValueError('Daemon is not running.') try: os.kill(pid, signal.SIGKILL) return _block(lambda: not self.is_running(), block) except OSError as e: if e.errno == errno.ESRCH: raise ValueError('Daemon is not running.') raise finally: self.pid_file.release()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def start(self, block=False): """ Start the daemon process. The daemon process is started in the background and the calling process returns. Once the daemon process is initialized it calls the :py:meth:`run` method. If ``block`` is true then the call blocks until the daemon process has started. ``block`` can either be ``True`` (in which case it blocks indefinitely) or a timeout in seconds. The return value is ``True`` if the daemon process has been started and ``False`` otherwise. .. versionadded:: 0.3 The ``block`` parameter """
pid = self.get_pid() if pid: raise ValueError('Daemon is already running at PID %d.' % pid) # The default is to place the PID file into ``/var/run``. This # requires root privileges. Since not having these is a common # problem we check a priori whether we can create the lock file. try: self.pid_file.acquire() finally: self.pid_file.release() # Clear previously received SIGTERMs. This must be done before # the calling process returns so that the calling process can # call ``stop`` directly after ``start`` returns without the # signal being lost. self.clear_signal(signal.SIGTERM) if _detach_process(): # Calling process returns return _block(lambda: self.is_running(), block) # Daemon process continues here self._debug('Daemon has detached') def on_signal(s, frame): self._debug('Received signal {}'.format(s)) self._signal_events[int(s)].set() def runner(): try: # We acquire the PID as late as possible, since its # existence is used to verify whether the service # is running. self.pid_file.acquire() self._debug('PID file has been acquired') self._debug('Calling `run`') self.run() self._debug('`run` returned without exception') except Exception as e: self.logger.exception(e) except SystemExit: self._debug('`run` called `sys.exit`') try: self.pid_file.release() self._debug('PID file has been released') except Exception as e: self.logger.exception(e) os._exit(os.EX_OK) # FIXME: This seems redundant try: setproctitle.setproctitle(self.name) self._debug('Process title has been set') files_preserve = (self.files_preserve + self._get_logger_file_handles()) signal_map = {s: on_signal for s in self._signal_events} signal_map.update({ signal.SIGTTIN: None, signal.SIGTTOU: None, signal.SIGTSTP: None, }) with DaemonContext( detach_process=False, signal_map=signal_map, files_preserve=files_preserve): self._debug('Daemon context has been established') # Python's signal handling mechanism only forwards signals to # the main thread and only when that thread is doing something # (e.g. not when it's waiting for a lock, etc.). If we use the # main thread for the ``run`` method this means that we cannot # use the synchronization devices from ``threading`` for # communicating the reception of SIGTERM to ``run``. Hence we # use a separate thread for ``run`` and make sure that the # main loop receives signals. See # https://bugs.python.org/issue1167930 thread = threading.Thread(target=runner) thread.start() while thread.is_alive(): time.sleep(1) except Exception as e: self.logger.exception(e) # We need to shutdown the daemon process at this point, because # otherwise it will continue executing from after the original # call to ``start``. os._exit(os.EX_OK)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def create(opts): """Create a new environment Usage: datacats create [-bin] [--interactive] [-s NAME] [--address=IP] [--syslog] [--ckan=CKAN_VERSION] [--no-datapusher] [--site-url SITE_URL] [--no-init-db] ENVIRONMENT_DIR [PORT] Options: --address=IP Address to listen on (Linux-only) --ckan=CKAN_VERSION Use CKAN version CKAN_VERSION [default: 2.4] -b --bare Bare CKAN site with no example extension -i --image-only Create the environment but don't start containers --interactive Doesn't detach from the web container --no-datapusher Don't install/enable ckanext-datapusher --no-init-db Don't initialize the database. Useful for importing CKANs. -n --no-sysadmin Don't prompt for an initial sysadmin user account -s --site=NAME Pick a site to create [default: primary] --site-url SITE_URL The site_url to use in API responses (e.g. http://example.org:{port}/) --syslog Log to the syslog ENVIRONMENT_DIR is a path for the new environment directory. The last part of this path will be used as the environment name. """
if opts['--address'] and is_boot2docker(): raise DatacatsError('Cannot specify address on boot2docker.') return create_environment( environment_dir=opts['ENVIRONMENT_DIR'], port=opts['PORT'], create_skin=not opts['--bare'], start_web=not opts['--image-only'], create_sysadmin=not opts['--no-sysadmin'], site_name=opts['--site'], ckan_version=opts['--ckan'], address=opts['--address'], log_syslog=opts['--syslog'], datapusher=not opts['--no-datapusher'], site_url=opts['--site-url'], interactive=opts['--interactive'], init_db=not opts['--no-init-db'], )
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def reset(environment, opts): """Resets a site to the default state. This will re-initialize the database and recreate the administrator account. Usage: datacats reset [-iyn] [-s NAME] [ENVIRONMENT] Options: -i --interactive Don't detach from the web container -s --site=NAME The site to reset [default: primary] -y --yes Respond yes to all questions -n --no-sysadmin Don't prompt for a sysadmin password"""
# pylint: disable=unused-argument if not opts['--yes']: y_or_n_prompt('Reset will remove all data related to the ' 'site {} and recreate the database'.format(opts['--site'])) print 'Resetting...' environment.stop_supporting_containers() environment.stop_ckan() clean_pyc(environment) # Save the port. saved_port = environment.port environment.purge_data([opts['--site']], never_delete=True) init({ 'ENVIRONMENT_DIR': opts['ENVIRONMENT'], '--site': opts['--site'], 'PORT': saved_port, '--syslog': None, '--address': None, '--image-only': False, '--interactive': opts['--interactive'], '--no-init-db': False, '--no-sysadmin': opts['--no-sysadmin'], '--site-url': None }, no_install=True)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def init(opts, no_install=False, quiet=False): """Initialize a purged environment or copied environment directory Usage: datacats init [-in] [--syslog] [-s NAME] [--address=IP] [--interactive] [--site-url SITE_URL] [ENVIRONMENT_DIR [PORT]] [--no-init-db] Options: --address=IP Address to listen on (Linux-only) --interactive Don't detach from the web container -i --image-only Create the environment but don't start containers --no-init-db Don't initialize the database. Useful for importing other CKANs -n --no-sysadmin Don't prompt for an initial sysadmin user account -s --site=NAME Pick a site to initialize [default: primary] --site-url SITE_URL The site_url to use in API responses (e.g. http://example.org:{port}/) --syslog Log to the syslog ENVIRONMENT_DIR is an existing datacats environment directory. Defaults to '.' """
if opts['--address'] and is_boot2docker(): raise DatacatsError('Cannot specify address on boot2docker.') environment_dir = opts['ENVIRONMENT_DIR'] port = opts['PORT'] address = opts['--address'] start_web = not opts['--image-only'] create_sysadmin = not opts['--no-sysadmin'] site_name = opts['--site'] site_url = opts['--site-url'] interactive = opts['--interactive'] init_db = not opts['--no-init-db'] environment_dir = abspath(environment_dir or '.') log_syslog = opts['--syslog'] environment = Environment.load(environment_dir, site_name) if address: environment.address = address if port: environment.port = int(port) if site_url: environment.site_url = site_url try: if environment.sites and site_name in environment.sites: raise DatacatsError('Site named {0} already exists.' .format(site_name)) # There are a couple of steps we can/must skip if we're making a sub-site only making_full_environment = not environment.data_exists() if not quiet: write('Creating environment {0}/{1} ' 'from existing environment directory "{0}"' .format(environment.name, environment.site_name)) steps = [ lambda: environment.create_directories(create_project_dir=False)] + ([ environment.save, environment.create_virtualenv ] if making_full_environment else []) + [ environment.save_site, environment.start_supporting_containers, environment.fix_storage_permissions, ] for fn in steps: fn() if not quiet: write('.') if not quiet: write('\n') except: if not quiet: print raise return finish_init(environment, start_web, create_sysadmin, log_syslog=log_syslog, do_install=not no_install, quiet=quiet, site_url=site_url, interactive=interactive, init_db=init_db)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def save(self): """ Save profile settings into user profile directory """
config = self.profiledir + '/config' if not isdir(self.profiledir): makedirs(self.profiledir) cp = SafeConfigParser() cp.add_section('ssh') cp.set('ssh', 'private_key', self.ssh_private_key) cp.set('ssh', 'public_key', self.ssh_public_key) with open(config, 'w') as cfile: cp.write(cfile)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def generate_ssh_key(self): """ Generate a new ssh private and public key """
web_command( command=["ssh-keygen", "-q", "-t", "rsa", "-N", "", "-C", "datacats generated {0}@{1}".format( getuser(), gethostname()), "-f", "/output/id_rsa"], rw={self.profiledir: '/output'}, )
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def create(self, environment, target_name): """ Sends "create project" command to the remote server """
remote_server_command( ["ssh", environment.deploy_target, "create", target_name], environment, self, clean_up=True, )
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def admin_password(self, environment, target_name, password): """ Return True if password was set successfully """
try: remote_server_command( ["ssh", environment.deploy_target, "admin_password", target_name, password], environment, self, clean_up=True ) return True except WebCommandError: return False
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def deploy(self, environment, target_name, stream_output=None): """ Return True if deployment was successful """
try:
    remote_server_command(
        [
            "rsync", "-lrv", "--safe-links", "--munge-links", "--delete",
            "--inplace", "--chmod=ugo=rwX", "--exclude=.datacats-environment",
            "--exclude=.git", "/project/.",
            environment.deploy_target + ':' + target_name
        ],
        environment, self,
        include_project_dir=True,
        stream_output=stream_output,
        clean_up=True,
    )
except WebCommandError as e:
    raise DatacatsError(
        "Unable to deploy `{0}` to remote server for some reason:\n"
        " datacats was not able to copy data to the remote server"
        .format(target_name),
        parent_exception=e
    )
try:
    remote_server_command(
        [
            "ssh", environment.deploy_target, "install", target_name,
        ],
        environment, self,
        clean_up=True,
    )
    return True
except WebCommandError as e:
    raise DatacatsError(
        "Unable to deploy `{0}` to remote server for some reason:\n"
        "datacats copied data to the server but failed to register\n"
        "(or `install`) the new catalog"
        .format(target_name),
        parent_exception=e
    )
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def num_batches(n, batch_size): """Compute the number of mini-batches required to cover a data set of size `n` using batches of size `batch_size`. Parameters n: int the number of samples in the data set batch_size: int the mini-batch size Returns ------- int: the number of batches required """
b = n // batch_size if n % batch_size > 0: b += 1 return b
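A quick worked example: 10 samples with a batch size of 3 need four mini-batches, while an exact multiple needs no extra batch.
print(num_batches(10, 3))   # 4  (batches of 3, 3, 3 and a final batch of 1)
print(num_batches(9, 3))    # 3  (exact multiple, no partial batch)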
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def num_indices_generated(self): """ Get the number of indices that would be generated by this sampler. Returns ------- int, `np.inf` or `None`. An int if the number of samples is known, `np.inf` if it is infinite or `None` if the number of samples is unknown. """
if self.repeats == -1: return np.inf else: return self.length * self.repeats
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def in_order_indices_batch_iterator(self, batch_size): """ Create an iterator that generates in-order mini-batches of sample indices. The batches will have `batch_size` elements, with the exception of the final batch which will have less if there are not enough samples left to fill it. The generated mini-batches indices take the form of 1D NumPy integer arrays. Parameters batch_size: int Mini-batch size Returns ------- iterator An iterator that generates mini-batches in the form of 1D NumPy integer arrays. """
if self.repeats == 1: for i in range(0, self.length, batch_size): yield np.arange(i, min(i + batch_size, self.length)) else: repeats = self.repeats i = 0 while True: j = i + batch_size if j <= self.length: # Within size of data yield np.arange(i, j) i = j elif j <= self.length * 2: # One restart is required # Reduce the number of remaining repeats if repeats != -1: repeats -= 1 if repeats == 0: # Finished; emit remaining elements if i < self.length: yield np.arange(i, self.length) break # Wrap over # Compute number of elements required to make up # the batch k = batch_size - (self.length - i) yield np.append(np.arange(i, self.length), np.arange(0, k), axis=0) i = k else: # Multiple restarts required to fill the batch batch_ndx = np.arange(0) # i = 0 while len(batch_ndx) < batch_size: # Wrap over k = min(batch_size - len(batch_ndx), self.length - i) batch_ndx = np.append( batch_ndx, np.arange(i, i + k), axis=0) i += k if i >= self.length: i -= self.length # Reduce the number of remaining repeats if repeats != -1: repeats -= 1 if repeats == 0: break if len(batch_ndx) > 0: yield batch_ndx if repeats == 0: break
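For the simple ``repeats == 1`` branch the yielded index batches are just consecutive ``np.arange`` slices; for instance, with 5 samples and a batch size of 2:
import numpy as np

length, batch_size = 5, 2
batches = [np.arange(i, min(i + batch_size, length))
           for i in range(0, length, batch_size)]
# [array([0, 1]), array([2, 3]), array([4])] -- the final batch is smaller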
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def shuffled_indices_batch_iterator(self, batch_size, shuffle_rng): """ Create an iterator that generates randomly shuffled mini-batches of sample indices. The batches will have `batch_size` elements, with the exception of the final batch which will have less if there are not enough samples left to fill it. The generated mini-batches indices take the form of 1D NumPy integer arrays. Parameters batch_size: int Mini-batch size shuffle_rng: a `numpy.random.RandomState` that will be used to randomise element order. Returns ------- iterator An iterator that generates mini-batches in the form of 1D NumPy integer arrays. """
if self.repeats == 1: indices = shuffle_rng.permutation(self.length) for i in range(0, self.length, batch_size): yield indices[i:i + batch_size] else: repeats = self.repeats indices = shuffle_rng.permutation(self.length) i = 0 while True: j = i + batch_size if j <= self.length: # Within size of data yield indices[i:j] i = j else: # Multiple restarts required to fill the batch batch_ndx = np.arange(0) while len(batch_ndx) < batch_size: # Wrap over k = min(batch_size - len(batch_ndx), self.length - i) batch_ndx = np.append( batch_ndx, indices[i:i + k], axis=0) i += k if i >= self.length: # Loop over; new permutation indices = shuffle_rng.permutation(self.length) i -= self.length # Reduce the number of remaining repeats if repeats != -1: repeats -= 1 if repeats == 0: break if len(batch_ndx) > 0: yield batch_ndx if repeats == 0: break
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def class_balancing_sample_weights(y): """ Compute sample weight given an array of sample classes. The weights are assigned on a per-class basis and the per-class weights are inversely proportional to their frequency. Parameters y: NumPy array, 1D dtype=int sample classes, values must be 0 or positive Returns ------- NumPy array, 1D dtype=float per sample weight array """
h = np.bincount(y) cls_weight = 1.0 / (h.astype(float) * len(np.nonzero(h)[0])) cls_weight[np.isnan(cls_weight)] = 0.0 sample_weight = cls_weight[y] return sample_weight
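A worked example, calling the helper the way class_balancing_sampler below does: with classes [0, 0, 0, 1] the minority class gets three times the per-sample weight, so each class contributes the same total weight.
import numpy as np

y = np.array([0, 0, 0, 1])
w = WeightedSampler.class_balancing_sample_weights(y)
# array([0.1667, 0.1667, 0.1667, 0.5]) -- each class sums to a total weight of 0.5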
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def class_balancing_sampler(y, indices): """ Construct a `WeightedSubsetSampler` that compensates for class imbalance. Parameters y: NumPy array, 1D dtype=int sample classes, values must be 0 or positive indices: NumPy array, 1D dtype=int An array of indices that identify the subset of samples drawn from data that are to be used Returns ------- WeightedSubsetSampler instance Sampler """
weights = WeightedSampler.class_balancing_sample_weights(y[indices]) return WeightedSubsetSampler(weights, indices=indices)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_data_path(filename): """ Get the path of the given file within the batchup data directory Parameters filename: str The filename to locate within the batchup data directory Returns ------- str The full path of the file """
if os.path.isabs(filename): return filename else: return os.path.join(get_data_dir(), filename)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def download(path, source_url): """ Download a file to a given path from a given URL, if it does not exist. Parameters path: str The (destination) path of the file on the local filesystem source_url: str The URL from which to download the file Returns ------- str The path of the file """
dir_path = os.path.dirname(path) if not os.path.exists(dir_path): os.makedirs(dir_path) if not os.path.exists(path): print('Downloading {} to {}'.format(source_url, path)) filename = source_url.split('/')[-1] def _progress(count, block_size, total_size): sys.stdout.write('\rDownloading {} {:.2%}'.format( filename, float(count * block_size) / float(total_size))) sys.stdout.flush() try: urlretrieve(source_url, path, reporthook=_progress) except: sys.stdout.write('\r') # Exception; remove any partially downloaded file and re-raise if os.path.exists(path): os.remove(path) raise sys.stdout.write('\r') return path
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def compute_sha256(path): """ Compute the SHA-256 hash of the file at the given path Parameters path: str The path of the file Returns ------- str The SHA-256 HEX digest """
hasher = hashlib.sha256() with open(path, 'rb') as f: # 10MB chunks for chunk in iter(lambda: f.read(10 * 1024 * 1024), b''): hasher.update(chunk) return hasher.hexdigest()
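Hashing in 10MB chunks keeps memory use flat regardless of file size. A quick check of the helper on a small throwaway file (the file name is hypothetical):
with open('example.bin', 'wb') as f:    # hypothetical test file
    f.write(b'batchup' * 1000)
print(compute_sha256('example.bin'))     # prints a 64-character hex digest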
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def verify_file(path, sha256): """ Verify the integrity of a file by checking its SHA-256 hash. If no digest is supplied, the digest is printed to the console. Closely follows the code in `torchvision.datasets.utils.check_integrity` Parameters path: str The path of the file to check sha256: str The expected SHA-256 hex digest of the file, or `None` to print the digest of the file to the console Returns ------- bool Indicates if the file passes the integrity check or not """
if not os.path.isfile(path): return False digest = compute_sha256(path) if sha256 is None: # No digest supplied; report it to the console so a develop can fill # it in print('SHA-256 of {}:'.format(path)) print(' "{}"'.format(digest)) else: if digest != sha256: return False return True
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def download_and_verify(path, source_url, sha256): """ Download a file to a given path from a given URL, if it does not exist. After downloading it, verify it integrity by checking the SHA-256 hash. Parameters path: str The (destination) path of the file on the local filesystem source_url: str The URL from which to download the file sha256: str The expected SHA-256 hex digest of the file, or `None` to print the digest of the file to the console Returns ------- str or None The path of the file if successfully downloaded otherwise `None` """
if os.path.exists(path): # Already exists? # Nothing to do, except print the SHA-256 if necessary if sha256 is None: print('The SHA-256 of {} is "{}"'.format( path, compute_sha256(path))) return path # Compute the path of the unverified file unverified_path = path + '.unverified' for i in range(_MAX_DOWNLOAD_TRIES): # Download it try: unverified_path = download(unverified_path, source_url) except Exception as e: # Report failure print( 'Download of {} unsuccessful; error {}; ' 'deleting and re-trying...'.format(source_url, e)) # Delete so that we can retry if os.path.exists(unverified_path): os.remove(unverified_path) else: if os.path.exists(unverified_path): # Got something... if verify_file(unverified_path, sha256): # Success: rename the unverified file to the destination # filename os.rename(unverified_path, path) return path else: # Report failure print( 'Download of {} unsuccessful; verification failed; ' 'deleting and re-trying...'.format(source_url)) # Delete so that we can retry os.remove(unverified_path) print('Did not succeed in downloading {} (tried {} times)'.format( source_url, _MAX_DOWNLOAD_TRIES )) return None
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def copy_and_verify(path, source_path, sha256): """ Copy a file to a given path from a given path, if it does not exist. After copying it, verify it integrity by checking the SHA-256 hash. Parameters path: str The (destination) path of the file on the local filesystem source_path: str The path from which to copy the file sha256: str The expected SHA-256 hex digest of the file, or `None` to print the digest of the file to the console Returns ------- str or None The path of the file if successfully downloaded otherwise `None` """
if os.path.exists(path): # Already exists? # Nothing to do, except print the SHA-256 if necessary if sha256 is None: print('The SHA-256 of {} is "{}"'.format( path, compute_sha256(path))) return path if not os.path.exists(source_path): return None # Compute the path of the unverified file unverified_path = path + '.unverified' # Copy it dir_path = os.path.dirname(path) if not os.path.exists(dir_path): os.makedirs(dir_path) shutil.copy(source_path, unverified_path) if os.path.exists(unverified_path): # Got something... if verify_file(unverified_path, sha256): # Success: rename the unverified file to the destination # filename os.rename(unverified_path, path) return path else: # Report failure print('SHA verification of file {} failed'.format(source_path)) # Delete os.remove(unverified_path) return None
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def shell(environment, opts): """Run a command or interactive shell within this environment Usage: Options: -d --detach Run the resulting container in the background -s --site=NAME Specify a site to run the shell on [default: primary] ENVIRONMENT may be an environment name or a path to an environment directory. Default: '.' """
environment.require_data() environment.start_supporting_containers() return environment.interactive_shell( opts['COMMAND'], detach=opts['--detach'] )
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def paster(opts): """Run a paster command from the current directory Usage: Options: -s --site=NAME Specify a site to run this paster command on [default: primary] -d --detach Run the resulting container in the background You must be inside a datacats environment to run this. The paster command will run within your current directory inside the environment. You don't need to specify the --plugin option. The --config option also need not be specified. """
environment = Environment.load('.') environment.require_data() environment.start_supporting_containers() if not opts['COMMAND']: opts['COMMAND'] = ['--', 'help'] assert opts['COMMAND'][0] == '--' return environment.interactive_shell( opts['COMMAND'][1:], paster=True, detach=opts['--detach'] )
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def save_new_site(site_name, sitedir, srcdir, port, address, site_url, passwords): """ Add a site's configuration to the source dir and site dir """
cp = ConfigParser.SafeConfigParser() cp.read([srcdir + '/.datacats-environment']) section_name = 'site_' + site_name if not cp.has_section(section_name): cp.add_section(section_name) cp.set(section_name, 'port', str(port)) if address: cp.set(section_name, 'address', address) if site_url: cp.set(section_name, 'site_url', site_url) with open(srcdir + '/.datacats-environment', 'w') as config: cp.write(config) # save passwords to datadir cp = ConfigParser.SafeConfigParser() cp.add_section('passwords') for n in sorted(passwords): cp.set('passwords', n.lower(), passwords[n]) # Write to the sitedir so we maintain separate passwords. with open(sitedir + '/passwords.ini', 'w') as config: cp.write(config)
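Reading the persisted values back mirrors the writes above; a sketch using the same Python 2 ConfigParser module (the directory paths are hypothetical):
import ConfigParser

cp = ConfigParser.SafeConfigParser()
cp.read('/path/to/srcdir/.datacats-environment')    # hypothetical srcdir
port = cp.getint('site_primary', 'port')            # section is 'site_' + site_name

pw = ConfigParser.SafeConfigParser()
pw.read('/path/to/sitedir/passwords.ini')            # hypothetical sitedir
passwords = dict(pw.items('passwords'))              # keys are stored lower-cased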
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def save_new_environment(name, datadir, srcdir, ckan_version, deploy_target=None, always_prod=False): """ Save an environment's configuration to the source dir and data dir """
with open(datadir + '/.version', 'w') as f: f.write('2') cp = ConfigParser.SafeConfigParser() cp.read(srcdir + '/.datacats-environment') if not cp.has_section('datacats'): cp.add_section('datacats') cp.set('datacats', 'name', name) cp.set('datacats', 'ckan_version', ckan_version) if deploy_target: if not cp.has_section('deploy'): cp.add_section('deploy') cp.set('deploy', 'target', deploy_target) if always_prod: cp.set('datacats', 'always_prod', 'true') with open(srcdir + '/.datacats-environment', 'w') as config: cp.write(config) save_srcdir_location(datadir, srcdir)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def load_environment(srcdir, datadir=None, allow_old=False): """ Load configuration values for an environment :param srcdir: environment source directory :param datadir: environment data direcory, if None will be discovered from srcdir :param allow_old: Don't throw an exception if this is an old site This is only valid for sites that you are purging. if datadir is None it will be discovered from srcdir Returns (datadir, name, ckan_version, always_prod, deploy_target, remote_server_key) """
cp = ConfigParser.SafeConfigParser() try: cp.read([srcdir + '/.datacats-environment']) except ConfigParser.Error: raise DatacatsError('Error reading environment information') name = cp.get('datacats', 'name') if datadir: # update the link in case user moved their srcdir save_srcdir_location(datadir, srcdir) else: datadir = path.expanduser('~/.datacats/' + name) # FIXME: check if datadir is sane, project-dir points back to srcdir if migrate.needs_format_conversion(datadir) and not allow_old: raise DatacatsError('This environment uses an old format. You must' ' migrate to the new format. To do so, use the' ' "datacats migrate" command.') if migrate.is_locked(datadir): raise DatacatsError('Migration in progress, cannot continue.\n' 'If you interrupted a migration, you should' ' attempt manual recovery or contact us by' ' filing an issue at http://github.com/datacats/' 'datacats.\nAs a last resort, you could delete' ' all your stored data and create a new environment' ' by running "datacats purge" followed by' ' "datacats init".') # FIXME: consider doing data_complete check here ckan_version = cp.get('datacats', 'ckan_version') try: always_prod = cp.getboolean('datacats', 'always_prod') except ConfigParser.NoOptionError: always_prod = False try: extra_containers = cp.get('datacats', 'extra_containers').split(' ') except ConfigParser.NoOptionError: extra_containers = () # if remote_server's custom ssh connection # address is defined, # we overwrite the default datacats.com one try: deploy_target = cp.get('deploy', 'remote_server_user') \ + "@" + cp.get('deploy', 'remote_server') except (ConfigParser.NoOptionError, ConfigParser.NoSectionError): deploy_target = DEFAULT_REMOTE_SERVER_TARGET # if remote_server's ssh public key is given, # we overwrite the default datacats.com one try: remote_server_key = cp.get('deploy', 'remote_server_key') except (ConfigParser.NoOptionError, ConfigParser.NoSectionError): remote_server_key = None return (datadir, name, ckan_version, always_prod, deploy_target, remote_server_key, extra_containers)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def load_site(srcdir, datadir, site_name=None): """ Load configuration values for a site. Returns (port, address, site_url, passwords) """
if site_name is None: site_name = 'primary' if not validate.valid_name(site_name): raise DatacatsError('{} is not a valid site name.'.format(site_name)) cp = ConfigParser.SafeConfigParser() try: cp.read([srcdir + '/.datacats-environment']) except ConfigParser.Error: raise DatacatsError('Error reading environment information') site_section = 'site_' + site_name try: port = cp.getint(site_section, 'port') except (ConfigParser.NoOptionError, ConfigParser.NoSectionError): port = None try: address = cp.get(site_section, 'address') except (ConfigParser.NoOptionError, ConfigParser.NoSectionError): address = None try: site_url = cp.get(site_section, 'site_url') except (ConfigParser.NoOptionError, ConfigParser.NoSectionError): site_url = None passwords = {} cp = ConfigParser.SafeConfigParser() cp.read(datadir + '/sites/' + site_name + '/passwords.ini') try: pw_options = cp.options('passwords') except ConfigParser.NoSectionError: pw_options = [] for n in pw_options: passwords[n.upper()] = cp.get('passwords', n) return port, address, site_url, passwords
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def data_complete(datadir, sitedir, get_container_name): """ Return True if the directories and containers we're expecting are present in datadir, sitedir and containers """
if any(not path.isdir(sitedir + x) for x in ('/files', '/run', '/solr')): return False if docker.is_boot2docker(): # Inspect returns None if the container doesn't exist. return all(docker.inspect_container(get_container_name(x)) for x in ('pgdata', 'venv')) return path.isdir(datadir + '/venv') and path.isdir(sitedir + '/postgres')
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def create_directories(datadir, sitedir, srcdir=None): """ Create expected directories in datadir, sitedir and optionally srcdir """
# It's possible that the datadir already exists # (we're making a secondary site) if not path.isdir(datadir): os.makedirs(datadir, mode=0o700) try: # This should take care if the 'site' subdir if needed os.makedirs(sitedir, mode=0o700) except OSError: raise DatacatsError("Site already exists.") # venv isn't site-specific, the rest are. if not docker.is_boot2docker(): if not path.isdir(datadir + '/venv'): os.makedirs(datadir + '/venv') os.makedirs(sitedir + '/postgres') os.makedirs(sitedir + '/solr') os.makedirs(sitedir + '/files') os.makedirs(sitedir + '/run') if srcdir: os.makedirs(srcdir)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def stop_supporting_containers(get_container_name, extra_containers): """ Stop postgres and solr containers, along with any specified extra containers """
docker.remove_container(get_container_name('postgres')) docker.remove_container(get_container_name('solr')) for container in extra_containers: docker.remove_container(get_container_name(container))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def containers_running(get_container_name): """ Return a list of containers tracked by this environment that are running """
running = [] for n in ['web', 'postgres', 'solr', 'datapusher', 'redis']: info = docker.inspect_container(get_container_name(n)) if info and not info['State']['Running']: running.append(n + '(halted)') elif info: running.append(n) return running
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _load_sites(self): """ Gets the names of all of the sites from the datadir and stores them in self.sites. Also returns this list. """
if not self.sites: self.sites = task.list_sites(self.datadir) return self.sites
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def save_site(self, create=True): """ Save environment settings in the directory that need to be saved even when creating only a new sub-site env. """
self._load_sites() if create: self.sites.append(self.site_name) task.save_new_site(self.site_name, self.sitedir, self.target, self.port, self.address, self.site_url, self.passwords)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def save(self): """ Save environment settings into environment directory, overwriting any existing configuration and discarding site config """
task.save_new_environment(self.name, self.datadir, self.target, self.ckan_version, self.deploy_target, self.always_prod)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def new(cls, path, ckan_version, site_name, **kwargs): """ Return a Environment object with settings for a new project. No directories or containers are created by this call. :params path: location for new project directory, may be relative :params ckan_version: release of CKAN to install :params site_name: The name of the site to install database and solr \ eventually. For additional keyword arguments see the __init__ method. Raises DatcatsError if directories or project with same name already exits. """
if ckan_version == 'master': ckan_version = 'latest' name, datadir, srcdir = task.new_environment_check(path, site_name, ckan_version) environment = cls(name, srcdir, datadir, site_name, ckan_version, **kwargs) environment._generate_passwords() return environment
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def load(cls, environment_name=None, site_name='primary', data_only=False, allow_old=False): """ Return an Environment object based on an existing environnment+site. :param environment_name: exising environment name, path or None to look in current or parent directories for project :param data_only: set to True to only load from data dir, not the project dir; Used for purging environment data. :param allow_old: load a very minimal subset of what we usually load. This will only work for purging environment data on an old site. Raises DatacatsError if environment can't be found or if there is an error parsing the environment information. """
srcdir, extension_dir, datadir = task.find_environment_dirs( environment_name, data_only) if datadir and data_only: return cls(environment_name, None, datadir, site_name) (datadir, name, ckan_version, always_prod, deploy_target, remote_server_key, extra_containers) = task.load_environment(srcdir, datadir, allow_old) if not allow_old: (port, address, site_url, passwords) = task.load_site(srcdir, datadir, site_name) else: (port, address, site_url, passwords) = (None, None, None, None) environment = cls(name, srcdir, datadir, site_name, ckan_version=ckan_version, port=port, deploy_target=deploy_target, site_url=site_url, always_prod=always_prod, address=address, extension_dir=extension_dir, remote_server_key=remote_server_key, extra_containers=extra_containers) if passwords: environment.passwords = passwords else: environment._generate_passwords() if not allow_old: environment._load_sites() return environment
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def data_complete(self): """ Return True if all the expected datadir files are present """
return task.data_complete(self.datadir, self.sitedir, self._get_container_name)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def require_data(self): """ raise a DatacatsError if the datadir or volumes are missing or damaged """
files = task.source_missing(self.target) if files: raise DatacatsError('Missing files in source directory:\n' + '\n'.join(files)) if not self.data_exists(): raise DatacatsError('Environment datadir missing. ' 'Try "datacats init".') if not self.data_complete(): raise DatacatsError('Environment datadir damaged or volumes ' 'missing. ' 'To reset and discard all data use ' '"datacats reset"')
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def create_directories(self, create_project_dir=True): """ Call once for new projects to create the initial project directories. """
return task.create_directories(self.datadir, self.sitedir, self.target if create_project_dir else None)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def create_source(self, datapusher=True): """ Populate ckan directory from preloaded image and copy who.ini and schema.xml info conf directory """
task.create_source(self.target, self._preload_image(), datapusher)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def create_ckan_ini(self): """ Use make-config to generate an initial development.ini file """
self.run_command( command='/scripts/run_as_user.sh /usr/lib/ckan/bin/paster make-config' ' ckan /project/development.ini', rw_project=True, ro={scripts.get_script_path('run_as_user.sh'): '/scripts/run_as_user.sh'}, )
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def update_ckan_ini(self, skin=True): """ Use config-tool to update development.ini with our environment settings :param skin: use environment template skin plugin True/False """
command = [ '/usr/lib/ckan/bin/paster', '--plugin=ckan', 'config-tool', '/project/development.ini', '-e', 'sqlalchemy.url = postgresql://<hidden>', 'ckan.datastore.read_url = postgresql://<hidden>', 'ckan.datastore.write_url = postgresql://<hidden>', 'ckan.datapusher.url = http://datapusher:8800', 'solr_url = http://solr:8080/solr', 'ckan.storage_path = /var/www/storage', 'ckan.plugins = datastore resource_proxy text_view ' + ('datapusher ' if exists(self.target + '/datapusher') else '') + 'recline_grid_view recline_graph_view' + (' {0}_theme'.format(self.name) if skin else ''), 'ckan.site_title = ' + self.name, 'ckan.site_logo =', 'ckan.auth.create_user_via_web = false', ] self.run_command(command=command, rw_project=True)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def create_install_template_skin(self): """ Create an example ckan extension for this environment and install it """
ckan_extension_template(self.name, self.target) self.install_package_develop('ckanext-' + self.name + 'theme')
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def ckan_db_init(self, retry_seconds=DB_INIT_RETRY_SECONDS): """ Run db init to create all ckan tables :param retry_seconds: how long to retry waiting for db to start """
# XXX workaround for not knowing how long we need to wait # for postgres to be ready. fix this by changing the postgres # entrypoint, or possibly running once with command=/bin/true started = time.time() while True: try: self.run_command( '/usr/lib/ckan/bin/paster --plugin=ckan db init ' '-c /project/development.ini', db_links=True, clean_up=True, ) break except WebCommandError: if started + retry_seconds > time.time(): raise time.sleep(DB_INIT_RETRY_DELAY)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def start_ckan(self, production=False, log_syslog=False, paster_reload=True, interactive=False): """ Start the apache server or paster serve :param log_syslog: A flag to redirect all container logs to host's syslog :param production: True for apache, False for paster serve + debug on :param paster_reload: Instruct paster to watch for file changes """
self.stop_ckan() address = self.address or '127.0.0.1' port = self.port # in prod we always use log_syslog driver log_syslog = True if self.always_prod else log_syslog production = production or self.always_prod # We only override the site URL with the docker URL on three conditions override_site_url = (self.address is None and not is_boot2docker() and not self.site_url) command = ['/scripts/web.sh', str(production), str(override_site_url), str(paster_reload)] # XXX nasty hack, remove this once we have a lessc command # for users (not just for building our preload image) if not production: css = self.target + '/ckan/ckan/public/base/css' if not exists(css + '/main.debug.css'): from shutil import copyfile copyfile(css + '/main.css', css + '/main.debug.css') ro = { self.target: '/project', scripts.get_script_path('datapusher.sh'): '/scripts/datapusher.sh' } if not is_boot2docker(): ro[self.datadir + '/venv'] = '/usr/lib/ckan' datapusher = self.needs_datapusher() if datapusher: run_container( self._get_container_name('datapusher'), 'datacats/web', '/scripts/datapusher.sh', ro=ro, volumes_from=(self._get_container_name('venv') if is_boot2docker() else None), log_syslog=log_syslog) while True: self._create_run_ini(port, production) try: self._run_web_container(port, command, address, log_syslog=log_syslog, datapusher=datapusher, interactive=interactive) if not is_boot2docker(): self.address = address except PortAllocatedError: port = self._next_port(port) continue break
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _run_web_container(self, port, command, address, log_syslog=False, datapusher=True, interactive=False): """ Start web container on port with command """
if is_boot2docker(): ro = {} volumes_from = self._get_container_name('venv') else: ro = {self.datadir + '/venv': '/usr/lib/ckan'} volumes_from = None links = { self._get_container_name('solr'): 'solr', self._get_container_name('postgres'): 'db' } links.update({self._get_container_name(container): container for container in self.extra_containers}) if datapusher: if 'datapusher' not in self.containers_running(): raise DatacatsError(container_logs(self._get_container_name('datapusher'), "all", False, False)) links[self._get_container_name('datapusher')] = 'datapusher' ro = dict({ self.target: '/project/', scripts.get_script_path('web.sh'): '/scripts/web.sh', scripts.get_script_path('adjust_devini.py'): '/scripts/adjust_devini.py'}, **ro) rw = { self.sitedir + '/files': '/var/www/storage', self.sitedir + '/run/development.ini': '/project/development.ini' } try: if not interactive: run_container( name=self._get_container_name('web'), image='datacats/web', rw=rw, ro=ro, links=links, volumes_from=volumes_from, command=command, port_bindings={ 5000: port if is_boot2docker() else (address, port)}, log_syslog=log_syslog ) else: # FIXME: share more code with interactive_shell if is_boot2docker(): switches = ['--volumes-from', self._get_container_name('pgdata'), '--volumes-from', self._get_container_name('venv')] else: switches = [] switches += ['--volume={}:{}:ro'.format(vol, ro[vol]) for vol in ro] switches += ['--volume={}:{}'.format(vol, rw[vol]) for vol in rw] links = ['--link={}:{}'.format(link, links[link]) for link in links] args = ['docker', 'run', '-it', '--name', self._get_container_name('web'), '-p', '{}:5000'.format(port) if is_boot2docker() else '{}:{}:5000'.format(address, port)] + \ switches + links + ['datacats/web', ] + command subprocess.call(args) except APIError as e: if '409' in str(e): raise DatacatsError('Web container already running. ' 'Please stop_web before running.') else: raise