<SYSTEM_TASK:>
Returns the paths of files from the FileStore
<END_TASK>
<USER_TASK:>
Description:
def return_input_paths(job, work_dir, ids, *args):
"""
Returns the paths of files from the FileStore
Input1: Toil job instance
Input2: Working directory
Input3: jobstore id dictionary
Input4: names of files to be returned from the jobstore
Returns: path(s) to the file(s) requested -- unpack these!
""" |
paths = OrderedDict()
for name in args:
if not os.path.exists(os.path.join(work_dir, name)):
file_path = job.fileStore.readGlobalFile(ids[name], os.path.join(work_dir, name))
else:
file_path = os.path.join(work_dir, name)
paths[name] = file_path
if len(args) == 1:
return file_path
return paths.values() |
<SYSTEM_TASK:>
Moves files from work_dir to output_dir
<END_TASK>
<USER_TASK:>
Description:
def move_to_output_dir(work_dir, output_dir, uuid=None, files=list()):
"""
Moves files from work_dir to output_dir
Input1: Working directory
Input2: Output directory
Input3: UUID to be prepended onto the file name
Input4: list of file names to be moved from working dir to output dir
""" |
for fname in files:
if uuid is None:
shutil.move(os.path.join(work_dir, fname), os.path.join(output_dir, fname))
else:
shutil.move(os.path.join(work_dir, fname), os.path.join(output_dir, '{}.{}'.format(uuid, fname))) |
<SYSTEM_TASK:>
Downloads shared files that are used by all samples for alignment and places them in the jobstore.
<END_TASK>
<USER_TASK:>
Description:
def batch_start(job, input_args):
"""
Downloads shared files that are used by all samples for alignment and places them in the jobstore.
""" |
shared_files = ['ref.fa', 'ref.fa.amb', 'ref.fa.ann', 'ref.fa.bwt', 'ref.fa.pac', 'ref.fa.sa', 'ref.fa.fai']
shared_ids = {}
for fname in shared_files:
url = input_args[fname]
shared_ids[fname] = job.addChildJobFn(download_from_url, url, fname).rv()
job.addFollowOnJobFn(spawn_batch_jobs, shared_ids, input_args) |
<SYSTEM_TASK:>
Spawns an alignment job for every sample in the input configuration file
<END_TASK>
<USER_TASK:>
Description:
def spawn_batch_jobs(job, shared_ids, input_args):
"""
Spawns an alignment job for every sample in the input configuration file
""" |
samples = []
config = input_args['config']
with open(config, 'r') as f_in:
for line in f_in:
line = line.strip().split(',')
uuid = line[0]
urls = line[1:]
samples.append((uuid, urls))
for sample in samples:
job.addChildJobFn(alignment, shared_ids, input_args, sample, cores=32, memory='20 G', disk='100 G') |
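For reference, the config file parsed above holds one sample per line in the form uuid,url1[,url2]. A standalone sketch of that parsing with a made-up line (no Toil required):
line = 'UUID-1234,https://example.com/r1.fastq.gz,https://example.com/r2.fastq.gz\n'
fields = line.strip().split(',')
uuid, urls = fields[0], fields[1:]
print('{}: {}'.format(uuid, urls))
# UUID-1234: ['https://example.com/r1.fastq.gz', 'https://example.com/r2.fastq.gz']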
<SYSTEM_TASK:>
Runs BWA and then Bamsort on the supplied fastqs for this sample
<END_TASK>
<USER_TASK:>
Description:
def alignment(job, ids, input_args, sample):
"""
Runs BWA and then Bamsort on the supplied fastqs for this sample
Input1: Toil Job instance
Input2: jobstore id dictionary
Input3: Input arguments dictionary
Input4: Sample tuple -- contains uuid and urls for the sample
""" |
uuid, urls = sample
# ids['bam'] = job.fileStore.getEmptyFileStoreID()
work_dir = job.fileStore.getLocalTempDir()
output_dir = input_args['output_dir']
key_path = input_args['ssec']
cores = multiprocessing.cpu_count()
# I/O
return_input_paths(job, work_dir, ids, 'ref.fa', 'ref.fa.amb', 'ref.fa.ann',
'ref.fa.bwt', 'ref.fa.pac', 'ref.fa.sa', 'ref.fa.fai')
# Get fastqs associated with this sample
for url in urls:
download_encrypted_file(work_dir, url, key_path, os.path.basename(url))
# Parameters for BWA and Bamsort
docker_cmd = ['docker', 'run', '--rm', '-v', '{}:/data'.format(work_dir)]
bwa_command = ["jvivian/bwa",
"mem",
"-R", "@RG\tID:{0}\tPL:Illumina\tSM:{0}\tLB:KapaHyper".format(uuid),
"-T", str(0),
"-t", str(cores),
"/data/ref.fa"] + [os.path.join('/data/', os.path.basename(x)) for x in urls]
bamsort_command = ["jeltje/biobambam",
"/usr/local/bin/bamsort",
"inputformat=sam",
"level=1",
"inputthreads={}".format(cores),
"outputthreads={}".format(cores),
"calmdnm=1",
"calmdnmrecompindetonly=1",
"calmdnmreference=/data/ref.fa",
"I=/data/{}".format(uuid + '.sam')]
# Piping the output to a file handle
with open(os.path.join(work_dir, uuid + '.sam'), 'w') as f_out:
subprocess.check_call(docker_cmd + bwa_command, stdout=f_out)
with open(os.path.join(work_dir, uuid + '.bam'), 'w') as f_out:
subprocess.check_call(docker_cmd + bamsort_command, stdout=f_out)
# Save in JobStore
# job.fileStore.updateGlobalFile(ids['bam'], os.path.join(work_dir, uuid + '.bam'))
ids['bam'] = job.fileStore.writeGlobalFile(os.path.join(work_dir, uuid + '.bam'))
# Copy file to S3
if input_args['s3_dir']:
job.addChildJobFn(upload_bam_to_s3, ids, input_args, sample, cores=32, memory='20 G', disk='30 G')
# Move file to output_dir
if input_args['output_dir']:
move_to_output_dir(work_dir, output_dir, uuid=None, files=[uuid + '.bam']) |
<SYSTEM_TASK:>
Uploads output BAM from sample to S3
<END_TASK>
<USER_TASK:>
Description:
def upload_bam_to_s3(job, ids, input_args, sample):
"""
Uploads output BAM from sample to S3
Input1: Toil Job instance
Input2: jobstore id dictionary
Input3: Input arguments dictionary
Input4: Sample tuple -- contains uuid and urls for the sample
""" |
uuid, urls = sample
key_path = input_args['ssec']
work_dir = job.fileStore.getLocalTempDir()
# Parse s3_dir to get bucket and s3 path
s3_dir = input_args['s3_dir']
bucket_name = s3_dir.lstrip('/').split('/')[0]
bucket_dir = '/'.join(s3_dir.lstrip('/').split('/')[1:])
base_url = 'https://s3-us-west-2.amazonaws.com/'
url = os.path.join(base_url, bucket_name, bucket_dir, uuid + '.bam')
#I/O
job.fileStore.readGlobalFile(ids['bam'], os.path.join(work_dir, uuid + '.bam'))
# Generate keyfile for upload
with open(os.path.join(work_dir, uuid + '.key'), 'wb') as f_out:
f_out.write(generate_unique_key(key_path, url))
# Commands to upload to S3 via S3AM
s3am_command = ['s3am',
'upload',
'--sse-key-file', os.path.join(work_dir, uuid + '.key'),
'file://{}'.format(os.path.join(work_dir, uuid + '.bam')),
bucket_name,
os.path.join(bucket_dir, uuid + '.bam')]
subprocess.check_call(s3am_command) |
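As a side note, the bucket/key split above can be checked in isolation. This standalone sketch mirrors the same string handling; the helper name and example path are made up:
import posixpath

def split_s3_dir(s3_dir, filename):
    # Mirrors the parsing above: 'bucket/sub/dir' -> ('bucket', 'sub/dir/<filename>')
    parts = s3_dir.lstrip('/').split('/')
    bucket, prefix = parts[0], '/'.join(parts[1:])
    return bucket, posixpath.join(prefix, filename) if prefix else filename

print(split_s3_dir('cgl-driver-projects/test/alignments', 'abc.bam'))
# ('cgl-driver-projects', 'test/alignments/abc.bam')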
<SYSTEM_TASK:>
Tars a group of files together into a tarball
<END_TASK>
<USER_TASK:>
Description:
def tarball_files(work_dir, tar_name, uuid=None, files=None):
"""
Tars a group of files together into a tarball
work_dir: str Current Working Directory
tar_name: str Name of tarball
uuid: str UUID to stamp files with
files: list List of filenames to place in the tarball from the working directory
""" |
with tarfile.open(os.path.join(work_dir, tar_name), 'w:gz') as f_out:
for fname in files:
if uuid:
f_out.add(os.path.join(work_dir, fname), arcname=uuid + '.' + fname)
else:
f_out.add(os.path.join(work_dir, fname), arcname=fname) |
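A quick self-contained check of the naming behaviour, using throwaway files in a temp directory (nothing here touches the pipeline):
import os
import tarfile
import tempfile

demo_dir = tempfile.mkdtemp()
for name in ('exon_quant', 'exon_quant.bed'):
    with open(os.path.join(demo_dir, name), 'w') as f:
        f.write('placeholder\n')
tarball_files(demo_dir, tar_name='exon.tar.gz', uuid='sample-1', files=['exon_quant', 'exon_quant.bed'])
with tarfile.open(os.path.join(demo_dir, 'exon.tar.gz')) as tar:
    print(tar.getnames())   # ['sample-1.exon_quant', 'sample-1.exon_quant.bed']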
<SYSTEM_TASK:>
This function will administer 5 jobs at a time then recursively call itself until subset is empty
<END_TASK>
<USER_TASK:>
Description:
def start_batch(job, input_args):
"""
This function will administer 5 jobs at a time then recursively call itself until subset is empty
""" |
samples = parse_sra(input_args['sra'])
# for analysis_id in samples:
job.addChildJobFn(download_and_transfer_sample, input_args, samples, cores=1, disk='30') |
<SYSTEM_TASK:>
Downloads a sample from dbGaP via SRAToolKit, then uses S3AM to transfer it to S3
<END_TASK>
<USER_TASK:>
Description:
def download_and_transfer_sample(job, input_args, samples):
"""
Downloads a sample from dbGaP via SRAToolKit, then uses S3AM to transfer it to S3
input_args: dict Dictionary of input arguments
samples: list Analysis IDs for samples in dbGaP (the list is split recursively into single-sample jobs)
""" |
if len(samples) > 1:
a = samples[len(samples)/2:]
b = samples[:len(samples)/2]
job.addChildJobFn(download_and_transfer_sample, input_args, a, disk='30G')
job.addChildJobFn(download_and_transfer_sample, input_args, b, disk='30G')
else:
analysis_id = samples[0]
work_dir = job.fileStore.getLocalTempDir()
sudo = input_args['sudo']
# Acquire dbgap_key
shutil.copy(input_args['dbgap_key'], os.path.join(work_dir, 'dbgap.ngc'))
# Call to fastq-dump to pull down SRA files and convert to fastq
if input_args['single_end']:
parameters = [analysis_id]
else:
parameters = ['--split-files', analysis_id]
docker_call(tool='quay.io/ucsc_cgl/fastq-dump:2.5.7--4577a6c1a3c94adaa0c25dd6c03518ee610433d1',
work_dir=work_dir, tool_parameters=parameters, sudo=sudo)
# Collect files and encapsulate into a tarball
shutil.rmtree(os.path.join(work_dir, 'sra'))
sample_name = analysis_id + '.tar.gz'
if input_args['single_end']:
r = [os.path.basename(x) for x in glob.glob(os.path.join(work_dir, '*.f*'))]
tarball_files(work_dir, tar_name=sample_name, files=r)
else:
r1 = [os.path.basename(x) for x in glob.glob(os.path.join(work_dir, '*_1*'))]
r2 = [os.path.basename(x) for x in glob.glob(os.path.join(work_dir, '*_2*'))]
tarball_files(work_dir, tar_name=sample_name, files=r1 + r2)
# Parse s3_dir to get bucket and s3 path
key_path = input_args['ssec']
s3_dir = input_args['s3_dir']
bucket_name = s3_dir.lstrip('/').split('/')[0]
base_url = 'https://s3-us-west-2.amazonaws.com/'
url = os.path.join(base_url, bucket_name, sample_name)
# Generate keyfile for upload
with open(os.path.join(work_dir, 'temp.key'), 'wb') as f_out:
f_out.write(generate_unique_key(key_path, url))
# Upload to S3 via S3AM
s3am_command = ['s3am',
'upload',
'--sse-key-file', os.path.join(work_dir, 'temp.key'),
'file://{}'.format(os.path.join(work_dir, sample_name)),
's3://' + bucket_name + '/']
subprocess.check_call(s3am_command) |
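The recursion above fans work out by repeatedly halving the sample list until each leaf job handles a single analysis ID. A plain-Python sketch of the same splitting rule (no Toil needed) shows every sample ending up in its own leaf:
def split_batches(samples):
    # Same halving rule as above: right half first, then left half
    if len(samples) > 1:
        mid = len(samples) // 2
        return split_batches(samples[mid:]) + split_batches(samples[:mid])
    return [samples[0]]

print(split_batches(['s1', 's2', 's3', 's4', 's5']))
# ['s5', 's4', 's3', 's2', 's1'] -- five leaves, one sample each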
<SYSTEM_TASK:>
Uploads a file from the FileStore to an output directory on the local filesystem or S3.
<END_TASK>
<USER_TASK:>
Description:
def output_file_job(job, filename, file_id, output_dir, s3_key_path=None):
"""
Uploads a file from the FileStore to an output directory on the local filesystem or S3.
:param JobFunctionWrappingJob job: passed automatically by Toil
:param str filename: basename for file
:param str file_id: FileStoreID
:param str output_dir: Amazon S3 URL or local path
:param str s3_key_path: (OPTIONAL) Path to 32-byte key to be used for SSE-C encryption
:return:
""" |
job.fileStore.logToMaster('Writing {} to {}'.format(filename, output_dir))
work_dir = job.fileStore.getLocalTempDir()
filepath = job.fileStore.readGlobalFile(file_id, os.path.join(work_dir, filename))
if urlparse(output_dir).scheme == 's3':
s3am_upload(job=job, fpath=os.path.join(work_dir, filepath),
s3_dir=output_dir,
s3_key_path=s3_key_path)
elif os.path.exists(os.path.join(output_dir, filename)):
job.fileStore.logToMaster("File already exists: {}".format(filename))
else:
mkdir_p(output_dir)
copy_files([filepath], output_dir) |
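The destination branch above keys off the URL scheme; a minimal illustration with invented paths:
from urlparse import urlparse  # Python 2; use 'from urllib.parse import urlparse' on Python 3

for dest in ('s3://my-bucket/outputs', '/mnt/outputs'):
    print('{} -> {}'.format(dest, urlparse(dest).scheme or 'local path'))
# s3://my-bucket/outputs -> s3
# /mnt/outputs -> local path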
<SYSTEM_TASK:>
Downloads encrypted files from S3 via header injection
<END_TASK>
<USER_TASK:>
Description:
def download_encrypted_file(job, input_args, name):
"""
Downloads encrypted files from S3 via header injection
input_args: dict Input dictionary defined in main()
name: str Symbolic name associated with file
""" |
work_dir = job.fileStore.getLocalTempDir()
key_path = input_args['ssec']
file_path = os.path.join(work_dir, name)
url = input_args[name]
with open(key_path, 'r') as f:
key = f.read()
if len(key) != 32:
raise RuntimeError('Invalid Key! Must be 32 bytes: {}'.format(key))
key = generate_unique_key(key_path, url)
encoded_key = base64.b64encode(key)
encoded_key_md5 = base64.b64encode(hashlib.md5(key).digest())
h1 = 'x-amz-server-side-encryption-customer-algorithm:AES256'
h2 = 'x-amz-server-side-encryption-customer-key:{}'.format(encoded_key)
h3 = 'x-amz-server-side-encryption-customer-key-md5:{}'.format(encoded_key_md5)
try:
subprocess.check_call(['curl', '-fs', '--retry', '5', '-H', h1, '-H', h2, '-H', h3, url, '-o', file_path])
except OSError:
raise RuntimeError('Failed to find "curl". Install via "apt-get install curl"')
assert os.path.exists(file_path)
return job.fileStore.writeGlobalFile(file_path) |
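The three headers injected above follow the S3 SSE-C convention: the key is sent base64-encoded along with a base64-encoded MD5 of the raw key. A standalone sketch with a dummy 32-byte key:
import base64
import hashlib

def ssec_headers(key):
    # Builds the same three SSE-C headers used in the curl call above
    assert len(key) == 32, 'SSE-C keys must be exactly 32 bytes'
    encoded_key = base64.b64encode(key).decode('ascii')
    encoded_md5 = base64.b64encode(hashlib.md5(key).digest()).decode('ascii')
    return ['x-amz-server-side-encryption-customer-algorithm:AES256',
            'x-amz-server-side-encryption-customer-key:{}'.format(encoded_key),
            'x-amz-server-side-encryption-customer-key-md5:{}'.format(encoded_md5)]

for header in ssec_headers(b'0' * 32):   # dummy key, not for real use
    print(header)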
<SYSTEM_TASK:>
Simple curl request made for a given url
<END_TASK>
<USER_TASK:>
Description:
def download_from_url(job, url):
"""
Simple curl request made for a given url
url: str URL to download
""" |
work_dir = job.fileStore.getLocalTempDir()
file_path = os.path.join(work_dir, os.path.basename(url))
if not os.path.exists(file_path):
if url.startswith('s3:'):
download_from_s3_url(file_path, url)
else:
try:
subprocess.check_call(['curl', '-fs', '--retry', '5', '--create-dir', url, '-o', file_path])
except OSError:
raise RuntimeError('Failed to find "curl". Install via "apt-get install curl"')
assert os.path.exists(file_path)
return job.fileStore.writeGlobalFile(file_path) |
<SYSTEM_TASK:>
Makes subprocess call of a command to a docker container.
<END_TASK>
<USER_TASK:>
Description:
def docker_call(work_dir, tool_parameters, tool, java_opts=None, outfile=None, sudo=False):
"""
Makes subprocess call of a command to a docker container.
tool_parameters: list An array of the parameters to be passed to the tool
tool: str Name of the Docker image to be used (e.g. quay.io/ucsc_cgl/samtools)
java_opts: str Optional commands to pass to a java jar execution. (e.g. '-Xmx15G')
outfile: file Filehandle that stdout will be passed to
sudo: bool If the user wants the docker command executed as sudo
""" |
base_docker_call = 'docker run --log-driver=none --rm -v {}:/data'.format(work_dir).split()
if sudo:
base_docker_call = ['sudo'] + base_docker_call
if java_opts:
base_docker_call = base_docker_call + ['-e', 'JAVA_OPTS={}'.format(java_opts)]
try:
if outfile:
subprocess.check_call(base_docker_call + [tool] + tool_parameters, stdout=outfile)
else:
subprocess.check_call(base_docker_call + [tool] + tool_parameters)
except subprocess.CalledProcessError:
raise RuntimeError('docker command returned a non-zero exit status. Check error logs.')
except OSError:
raise RuntimeError('docker not found on system. Install on all nodes.') |
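To see what actually gets executed, the argv list can be assembled and printed without Docker installed; the tool and parameters below are just an example:
work_dir = '/tmp/example_work_dir'
tool = 'quay.io/ucsc_cgl/samtools'
tool_parameters = ['faidx', '/data/ref.fa']
base_docker_call = 'docker run --log-driver=none --rm -v {}:/data'.format(work_dir).split()
print(' '.join(base_docker_call + [tool] + tool_parameters))
# docker run --log-driver=none --rm -v /tmp/example_work_dir:/data quay.io/ucsc_cgl/samtools faidx /data/ref.fa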
<SYSTEM_TASK:>
Copies a list of files from work_dir to output_dir.
<END_TASK>
<USER_TASK:>
Description:
def copy_to_output_dir(work_dir, output_dir, uuid=None, files=list()):
"""
Copies a list of files from work_dir to output_dir.
work_dir: str Current working directory
output_dir: str Output directory for files to go
uuid: str UUID to "stamp" onto output files
files: list List of files to iterate through
""" |
for fname in files:
if uuid is None:
shutil.copy(os.path.join(work_dir, fname), os.path.join(output_dir, fname))
else:
shutil.copy(os.path.join(work_dir, fname), os.path.join(output_dir, '{}.{}'.format(uuid, fname))) |
<SYSTEM_TASK:>
Checks that dependency programs are installed.
<END_TASK>
<USER_TASK:>
Description:
def program_checks(job, input_args):
"""
Checks that dependency programs are installed.
input_args: dict Dictionary of input arguments (from main())
""" |
# Program checks
for program in ['curl', 'docker', 'unzip', 'samtools']:
assert which(program), 'Program "{}" must be installed on every node.'.format(program)
job.addChildJobFn(download_shared_files, input_args) |
<SYSTEM_TASK:>
Downloads and stores shared input files in the FileStore
<END_TASK>
<USER_TASK:>
Description:
def download_shared_files(job, input_args):
"""
Downloads and stores shared input files in the FileStore
input_args: dict Dictionary of input arguments (from main())
""" |
shared_files = ['unc.bed', 'hg19.transcripts.fa', 'composite_exons.bed', 'normalize.pl', 'rsem_ref.zip',
'ebwt.zip', 'chromosomes.zip']
shared_ids = {}
for f in shared_files:
shared_ids[f] = job.addChildJobFn(download_from_url, input_args[f]).rv()
if input_args['config'] or input_args['config_fastq']:
job.addFollowOnJobFn(parse_config_file, shared_ids, input_args)
else:
sample_path = input_args['input']
uuid = os.path.splitext(os.path.basename(sample_path))[0]
sample = (uuid, sample_path)
job.addFollowOnJobFn(download_sample, shared_ids, input_args, sample) |
<SYSTEM_TASK:>
Launches pipeline for each sample.
<END_TASK>
<USER_TASK:>
Description:
def parse_config_file(job, ids, input_args):
"""
Launches pipeline for each sample.
shared_ids: dict Dictionary of fileStore IDs
input_args: dict Dictionary of input arguments
""" |
samples = []
config = input_args['config']
with open(config, 'r') as f:
for line in f.readlines():
if not line.isspace():
sample = line.strip().split(',')
samples.append(sample)
for sample in samples:
job.addChildJobFn(download_sample, ids, input_args, sample) |
<SYSTEM_TASK:>
Defines variables unique to a sample that are used in the rest of the pipeline
<END_TASK>
<USER_TASK:>
Description:
def download_sample(job, ids, input_args, sample):
"""
Defines variables unique to a sample that are used in the rest of the pipeline
ids: dict Dictionary of fileStore IDS
input_args: dict Dictionary of input arguments
sample: tuple Contains uuid and sample_url
""" |
if len(sample) == 2:
uuid, sample_location = sample
url1, url2 = None, None
else:
uuid, url1, url2 = sample
sample_location = None
# Update values unique to sample
sample_input = dict(input_args)
sample_input['uuid'] = uuid
sample_input['sample.tar'] = sample_location
if sample_input['output_dir']:
sample_input['output_dir'] = os.path.join(input_args['output_dir'], uuid)
sample_input['cpu_count'] = multiprocessing.cpu_count()
job_vars = (sample_input, ids)
# Download or locate local file and place in the jobStore
if sample_input['input']:
ids['sample.tar'] = job.fileStore.writeGlobalFile(os.path.abspath(sample_location))
elif sample_input['config_fastq']:
ids['R1.fastq'] = job.fileStore.writeGlobalFile(urlparse(url1).path)
ids['R2.fastq'] = job.fileStore.writeGlobalFile(urlparse(url2).path)
else:
if sample_input['ssec']:
ids['sample.tar'] = job.addChildJobFn(download_encrypted_file, sample_input, 'sample.tar', disk='25G').rv()
else:
ids['sample.tar'] = job.addChildJobFn(download_from_url, sample_input['sample.tar'], disk='25G').rv()
job.addFollowOnJobFn(static_dag_launchpoint, job_vars) |
<SYSTEM_TASK:>
Statically define jobs in the pipeline
<END_TASK>
<USER_TASK:>
Description:
def static_dag_launchpoint(job, job_vars):
"""
Statically define jobs in the pipeline
job_vars: tuple Tuple of dictionaries: input_args and ids
""" |
input_args, ids = job_vars
if input_args['config_fastq']:
cores = input_args['cpu_count']
a = job.wrapJobFn(mapsplice, job_vars, cores=cores, disk='130G').encapsulate()
else:
a = job.wrapJobFn(merge_fastqs, job_vars, disk='70 G').encapsulate()
b = job.wrapJobFn(consolidate_output, job_vars, a.rv())
# Take advantage of "encapsulate" to simplify pipeline wiring
job.addChild(a)
a.addChild(b) |
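For context, a bare-bones sketch of the encapsulate() pattern used above, assuming a Toil installation; the job bodies are placeholders and this only shows the wiring, not a full runnable workflow:
from toil.job import Job

def inner(job):
    job.fileStore.logToMaster('dynamically added child')

def step_a(job):
    # Spawns children at runtime; encapsulate() hides them from the caller
    job.addChildJobFn(inner)

def step_b(job):
    job.fileStore.logToMaster('runs only after everything under step_a')

a = Job.wrapJobFn(step_a).encapsulate()
b = Job.wrapJobFn(step_b)
a.addChild(b)   # b waits for step_a *and* every job step_a adds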
<SYSTEM_TASK:>
Unzips input sample and concats the Read1 and Read2 groups together.
<END_TASK>
<USER_TASK:>
Description:
def merge_fastqs(job, job_vars):
"""
Unzips input sample and concats the Read1 and Read2 groups together.
job_vars: tuple Tuple of dictionaries: input_args and ids
""" |
input_args, ids = job_vars
work_dir = job.fileStore.getLocalTempDir()
cores = input_args['cpu_count']
single_end_reads = input_args['single_end_reads']
# I/O
sample = return_input_paths(job, work_dir, ids, 'sample.tar')
# Untar File
# subprocess.check_call(['unzip', sample, '-d', work_dir])
subprocess.check_call(['tar', '-xvf', sample, '-C', work_dir])
# Remove large files before creating concat versions.
os.remove(os.path.join(work_dir, 'sample.tar'))
# Zcat files in parallel
if single_end_reads:
files = sorted(glob.glob(os.path.join(work_dir, '*')))
with open(os.path.join(work_dir, 'R1.fastq'), 'w') as f1:
subprocess.check_call(['zcat'] + files, stdout=f1)
# FileStore
ids['R1.fastq'] = job.fileStore.writeGlobalFile(os.path.join(work_dir, 'R1.fastq'))
else:
r1_files = sorted(glob.glob(os.path.join(work_dir, '*R1*')))
r2_files = sorted(glob.glob(os.path.join(work_dir, '*R2*')))
with open(os.path.join(work_dir, 'R1.fastq'), 'w') as f1:
p1 = subprocess.Popen(['zcat'] + r1_files, stdout=f1)
with open(os.path.join(work_dir, 'R2.fastq'), 'w') as f2:
p2 = subprocess.Popen(['zcat'] + r2_files, stdout=f2)
p1.wait()
p2.wait()
# FileStore
ids['R1.fastq'] = job.fileStore.writeGlobalFile(os.path.join(work_dir, 'R1.fastq'))
ids['R2.fastq'] = job.fileStore.writeGlobalFile(os.path.join(work_dir, 'R2.fastq'))
job.fileStore.deleteGlobalFile(ids['sample.tar'])
# Spawn child job
return job.addChildJobFn(mapsplice, job_vars, cores=cores, disk='130 G').rv() |
<SYSTEM_TASK:>
This function adds read groups to the headers
<END_TASK>
<USER_TASK:>
Description:
def add_read_groups(job, job_vars):
"""
This function adds read groups to the headers
job_vars: tuple Tuple of dictionaries: input_args and ids
""" |
input_args, ids = job_vars
work_dir = job.fileStore.getLocalTempDir()
sudo = input_args['sudo']
# I/O
alignments = return_input_paths(job, work_dir, ids, 'alignments.bam')
output = os.path.join(work_dir, 'rg_alignments.bam')
# Command and call
parameter = ['AddOrReplaceReadGroups',
'INPUT={}'.format(docker_path(alignments)),
'OUTPUT={}'.format(docker_path(output)),
'RGSM={}'.format(input_args['uuid']),
'RGID={}'.format(input_args['uuid']),
'RGLB=TruSeq',
'RGPL=illumina',
'RGPU=barcode',
'VALIDATION_STRINGENCY=SILENT']
docker_call(tool='quay.io/ucsc_cgl/picardtools:1.95--dd5ac549b95eb3e5d166a5e310417ef13651994e',
tool_parameters=parameter, work_dir=work_dir, sudo=sudo)
# Write to FileStore
ids['rg_alignments.bam'] = job.fileStore.writeGlobalFile(output)
# Run child job
return job.addChildJobFn(bamsort_and_index, job_vars, disk='30 G').rv() |
<SYSTEM_TASK:>
Sorts bam file and produces index file
<END_TASK>
<USER_TASK:>
Description:
def bamsort_and_index(job, job_vars):
"""
Sorts bam file and produces index file
job_vars: tuple Tuple of dictionaries: input_args and ids
""" |
# Unpack variables
input_args, ids = job_vars
work_dir = job.fileStore.getLocalTempDir()
sudo = input_args['sudo']
# I/O
rg_alignments = return_input_paths(job, work_dir, ids, 'rg_alignments.bam')
output = os.path.join(work_dir, 'sorted.bam')
# Command -- second argument is "Output Prefix"
cmd1 = ['sort', docker_path(rg_alignments), docker_path('sorted')]
cmd2 = ['index', docker_path(output)]
docker_call(tool='quay.io/ucsc_cgl/samtools:0.1.19--dd5ac549b95eb3e5d166a5e310417ef13651994e',
tool_parameters=cmd1, work_dir=work_dir, sudo=sudo)
docker_call(tool='quay.io/ucsc_cgl/samtools:0.1.19--dd5ac549b95eb3e5d166a5e310417ef13651994e',
tool_parameters=cmd2, work_dir=work_dir, sudo=sudo)
# Write to FileStore
ids['sorted.bam'] = job.fileStore.writeGlobalFile(output)
ids['sorted.bam.bai'] = job.fileStore.writeGlobalFile(os.path.join(work_dir, 'sorted.bam.bai'))
# Run child job
output_ids = job.addChildJobFn(sort_bam_by_reference, job_vars, disk='50 G').rv()
rseq_id = job.addChildJobFn(rseq_qc, job_vars, disk='20 G').rv()
return rseq_id, output_ids |
<SYSTEM_TASK:>
Sorts the bam by reference
<END_TASK>
<USER_TASK:>
Description:
def sort_bam_by_reference(job, job_vars):
"""
Sorts the bam by reference
job_vars: tuple Tuple of dictionaries: input_args and ids
""" |
# Unpack variables
input_args, ids = job_vars
work_dir = job.fileStore.getLocalTempDir()
# I/O
sorted_bam, sorted_bai = return_input_paths(job, work_dir, ids, 'sorted.bam', 'sorted.bam.bai')
output = os.path.join(work_dir, 'sort_by_ref.bam')
# Call: Samtools
ref_seqs = []
handle = subprocess.Popen(["samtools", "view", "-H", sorted_bam], stdout=subprocess.PIPE).stdout
for line in handle:
if line.startswith("@SQ"):
tmp = line.split("\t")
chrom = tmp[1].split(":")[1]
ref_seqs.append(chrom)
handle.close()
# Iterate through chromosomes to create mini-bams
for chrom in ref_seqs:
# job.addChildJobFn(sbbr_child, chrom, os.path.join(work_dir, chrom), sorted_bam)
cmd_view = ["samtools", "view", "-b", sorted_bam, chrom]
cmd_sort = ["samtools", "sort", "-m", "3000000000", "-n", "-", os.path.join(work_dir, chrom)]
p1 = subprocess.Popen(cmd_view, stdout=subprocess.PIPE)
subprocess.check_call(cmd_sort, stdin=p1.stdout)
sorted_files = [os.path.join(work_dir, chrom) + '.bam' for chrom in ref_seqs]
cmd = ["samtools", "cat", "-o", output] + sorted_files
subprocess.check_call(cmd)
# Write to FileStore
ids['sort_by_ref.bam'] = job.fileStore.writeGlobalFile(output)
rsem_id = job.addChildJobFn(transcriptome, job_vars, disk='30 G', memory='30 G').rv()
exon_id = job.addChildJobFn(exon_count, job_vars, disk='30 G').rv()
return exon_id, rsem_id |
<SYSTEM_TASK:>
Produces exon counts
<END_TASK>
<USER_TASK:>
Description:
def exon_count(job, job_vars):
"""
Produces exon counts
job_vars: tuple Tuple of dictionaries: input_args and ids
""" |
input_args, ids = job_vars
work_dir = job.fileStore.getLocalTempDir()
uuid = input_args['uuid']
sudo = input_args['sudo']
# I/O
sort_by_ref, normalize_pl, composite_bed = return_input_paths(job, work_dir, ids, 'sort_by_ref.bam',
'normalize.pl', 'composite_exons.bed')
# Command
tool = 'jvivian/bedtools'
cmd_1 = ['coverage',
'-split',
'-abam', docker_path(sort_by_ref),
'-b', docker_path(composite_bed)]
cmd_2 = ['perl',
os.path.join(work_dir, 'normalize.pl'),
sort_by_ref,
composite_bed]
popen_docker = ['docker', 'run', '-v', '{}:/data'.format(work_dir), tool]
if sudo:
popen_docker = ['sudo'] + popen_docker
p = subprocess.Popen(popen_docker + cmd_1, stdout=subprocess.PIPE)
with open(os.path.join(work_dir, 'exon_quant'), 'w') as f:
subprocess.check_call(cmd_2, stdin=p.stdout, stdout=f)
p1 = subprocess.Popen(['cat', os.path.join(work_dir, 'exon_quant')], stdout=subprocess.PIPE)
p2 = subprocess.Popen(['tr', '":"', '"\t"'], stdin=p1.stdout, stdout=subprocess.PIPE)
p3 = subprocess.Popen(['tr', '"-"', '"\t"'], stdin=p2.stdout, stdout=subprocess.PIPE)
with open(os.path.join(work_dir, 'exon_quant.bed'), 'w') as f:
subprocess.check_call(['cut', '-f1-4'], stdin=p3.stdout, stdout=f)
# Create zip, upload to fileStore, and move to output_dir as a backup
output_files = ['exon_quant.bed', 'exon_quant']
tarball_files(work_dir, tar_name='exon.tar.gz', uuid=uuid, files=output_files)
return job.fileStore.writeGlobalFile(os.path.join(work_dir, 'exon.tar.gz')) |
<SYSTEM_TASK:>
Creates a bam of just the transcriptome
<END_TASK>
<USER_TASK:>
Description:
def transcriptome(job, job_vars):
"""
Creates a bam of just the transcriptome
job_vars: tuple Tuple of dictionaries: input_args and ids
""" |
input_args, ids = job_vars
work_dir = job.fileStore.getLocalTempDir()
sudo = input_args['sudo']
# I/O
sort_by_ref, bed, hg19_fa = return_input_paths(job, work_dir, ids, 'sort_by_ref.bam',
'unc.bed', 'hg19.transcripts.fa')
output = os.path.join(work_dir, 'transcriptome.bam')
# Command
parameters = ['sam-xlate',
'--bed', docker_path(bed),
'--in', docker_path(sort_by_ref),
'--order', docker_path(hg19_fa),
'--out', docker_path(output),
'--xgtag',
'--reverse']
docker_call(tool='quay.io/ucsc_cgl/ubu:1.2--02806964cdf74bf5c39411b236b4c4e36d026843',
tool_parameters=parameters, work_dir=work_dir, java_opts='-Xmx30g', sudo=sudo)
# Write to FileStore
ids['transcriptome.bam'] = job.fileStore.writeGlobalFile(output)
# Run child job
return job.addChildJobFn(filter_bam, job_vars, memory='30G', disk='30G').rv() |
<SYSTEM_TASK:>
Performs filtering on the transcriptome bam
<END_TASK>
<USER_TASK:>
Description:
def filter_bam(job, job_vars):
"""
Performs filtering on the transcriptome bam
job_vars: tuple Tuple of dictionaries: input_args and ids
""" |
input_args, ids = job_vars
work_dir = job.fileStore.getLocalTempDir()
cores = input_args['cpu_count']
sudo = input_args['sudo']
# I/O
transcriptome_bam = return_input_paths(job, work_dir, ids, 'transcriptome.bam')
output = os.path.join(work_dir, 'filtered.bam')
# Command
parameters = ['sam-filter',
'--strip-indels',
'--max-insert', '1000',
'--mapq', '1',
'--in', docker_path(transcriptome_bam),
'--out', docker_path(output)]
docker_call(tool='quay.io/ucsc_cgl/ubu:1.2--02806964cdf74bf5c39411b236b4c4e36d026843',
tool_parameters=parameters, work_dir=os.path.dirname(output), java_opts='-Xmx30g', sudo=sudo)
# Write to FileStore
ids['filtered.bam'] = job.fileStore.writeGlobalFile(output)
# Run child job
return job.addChildJobFn(rsem, job_vars, cores=cores, disk='30 G').rv() |
<SYSTEM_TASK:>
Runs RSEM to produce counts
<END_TASK>
<USER_TASK:>
Description:
def rsem(job, job_vars):
"""
Runs RSEM to produce counts
job_vars: tuple Tuple of dictionaries: input_args and ids
""" |
input_args, ids = job_vars
work_dir = job.fileStore.getLocalTempDir()
cpus = input_args['cpu_count']
sudo = input_args['sudo']
single_end_reads = input_args['single_end_reads']
# I/O
filtered_bam, rsem_ref = return_input_paths(job, work_dir, ids, 'filtered.bam', 'rsem_ref.zip')
subprocess.check_call(['unzip', '-o', os.path.join(work_dir, 'rsem_ref.zip'), '-d', work_dir])
output_prefix = 'rsem'
# Make tool call to Docker
parameters = ['--quiet',
'--no-qualities',
'-p', str(cpus),
'--forward-prob', '0.5',
'--seed-length', '25',
'--fragment-length-mean', '-1.0',
'--bam', docker_path(filtered_bam)]
if not single_end_reads:
parameters.extend(['--paired-end'])
parameters.extend(['/data/rsem_ref/hg19_M_rCRS_ref', output_prefix])
docker_call(tool='quay.io/ucsc_cgl/rsem:1.2.25--4e8d1b31d4028f464b3409c6558fb9dfcad73f88',
tool_parameters=parameters, work_dir=work_dir, sudo=sudo)
os.rename(os.path.join(work_dir, output_prefix + '.genes.results'), os.path.join(work_dir, 'rsem_gene.tab'))
os.rename(os.path.join(work_dir, output_prefix + '.isoforms.results'), os.path.join(work_dir, 'rsem_isoform.tab'))
# Write to FileStore
ids['rsem_gene.tab'] = job.fileStore.writeGlobalFile(os.path.join(work_dir, 'rsem_gene.tab'))
ids['rsem_isoform.tab'] = job.fileStore.writeGlobalFile(os.path.join(work_dir, 'rsem_isoform.tab'))
# Run child jobs
return job.addChildJobFn(rsem_postprocess, job_vars).rv() |
<SYSTEM_TASK:>
Combine the contents of separate zipped outputs into one via streaming
<END_TASK>
<USER_TASK:>
Description:
def consolidate_output(job, job_vars, output_ids):
"""
Combine the contents of separate zipped outputs into one via streaming
job_vars: tuple Tuple of dictionaries: input_args and ids
output_ids: tuple Nested tuple of all the output fileStore IDs
""" |
input_args, ids = job_vars
work_dir = job.fileStore.getLocalTempDir()
uuid = input_args['uuid']
# Retrieve IDs
rseq_id, exon_id, rsem_id = flatten(output_ids)
# Retrieve output file paths to consolidate
# map_tar = job.fileStore.readGlobalFile(map_id, os.path.join(work_dir, 'map.tar.gz'))
qc_tar = job.fileStore.readGlobalFile(rseq_id, os.path.join(work_dir, 'qc.tar.gz'))
exon_tar = job.fileStore.readGlobalFile(exon_id, os.path.join(work_dir, 'exon.tar.gz'))
rsem_tar = job.fileStore.readGlobalFile(rsem_id, os.path.join(work_dir, 'rsem.tar.gz'))
# I/O
out_tar = os.path.join(work_dir, uuid + '.tar.gz')
# Consolidate separate tarballs
with tarfile.open(os.path.join(work_dir, out_tar), 'w:gz') as f_out:
for tar in [rsem_tar, exon_tar, qc_tar]:
with tarfile.open(tar, 'r') as f_in:
for tarinfo in f_in:
with closing(f_in.extractfile(tarinfo)) as f_in_file:
if tar == qc_tar:
tarinfo.name = os.path.join(uuid, 'rseq_qc', os.path.basename(tarinfo.name))
else:
tarinfo.name = os.path.join(uuid, os.path.basename(tarinfo.name))
f_out.addfile(tarinfo, fileobj=f_in_file)
# Move to output directory if selected
if input_args['output_dir']:
output_dir = input_args['output_dir']
mkdir_p(output_dir)
copy_to_output_dir(work_dir, output_dir, uuid=None, files=[uuid + '.tar.gz'])
# Write output file to fileStore
ids['uuid.tar.gz'] = job.fileStore.writeGlobalFile(out_tar)
# If S3 bucket argument specified, upload to S3
if input_args['s3_dir']:
job.addChildJobFn(upload_output_to_s3, job_vars) |
<SYSTEM_TASK:>
This is a Toil pipeline for the UNC best practice RNA-Seq analysis.
<END_TASK>
<USER_TASK:>
Description:
def main():
"""
This is a Toil pipeline for the UNC best practice RNA-Seq analysis.
RNA-seq fastqs are combined, aligned, sorted, filtered, and quantified.
Please read the README.md located in the same directory.
""" |
# Define Parser object and add to toil
parser = build_parser()
Job.Runner.addToilOptions(parser)
args = parser.parse_args()
# Store inputs from argparse
inputs = {'config': args.config,
'config_fastq': args.config_fastq,
'input': args.input,
'unc.bed': args.unc,
'hg19.transcripts.fa': args.fasta,
'composite_exons.bed': args.composite_exons,
'normalize.pl': args.normalize,
'output_dir': args.output_dir,
'rsem_ref.zip': args.rsem_ref,
'chromosomes.zip': args.chromosomes,
'ebwt.zip': args.ebwt,
'ssec': args.ssec,
's3_dir': args.s3_dir,
'sudo': args.sudo,
'single_end_reads': args.single_end_reads,
'upload_bam_to_s3': args.upload_bam_to_s3,
'uuid': None,
'sample.tar': None,
'cpu_count': None}
# Launch jobs
Job.Runner.startToil(Job.wrapJobFn(download_shared_files, inputs), args) |
<SYSTEM_TASK:>
Remove the given file from hdfs with master at the given IP address
<END_TASK>
<USER_TASK:>
Description:
def remove_file(master_ip, filename, spark_on_toil):
"""
Remove the given file from hdfs with master at the given IP address
:type masterIP: MasterAddress
""" |
master_ip = master_ip.actual
ssh_call = ['ssh', '-o', 'StrictHostKeyChecking=no', master_ip]
if spark_on_toil:
output = check_output(ssh_call + ['docker', 'ps'])
container_id = next(line.split()[0] for line in output.splitlines() if 'apache-hadoop-master' in line)
ssh_call += ['docker', 'exec', container_id]
try:
check_call(ssh_call + ['hdfs', 'dfs', '-rm', '-r', '/' + filename])
except:
pass |
<SYSTEM_TASK:>
Downloads input data files from S3.
<END_TASK>
<USER_TASK:>
Description:
def download_data(job, master_ip, inputs, known_snps, bam, hdfs_snps, hdfs_bam):
"""
Downloads input data files from S3.
:type masterIP: MasterAddress
""" |
log.info("Downloading known sites file %s to %s.", known_snps, hdfs_snps)
call_conductor(job, master_ip, known_snps, hdfs_snps, memory=inputs.memory)
log.info("Downloading input BAM %s to %s.", bam, hdfs_bam)
call_conductor(job, master_ip, bam, hdfs_bam, memory=inputs.memory) |
<SYSTEM_TASK:>
A Toil job function performing ADAM preprocessing on a single sample
<END_TASK>
<USER_TASK:>
Description:
def static_adam_preprocessing_dag(job, inputs, sample, output_dir, suffix=''):
"""
A Toil job function performing ADAM preprocessing on a single sample
""" |
inputs.sample = sample
inputs.output_dir = output_dir
inputs.suffix = suffix
if inputs.master_ip is not None or inputs.run_local:
if not inputs.run_local and inputs.master_ip == 'auto':
# Static, standalone Spark cluster managed by uberscript
spark_on_toil = False
scale_up = job.wrapJobFn(scale_external_spark_cluster, 1)
job.addChild(scale_up)
spark_work = job.wrapJobFn(download_run_and_upload,
inputs.master_ip, inputs, spark_on_toil)
scale_up.addChild(spark_work)
scale_down = job.wrapJobFn(scale_external_spark_cluster, -1)
spark_work.addChild(scale_down)
else:
# Static, external Spark cluster
spark_on_toil = False
spark_work = job.wrapJobFn(download_run_and_upload,
inputs.master_ip, inputs, spark_on_toil)
job.addChild(spark_work)
else:
# Dynamic subclusters, i.e. Spark-on-Toil
spark_on_toil = True
cores = multiprocessing.cpu_count()
master_ip = spawn_spark_cluster(job,
False, # Sudo
inputs.num_nodes-1,
cores=cores,
memory=inputs.memory)
spark_work = job.wrapJobFn(download_run_and_upload,
master_ip, inputs, spark_on_toil)
job.addChild(spark_work) |
<SYSTEM_TASK:>
Runs GATK Hard Filtering on a Genomic VCF file and uploads the results.
<END_TASK>
<USER_TASK:>
Description:
def hard_filter_pipeline(job, uuid, vcf_id, config):
"""
Runs GATK Hard Filtering on a Genomic VCF file and uploads the results.
0: Start                  0 --> 1 --> 3 --> 5 --> 6
1: Select SNPs            |                 |
2: Select INDELs          +-> 2 --> 4 ------+
3: Apply SNP Filter
4: Apply INDEL Filter
5: Merge SNP and INDEL VCFs
6: Write filtered VCF to output directory
:param JobFunctionWrappingJob job: passed automatically by Toil
:param str uuid: Unique sample identifier
:param str vcf_id: VCF FileStoreID
:param Namespace config: Pipeline configuration options and shared files
Requires the following config attributes:
config.genome_fasta FilesStoreID for reference genome fasta file
config.genome_fai FilesStoreID for reference genome fasta index file
config.genome_dict FilesStoreID for reference genome sequence dictionary file
config.snp_filter_name Name of SNP filter for VCF header
config.snp_filter_expression SNP JEXL filter expression
config.indel_filter_name Name of INDEL filter for VCF header
config.indel_filter_expression INDEL JEXL filter expression
config.xmx Java heap size in bytes
config.suffix Suffix added to output filename
config.output_dir URL or local path to output directory
config.ssec Path to key file for SSE-C encryption
:return: SNP and INDEL FileStoreIDs
:rtype: tuple
""" |
job.fileStore.logToMaster('Running Hard Filter on {}'.format(uuid))
# Get the total size of the genome reference
genome_ref_size = config.genome_fasta.size + config.genome_fai.size + config.genome_dict.size
# The SelectVariants disk requirement depends on the input VCF, the genome reference files,
# and the output VCF. The output VCF is smaller than the input VCF. The disk requirement
# is identical for SNPs and INDELs.
select_variants_disk = PromisedRequirement(lambda vcf, ref_size: 2 * vcf.size + ref_size,
vcf_id,
genome_ref_size)
select_snps = job.wrapJobFn(gatk_select_variants,
'SNP',
vcf_id,
config.genome_fasta,
config.genome_fai,
config.genome_dict,
memory=config.xmx,
disk=select_variants_disk)
# The VariantFiltration disk requirement depends on the input VCF, the genome reference files,
# and the output VCF. The filtered VCF is smaller than the input VCF.
snp_filter_disk = PromisedRequirement(lambda vcf, ref_size: 2 * vcf.size + ref_size,
select_snps.rv(),
genome_ref_size)
snp_filter = job.wrapJobFn(gatk_variant_filtration,
select_snps.rv(),
config.snp_filter_name,
config.snp_filter_expression,
config.genome_fasta,
config.genome_fai,
config.genome_dict,
memory=config.xmx,
disk=snp_filter_disk)
select_indels = job.wrapJobFn(gatk_select_variants,
'INDEL',
vcf_id,
config.genome_fasta,
config.genome_fai,
config.genome_dict,
memory=config.xmx,
disk=select_variants_disk)
indel_filter_disk = PromisedRequirement(lambda vcf, ref_size: 2 * vcf.size + ref_size,
select_indels.rv(),
genome_ref_size)
indel_filter = job.wrapJobFn(gatk_variant_filtration,
select_indels.rv(),
config.indel_filter_name,
config.indel_filter_expression,
config.genome_fasta,
config.genome_fai,
config.genome_dict,
memory=config.xmx,
disk=indel_filter_disk)
# The CombineVariants disk requirement depends on the SNP and INDEL input VCFs and the
# genome reference files. The combined VCF is approximately the same size as the input files.
combine_vcfs_disk = PromisedRequirement(lambda vcf1, vcf2, ref_size:
2 * (vcf1.size + vcf2.size) + ref_size,
indel_filter.rv(),
snp_filter.rv(),
genome_ref_size)
combine_vcfs = job.wrapJobFn(gatk_combine_variants,
{'SNPs': snp_filter.rv(), 'INDELs': indel_filter.rv()},
config.genome_fasta,
config.genome_fai,
config.genome_dict,
merge_option='UNSORTED', # Merges variants from a single sample
memory=config.xmx,
disk=combine_vcfs_disk)
job.addChild(select_snps)
job.addChild(select_indels)
select_snps.addChild(snp_filter)
snp_filter.addChild(combine_vcfs)
select_indels.addChild(indel_filter)
indel_filter.addChild(combine_vcfs)
# Output the hard filtered VCF
output_dir = os.path.join(config.output_dir, uuid)
output_filename = '%s.hard_filter%s.vcf' % (uuid, config.suffix)
output_vcf = job.wrapJobFn(output_file_job,
output_filename,
combine_vcfs.rv(),
output_dir,
s3_key_path=config.ssec,
disk=PromisedRequirement(lambda x: x.size, combine_vcfs.rv()))
combine_vcfs.addChild(output_vcf)
return combine_vcfs.rv() |
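A stripped-down sketch of the PromisedRequirement pattern used for the disk estimates above, assuming a Toil version whose FileStoreIDs carry a .size attribute (as the lambdas above already assume); the job names here are placeholders:
from toil.job import Job, PromisedRequirement

def produce_vcf(job):
    # Stand-in for a job returning a FileStoreID whose .size the requirement uses
    path = job.fileStore.getLocalTempFile()
    return job.fileStore.writeGlobalFile(path)

def filter_vcf(job, vcf_id):
    job.fileStore.logToMaster('disk for this job was sized from the promised VCF')

def root(job, genome_ref_size):
    producer = job.addChildJobFn(produce_vcf)
    disk = PromisedRequirement(lambda vcf, ref_size: 2 * vcf.size + ref_size,
                               producer.rv(), genome_ref_size)
    producer.addFollowOnJobFn(filter_vcf, producer.rv(), disk=disk)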
<SYSTEM_TASK:>
Downloads a sample from CGHub via GeneTorrent, then uses S3AM to transfer it to S3
<END_TASK>
<USER_TASK:>
Description:
def download_and_transfer_sample(job, sample, inputs):
"""
Downloads a sample from CGHub via GeneTorrent, then uses S3AM to transfer it to S3
sample: tuple Tuple whose first element is the CGHub analysis ID for the sample
inputs: dict Dictionary of input arguments
""" |
analysis_id = sample[0]
work_dir = job.fileStore.getLocalTempDir()
folder_path = os.path.join(work_dir, os.path.basename(analysis_id))
# Acquire genetorrent key and download sample
shutil.copy(inputs['genetorrent_key'], os.path.join(work_dir, 'cghub.key'))
parameters = ['-vv', '-c', 'cghub.key', '-d', analysis_id]
docker_call(job=job, tool='quay.io/ucsc_cgl/genetorrent:3.8.7--9911761265b6f08bc3ef09f53af05f56848d805b',
work_dir=work_dir, parameters=parameters)
try:
sample = glob.glob(os.path.join(folder_path, '*tar*'))[0]
except IndexError as e:
print 'No tarfile found inside of folder: {}'.format(e)
raise
# Upload sample to S3AM
key_path = inputs['ssec']
if sample.endswith('gz'):
sample_name = analysis_id + '.tar.gz'
shutil.move(sample, os.path.join(work_dir, sample_name))
else:
sample_name = analysis_id + '.tar'
shutil.move(sample, os.path.join(work_dir, sample_name))
# Parse s3_dir to get bucket and s3 path
s3_dir = inputs['s3_dir']
bucket_name = s3_dir.lstrip('/').split('/')[0]
base_url = 'https://s3-us-west-2.amazonaws.com/'
url = os.path.join(base_url, bucket_name, sample_name)
# Generate keyfile for upload
with open(os.path.join(work_dir, 'temp.key'), 'wb') as f_out:
f_out.write(generate_unique_key(key_path, url))
# Upload to S3 via S3AM
s3am_command = ['s3am',
'upload',
'--sse-key-file', os.path.join(work_dir, 'temp.key'),
'file://{}'.format(os.path.join(work_dir, sample_name)),
's3://' + bucket_name + '/']
subprocess.check_call(s3am_command) |
<SYSTEM_TASK:>
This is a Toil pipeline to transfer TCGA data into an S3 Bucket
<END_TASK>
<USER_TASK:>
Description:
def main():
"""
This is a Toil pipeline to transfer TCGA data into an S3 Bucket
Data is pulled down with Genetorrent and transferred to S3 via S3AM.
""" |
# Define Parser object and add to toil
parser = build_parser()
Job.Runner.addToilOptions(parser)
args = parser.parse_args()
# Store inputs from argparse
inputs = {'genetorrent': args.genetorrent,
'genetorrent_key': args.genetorrent_key,
'ssec': args.ssec,
's3_dir': args.s3_dir}
# Sanity checks
if args.ssec:
assert os.path.isfile(args.ssec)
if args.genetorrent:
assert os.path.isfile(args.genetorrent)
if args.genetorrent_key:
assert os.path.isfile(args.genetorrent_key)
samples = parse_genetorrent(args.genetorrent)
# Start pipeline
# map_job accepts a function, an iterable, and *args. The function is launched as a child
# process with one element from the iterable and *args, which in turn spawns a tree of child jobs.
Job.Runner.startToil(Job.wrapJobFn(map_job, download_and_transfer_sample, samples, inputs), args) |
<SYSTEM_TASK:>
Convert a hexadecimal IPv6 address to a network byte order 128-bit
<END_TASK>
<USER_TASK:>
Description:
def ip2long(ip):
"""Convert a hexidecimal IPv6 address to a network byte order 128-bit
integer.
>>> ip2long('::') == 0
True
>>> ip2long('::1') == 1
True
>>> expect = 0x20010db885a3000000008a2e03707334
>>> ip2long('2001:db8:85a3::8a2e:370:7334') == expect
True
>>> ip2long('2001:db8:85a3:0:0:8a2e:370:7334') == expect
True
>>> ip2long('2001:0db8:85a3:0000:0000:8a2e:0370:7334') == expect
True
>>> expect = 0x20010db8000000000001000000000001
>>> ip2long('2001:db8::1:0:0:1') == expect
True
>>> expect = 281473902969472
>>> ip2long('::ffff:192.0.2.128') == expect
True
>>> expect = 0xffffffffffffffffffffffffffffffff
>>> ip2long('ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff') == expect
True
>>> ip2long('ff::ff::ff') == None
True
>>> expect = 21932261930451111902915077091070067066
>>> ip2long('1080:0:0:0:8:800:200C:417A') == expect
True
:param ip: Hexadecimal IPv6 address
:type ip: str
:returns: Network byte order 128-bit integer or ``None`` if ip is invalid.
""" |
if not validate_ip(ip):
return None
if '.' in ip:
# convert IPv4 suffix to hex
chunks = ip.split(':')
v4_int = ipv4.ip2long(chunks.pop())
if v4_int is None:
return None
chunks.append('%x' % ((v4_int >> 16) & 0xffff))
chunks.append('%x' % (v4_int & 0xffff))
ip = ':'.join(chunks)
halves = ip.split('::')
hextets = halves[0].split(':')
if len(halves) == 2:
h2 = halves[1].split(':')
for z in range(8 - (len(hextets) + len(h2))):
hextets.append('0')
for h in h2:
hextets.append(h)
# end if
lngip = 0
for h in hextets:
if '' == h:
h = '0'
lngip = (lngip << 16) | int(h, 16)
return lngip |
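A worked example of the IPv4-suffix branch: for '::ffff:192.0.2.128', the dotted quad becomes the integer 0xc0000280, which is split into the hextets c000 and 280 before the normal hextet parsing runs:
v4_int = (192 << 24) | (0 << 16) | (2 << 8) | 128   # 192.0.2.128 -> 0xc0000280
print('%x %x' % ((v4_int >> 16) & 0xffff, v4_int & 0xffff))   # c000 280
print((0xffff << 32) | v4_int)   # 281473902969472, matching the doctest above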
<SYSTEM_TASK:>
Convert a network byte order 128-bit integer to a canonical IPv6
<END_TASK>
<USER_TASK:>
Description:
def long2ip(l, rfc1924=False):
"""Convert a network byte order 128-bit integer to a canonical IPv6
address.
>>> long2ip(2130706433)
'::7f00:1'
>>> long2ip(42540766411282592856904266426630537217)
'2001:db8::1:0:0:1'
>>> long2ip(MIN_IP)
'::'
>>> long2ip(MAX_IP)
'ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff'
>>> long2ip(None) #doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
TypeError: unsupported operand type(s) for >>: 'NoneType' and 'int'
>>> long2ip(-1) #doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
TypeError: expected int between 0 and <really big int> inclusive
>>> long2ip(MAX_IP + 1) #doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
TypeError: expected int between 0 and <really big int> inclusive
>>> long2ip(ip2long('1080::8:800:200C:417A'), rfc1924=True)
'4)+k&C#VzJ4br>0wv%Yp'
>>> long2ip(ip2long('::'), rfc1924=True)
'00000000000000000000'
:param l: Network byte order 128-bit integer.
:type l: int
:param rfc1924: Encode in RFC 1924 notation (base 85)
:type rfc1924: bool
:returns: Canonical IPv6 address (eg. '::1').
:raises: TypeError
""" |
if MAX_IP < l or l < MIN_IP:
raise TypeError(
"expected int between %d and %d inclusive" % (MIN_IP, MAX_IP))
if rfc1924:
return long2rfc1924(l)
# format as one big hex value
hex_str = '%032x' % l
# split into double octet chunks without padding zeros
hextets = ['%x' % int(hex_str[x:x + 4], 16) for x in range(0, 32, 4)]
# find and remove left most longest run of zeros
dc_start, dc_len = (-1, 0)
run_start, run_len = (-1, 0)
for idx, hextet in enumerate(hextets):
if '0' == hextet:
run_len += 1
if -1 == run_start:
run_start = idx
if run_len > dc_len:
dc_len, dc_start = (run_len, run_start)
else:
run_len, run_start = (0, -1)
# end for
if dc_len > 1:
dc_end = dc_start + dc_len
if dc_end == len(hextets):
hextets += ['']
hextets[dc_start:dc_end] = ['']
if dc_start == 0:
hextets = [''] + hextets
# end if
return ':'.join(hextets) |
<SYSTEM_TASK:>
Convert an RFC 1924 IPv6 address to a network byte order 128-bit
<END_TASK>
<USER_TASK:>
Description:
def rfc19242long(s):
"""Convert an RFC 1924 IPv6 address to a network byte order 128-bit
integer.
>>> expect = 0
>>> rfc19242long('00000000000000000000') == expect
True
>>> expect = 21932261930451111902915077091070067066
>>> rfc19242long('4)+k&C#VzJ4br>0wv%Yp') == expect
True
>>> rfc19242long('pizza') == None
True
>>> rfc19242long('~~~~~~~~~~~~~~~~~~~~') == None
True
>>> rfc19242long('=r54lj&NUUO~Hi%c2ym0') == MAX_IP
True
:param s: RFC 1924 IPv6 address
:type ip: str
:returns: Network byte order 128-bit integer or ``None`` if ip is invalid.
""" |
global _RFC1924_REV
if not _RFC1924_RE.match(s):
return None
if _RFC1924_REV is None:
_RFC1924_REV = {v: k for k, v in enumerate(_RFC1924_ALPHABET)}
x = 0
for c in s:
x = x * 85 + _RFC1924_REV[c]
if x > MAX_IP:
return None
return x |
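The decoding is plain base-85 accumulation over the RFC 1924 alphabet (digits, upper case, lower case, then 23 punctuation characters). A standalone re-derivation of the doctest value, assuming _RFC1924_ALPHABET follows that standard ordering:
alphabet = ('0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ'
            'abcdefghijklmnopqrstuvwxyz'
            '!#$%&()*+-;<=>?@^_`{|}~')
rev = {c: i for i, c in enumerate(alphabet)}   # same idea as _RFC1924_REV
x = 0
for c in '4)+k&C#VzJ4br>0wv%Yp':
    x = x * 85 + rev[c]
print(x)   # 21932261930451111902915077091070067066, as in the doctest above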
<SYSTEM_TASK:>
Validate a CIDR notation ip address.
<END_TASK>
<USER_TASK:>
Description:
def validate_cidr(s):
"""Validate a CIDR notation ip address.
The string is considered a valid CIDR address if it consists of a valid
IPv6 address in hextet format followed by a forward slash (/) and a bit
mask length (0-128).
>>> validate_cidr('::/128')
True
>>> validate_cidr('::/0')
True
>>> validate_cidr('fc00::/7')
True
>>> validate_cidr('::ffff:0:0/96')
True
>>> validate_cidr('::')
False
>>> validate_cidr('::/129')
False
>>> validate_cidr(None) #doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
TypeError: expected string or buffer
:param s: String to validate as a CIDR notation ip address.
:type s: str
:returns: ``True`` if a valid CIDR address, ``False`` otherwise.
:raises: TypeError
""" |
if _CIDR_RE.match(s):
ip, mask = s.split('/')
if validate_ip(ip):
if int(mask) > 128:
return False
else:
return False
return True
return False |
<SYSTEM_TASK:>
Download the input sample
<END_TASK>
<USER_TASK:>
Description:
def download_sample(job, sample, inputs):
"""
Download the input sample
:param JobFunctionWrappingJob job: passed by Toil automatically
:param tuple sample: Tuple containing (UUID,URL) of a sample
:param Namespace inputs: Stores input arguments (see main)
""" |
uuid, url = sample
job.fileStore.logToMaster('Downloading sample: {}'.format(uuid))
# Download sample
tar_id = job.addChildJobFn(download_url_job, url, s3_key_path=inputs.ssec, disk='30G').rv()
# Create copy of inputs for each sample
sample_inputs = argparse.Namespace(**vars(inputs))
sample_inputs.uuid = uuid
sample_inputs.cores = multiprocessing.cpu_count()
# Call children and follow-on jobs
job.addFollowOnJobFn(process_sample, sample_inputs, tar_id, cores=2, disk='60G') |
<SYSTEM_TASK:>
Filters out adapters that may be left in the RNA-seq files
<END_TASK>
<USER_TASK:>
Description:
def cutadapt(job, inputs, r1_id, r2_id):
"""
Filters out adapters that may be left in the RNA-seq files
:param JobFunctionWrappingJob job: passed by Toil automatically
:param Namespace inputs: Stores input arguments (see main)
:param str r1_id: FileStore ID of read 1 fastq
:param str r2_id: FileStore ID of read 2 fastq
""" |
job.fileStore.logToMaster('Running CutAdapt: {}'.format(inputs.uuid))
work_dir = job.fileStore.getLocalTempDir()
inputs.improper_pair = None
# Retrieve files
job.fileStore.readGlobalFile(r1_id, os.path.join(work_dir, 'R1.fastq'))
job.fileStore.readGlobalFile(r2_id, os.path.join(work_dir, 'R2.fastq'))
# Cutadapt parameters
parameters = ['-a', inputs.fwd_3pr_adapter,
'-m', '35',
'-A', inputs.rev_3pr_adapter,
'-o', '/data/R1_cutadapt.fastq',
'-p', '/data/R2_cutadapt.fastq',
'/data/R1.fastq', '/data/R2.fastq']
# Call: CutAdapt
base_docker_call = 'docker run --log-driver=none --rm -v {}:/data'.format(work_dir).split()
if inputs.sudo:
base_docker_call = ['sudo'] + base_docker_call
tool = 'quay.io/ucsc_cgl/cutadapt:1.9--6bd44edd2b8f8f17e25c5a268fedaab65fa851d2'
p = subprocess.Popen(base_docker_call + [tool] + parameters, stderr=subprocess.PIPE, stdout=subprocess.PIPE)
stdout, stderr = p.communicate()
if p.returncode != 0:
if 'improperly paired' in stderr:
inputs.improper_pair = True
shutil.move(os.path.join(work_dir, 'R1.fastq'), os.path.join(work_dir, 'R1_cutadapt.fastq'))
shutil.move(os.path.join(work_dir, 'R2.fastq'), os.path.join(work_dir, 'R2_cutadapt.fastq'))
# Write to fileStore
if inputs.improper_pair:
r1_cutadapt = r1_id
r2_cutadapt = r2_id
else:
r1_cutadapt = job.fileStore.writeGlobalFile(os.path.join(work_dir, 'R1_cutadapt.fastq'))
r2_cutadapt = job.fileStore.writeGlobalFile(os.path.join(work_dir, 'R2_cutadapt.fastq'))
job.fileStore.deleteGlobalFile(r1_id)
job.fileStore.deleteGlobalFile(r2_id)
# start STAR
cores = min(inputs.cores, 16)
job.addChildJobFn(star, inputs, r1_cutadapt, r2_cutadapt, cores=cores, disk='100G', memory='40G').rv() |
<SYSTEM_TASK:>
Performs alignment of fastqs to BAM via STAR
<END_TASK>
<USER_TASK:>
Description:
def star(job, inputs, r1_cutadapt, r2_cutadapt):
"""
Performs alignment of fastqs to BAM via STAR
:param JobFunctionWrappingJob job: passed by Toil automatically
:param Namespace inputs: Stores input arguments (see main)
:param str r1_cutadapt: FileStore ID of read 1 fastq
:param str r2_cutadapt: FileStore ID of read 2 fastq
""" |
job.fileStore.logToMaster('Aligning with STAR: {}'.format(inputs.uuid))
work_dir = job.fileStore.getLocalTempDir()
cores = min(inputs.cores, 16)
# Retrieve files
job.fileStore.readGlobalFile(r1_cutadapt, os.path.join(work_dir, 'R1_cutadapt.fastq'))
job.fileStore.readGlobalFile(r2_cutadapt, os.path.join(work_dir, 'R2_cutadapt.fastq'))
# Get starIndex
download_url(job=job, url=inputs.star_index, work_dir=work_dir, name='starIndex.tar.gz')
subprocess.check_call(['tar', '-xvf', os.path.join(work_dir, 'starIndex.tar.gz'), '-C', work_dir])
# Parameters
parameters = ['--runThreadN', str(cores),
'--genomeDir', '/data/starIndex',
'--outFileNamePrefix', 'rna',
'--outSAMtype', 'BAM', 'SortedByCoordinate',
'--outSAMunmapped', 'Within',
'--quantMode', 'TranscriptomeSAM',
'--outSAMattributes', 'NH', 'HI', 'AS', 'NM', 'MD',
'--outFilterType', 'BySJout',
'--outFilterMultimapNmax', '20',
'--outFilterMismatchNmax', '999',
'--outFilterMismatchNoverReadLmax', '0.04',
'--alignIntronMin', '20',
'--alignIntronMax', '1000000',
'--alignMatesGapMax', '1000000',
'--alignSJoverhangMin', '8',
'--alignSJDBoverhangMin', '1',
'--sjdbScore', '1',
'--readFilesIn', '/data/R1_cutadapt.fastq', '/data/R2_cutadapt.fastq']
# Call: STAR Map
docker_call(job=job, tool='quay.io/ucsc_cgl/star:2.4.2a--bcbd5122b69ff6ac4ef61958e47bde94001cfe80',
work_dir=work_dir, parameters=parameters)
# Call Samtools Index
index_command = ['index', '/data/rnaAligned.sortedByCoord.out.bam']
docker_call(job=job, work_dir=work_dir, parameters=index_command,
tool='quay.io/ucsc_cgl/samtools:1.3--256539928ea162949d8a65ca5c79a72ef557ce7c')
# fileStore
bam_id = job.fileStore.writeGlobalFile(os.path.join(work_dir, 'rnaAligned.sortedByCoord.out.bam'))
bai_id = job.fileStore.writeGlobalFile(os.path.join(work_dir, 'rnaAligned.sortedByCoord.out.bam.bai'))
job.fileStore.deleteGlobalFile(r1_cutadapt)
job.fileStore.deleteGlobalFile(r2_cutadapt)
# Launch children and follow-on
vcqc_id = job.addChildJobFn(variant_calling_and_qc, inputs, bam_id, bai_id, cores=2, disk='30G').rv()
spladder_id = job.addChildJobFn(spladder, inputs, bam_id, bai_id, disk='30G').rv()
job.addFollowOnJobFn(consolidate_output_tarballs, inputs, vcqc_id, spladder_id, disk='30G') |
<SYSTEM_TASK:>
Perform variant calling with samtools and QC with CheckBias
<END_TASK>
<USER_TASK:>
Description:
def variant_calling_and_qc(job, inputs, bam_id, bai_id):
"""
Perform variant calling with samtools and QC with CheckBias
:param JobFunctionWrappingJob job: passed by Toil automatically
:param Namespace inputs: Stores input arguments (see main)
:param str bam_id: FileStore ID of bam
:param str bai_id: FileStore ID of bam index file
:return: FileStore ID of qc tarball
:rtype: str
""" |
job.fileStore.logToMaster('Variant calling and QC: {}'.format(inputs.uuid))
work_dir = job.fileStore.getLocalTempDir()
# Pull in alignment.bam from fileStore
job.fileStore.readGlobalFile(bam_id, os.path.join(work_dir, 'alignment.bam'))
job.fileStore.readGlobalFile(bai_id, os.path.join(work_dir, 'alignment.bam.bai'))
# Download input files
input_info = [(inputs.genome, 'genome.fa'), (inputs.positions, 'positions.tsv'),
(inputs.genome_index, 'genome.fa.fai'), (inputs.gtf, 'annotation.gtf'),
(inputs.gtf_m53, 'annotation.m53')]
for url, fname in input_info:
download_url(job=job, url=url, work_dir=work_dir, name=fname)
# Part 1: Variant Calling
variant_command = ['mpileup',
'-f', 'genome.fa',
'-l', 'positions.tsv',
'-v', 'alignment.bam',
'-t', 'DP,SP,INFO/AD,INFO/ADF,INFO/ADR,INFO/DPR,SP',
'-o', '/data/output.vcf.gz']
docker_call(job=job, work_dir=work_dir, parameters=variant_command,
tool='quay.io/ucsc_cgl/samtools:1.3--256539928ea162949d8a65ca5c79a72ef557ce7c')
# Part 2: QC
qc_command = ['-o', 'qc',
'-n', 'alignment.bam',
'-a', 'annotation.gtf',
'-m', 'annotation.m53']
docker_call(job=job, work_dir=work_dir, parameters=qc_command,
tool='jvivian/checkbias:612f129--b08a1fb6526a620bbb0304b08356f2ae7c3c0ec3')
# Write output to fileStore and return ids
output_tsv = glob(os.path.join(work_dir, '*counts.tsv*'))[0]
output_vcf = os.path.join(work_dir, 'output.vcf.gz')
tarball_files('vcqc.tar.gz', file_paths=[output_tsv, output_vcf], output_dir=work_dir)
return job.fileStore.writeGlobalFile(os.path.join(work_dir, 'vcqc.tar.gz')) |
<SYSTEM_TASK:>
Run SplAdder to detect and quantify alternative splicing events
<END_TASK>
<USER_TASK:>
Description:
def spladder(job, inputs, bam_id, bai_id):
"""
Run SplAdder to detect and quantify alternative splicing events
:param JobFunctionWrappingJob job: passed by Toil automatically
:param Namespace inputs: Stores input arguments (see main)
:param str bam_id: FileStore ID of bam
:param str bai_id: FileStore ID of bam index file
:return: FileStore ID of SplAdder tarball
:rtype: str
""" |
job.fileStore.logToMaster('SplAdder: {}'.format(inputs.uuid))
work_dir = job.fileStore.getLocalTempDir()
# Pull in alignment.bam from fileStore
job.fileStore.readGlobalFile(bam_id, os.path.join(work_dir, 'alignment.bam'))
job.fileStore.readGlobalFile(bai_id, os.path.join(work_dir, 'alignment.bam.bai'))
# Download input file
download_url(job=job, url=inputs.gtf, work_dir=work_dir, name='annotation.gtf')
download_url(job=job, url=inputs.gtf_pickle, work_dir=work_dir, name='annotation.gtf.pickle')
# Call Spladder
command = ['--insert_ir=y',
'--insert_es=y',
'--insert_ni=y',
'--remove_se=n',
'--validate_sg=n',
'-b', 'alignment.bam',
'-o ', '/data',
'-a', 'annotation.gtf',
'-v', 'y',
'-c', '3',
'-M', 'single',
'-T', 'n',
'-n', '50',
'-P', 'y',
'-p', 'n',
'--sparse_bam', 'y']
docker_call(job=job, work_dir=work_dir, parameters=command, sudo=inputs.sudo, tool='jvivian/spladder:1.0')
# Write output to fileStore and return ids
output_pickle = os.path.join(work_dir, ' ', 'spladder', 'genes_graph_conf3.alignment.pickle')
if not os.path.exists(output_pickle):
matches = []
for root, dirnames, filenames in os.walk(work_dir):
for filename in fnmatch.filter(filenames, '*genes_graph*'):
matches.append(os.path.join(root, filename))
if matches:
output_pickle = matches[0]
else:
raise RuntimeError("Couldn't find genes file!")
output_filt = os.path.join(work_dir, 'alignment.filt.hdf5')
output = os.path.join(work_dir, 'alignment.hdf5')
print os.listdir(work_dir)
tarball_files('spladder.tar.gz', file_paths=[output_pickle, output_filt, output], output_dir=work_dir)
return job.fileStore.writeGlobalFile(os.path.join(work_dir, 'spladder.tar.gz')) |
<SYSTEM_TASK:>
Combine the contents of separate tarballs into one.
<END_TASK>
<USER_TASK:>
Description:
def consolidate_output_tarballs(job, inputs, vcqc_id, spladder_id):
"""
Combine the contents of separate tarballs into one.
:param JobFunctionWrappingJob job: passed by Toil automatically
:param Namespace inputs: Stores input arguments (see main)
:param str vcqc_id: FileStore ID of variant calling and QC tarball
:param str spladder_id: FileStore ID of spladder tarball
""" |
job.fileStore.logToMaster('Consolidating files and uploading: {}'.format(inputs.uuid))
work_dir = job.fileStore.getLocalTempDir()
# Retrieve IDs
uuid = inputs.uuid
# Unpack IDs
# Retrieve output file paths to consolidate
vcqc_tar = job.fileStore.readGlobalFile(vcqc_id, os.path.join(work_dir, 'vcqc.tar.gz'))
spladder_tar = job.fileStore.readGlobalFile(spladder_id, os.path.join(work_dir, 'spladder.tar.gz'))
# I/O
fname = uuid + '.tar.gz' if not inputs.improper_pair else 'IMPROPER_PAIR' + uuid + '.tar.gz'
out_tar = os.path.join(work_dir, fname)
# Consolidate separate tarballs into one
with tarfile.open(os.path.join(work_dir, out_tar), 'w:gz') as f_out:
for tar in [vcqc_tar, spladder_tar]:
with tarfile.open(tar, 'r') as f_in:
for tarinfo in f_in:
with closing(f_in.extractfile(tarinfo)) as f_in_file:
if tar == vcqc_tar:
tarinfo.name = os.path.join(uuid, 'variants_and_qc', os.path.basename(tarinfo.name))
else:
tarinfo.name = os.path.join(uuid, 'spladder', os.path.basename(tarinfo.name))
f_out.addfile(tarinfo, fileobj=f_in_file)
# Move to output directory
if inputs.output_dir:
mkdir_p(inputs.output_dir)
shutil.copy(out_tar, os.path.join(inputs.output_dir, os.path.basename(out_tar)))
# Upload to S3
if inputs.output_s3_dir:
out_id = job.fileStore.writeGlobalFile(out_tar)
job.addChildJobFn(s3am_upload_job, file_id=out_id, s3_dir=inputs.output_s3_dir,
file_name=fname, key_path=inputs.ssec, cores=inputs.cores) |
<SYSTEM_TASK:>
This Toil pipeline aligns reads and performs alternative splicing analysis.
<END_TASK>
<USER_TASK:>
Description:
def main():
"""
This Toil pipeline aligns reads and performs alternative splicing analysis.
Please read the README.md located in the same directory for run instructions.
""" |
# Define Parser object and add to toil
url_prefix = 'https://s3-us-west-2.amazonaws.com/cgl-pipeline-inputs/'
parser = argparse.ArgumentParser(description=main.__doc__, formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('--config', required=True,
help='Path to configuration file for samples, one per line. UUID,URL_to_bamfile. '
'The URL may be a standard "http://", a "file://<abs_path>", or "s3://<bucket>/<key>"')
parser.add_argument('--gtf', help='URL to annotation GTF file',
default=url_prefix + 'rnaseq_cgl/gencode.v23.annotation.gtf')
parser.add_argument('--gtf-pickle', help='Pickled GTF file',
default=url_prefix + 'spladder/gencode.v23.annotation.gtf.pickle')
parser.add_argument('--gtf-m53', help='M53 preprocessing annotation table',
default=url_prefix + 'spladder/gencode.v23.annotation.gtf.m53')
parser.add_argument('--positions', help='URL to SNP positions over genes file (TSV)',
default=url_prefix + 'spladder/positions_fixed.tsv')
parser.add_argument('--genome', help='URL to Genome fasta',
default=url_prefix + 'rnaseq_cgl/hg38_no_alt.fa')
parser.add_argument('--genome-index', help='Index file (fai) of genome',
default=url_prefix + 'spladder/hg38_no_alt.fa.fai')
parser.add_argument('--ssec', default=None, help='Path to master key used for downloading encrypted files.')
parser.add_argument('--output-s3-dir', default=None, help='S3 Directory of the form: s3://bucket/directory')
parser.add_argument('--output-dir', default=None, help='full path where final results will be output')
parser.add_argument('--sudo', action='store_true', default=False,
help='Set flag if sudo is required to run Docker.')
parser.add_argument('--star-index', help='URL to download STAR Index built from HG38/gencodev23 annotation.',
default=url_prefix + 'rnaseq_cgl/starIndex_hg38_no_alt.tar.gz')
parser.add_argument('--fwd-3pr-adapter', help="Sequence for the FWD 3' Read Adapter.", default='AGATCGGAAGAG')
parser.add_argument('--rev-3pr-adapter', help="Sequence for the REV 3' Read Adapter.", default='AGATCGGAAGAG')
Job.Runner.addToilOptions(parser)
args = parser.parse_args()
# Sanity Checks
if args.config:
assert os.path.isfile(args.config), 'Config not found at: {}'.format(args.config)
if args.ssec:
        assert os.path.isfile(args.ssec), 'Encryption key not found at: {}'.format(args.ssec)
if args.output_s3_dir:
assert args.output_s3_dir.startswith('s3://'), 'Wrong format for output s3 directory'
# Program checks
for program in ['curl', 'docker']:
assert which(program), 'Program "{}" must be installed on every node.'.format(program)
Job.Runner.startToil(Job.wrapJobFn(parse_input_samples, args), args) |
<SYSTEM_TASK:>
Validate a dotted-quad ip address.
<END_TASK>
<USER_TASK:>
Description:
def validate_ip(s):
"""Validate a dotted-quad ip address.
The string is considered a valid dotted-quad address if it consists of
    one to four octets (0-255) separated by periods (.).
>>> validate_ip('127.0.0.1')
True
>>> validate_ip('127.0')
True
>>> validate_ip('127.0.0.256')
False
>>> validate_ip(LOCALHOST)
True
>>> validate_ip(None) #doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
TypeError: expected string or buffer
:param s: String to validate as a dotted-quad ip address.
:type s: str
:returns: ``True`` if a valid dotted-quad ip address, ``False`` otherwise.
:raises: TypeError
""" |
if _DOTTED_QUAD_RE.match(s):
quads = s.split('.')
for q in quads:
if int(q) > 255:
return False
return True
return False |
<SYSTEM_TASK:>
Validate that a dotted-quad ip address is a valid netmask.
<END_TASK>
<USER_TASK:>
Description:
def validate_netmask(s):
"""Validate that a dotted-quad ip address is a valid netmask.
>>> validate_netmask('0.0.0.0')
True
>>> validate_netmask('128.0.0.0')
True
>>> validate_netmask('255.0.0.0')
True
>>> validate_netmask('255.255.255.255')
True
>>> validate_netmask(BROADCAST)
True
>>> validate_netmask('128.0.0.1')
False
>>> validate_netmask('1.255.255.0')
False
>>> validate_netmask('0.255.255.0')
False
:param s: String to validate as a dotted-quad notation netmask.
:type s: str
:returns: ``True`` if a valid netmask, ``False`` otherwise.
:raises: TypeError
""" |
if validate_ip(s):
# Convert to binary string, strip '0b' prefix, 0 pad to 32 bits
mask = bin(ip2network(s))[2:].zfill(32)
# all left most bits must be 1, all right most must be 0
seen0 = False
for c in mask:
if '1' == c:
if seen0:
return False
else:
seen0 = True
return True
else:
return False |
<SYSTEM_TASK:>
Validate a dotted-quad ip address including a netmask.
<END_TASK>
<USER_TASK:>
Description:
def validate_subnet(s):
"""Validate a dotted-quad ip address including a netmask.
The string is considered a valid dotted-quad address with netmask if it
    consists of one to four octets (0-255) separated by periods (.) followed
by a forward slash (/) and a subnet bitmask which is expressed in
dotted-quad format.
>>> validate_subnet('127.0.0.1/255.255.255.255')
True
>>> validate_subnet('127.0/255.0.0.0')
True
>>> validate_subnet('127.0/255')
True
>>> validate_subnet('127.0.0.256/255.255.255.255')
False
>>> validate_subnet('127.0.0.1/255.255.255.256')
False
>>> validate_subnet('127.0.0.0')
False
>>> validate_subnet(None)
Traceback (most recent call last):
...
TypeError: expected string or unicode
:param s: String to validate as a dotted-quad ip address with netmask.
:type s: str
:returns: ``True`` if a valid dotted-quad ip address with netmask,
``False`` otherwise.
:raises: TypeError
""" |
if isinstance(s, basestring):
if '/' in s:
start, mask = s.split('/', 2)
return validate_ip(start) and validate_netmask(mask)
else:
return False
raise TypeError("expected string or unicode") |
<SYSTEM_TASK:>
Convert a dotted-quad ip address to a network byte order 32-bit
<END_TASK>
<USER_TASK:>
Description:
def ip2long(ip):
"""Convert a dotted-quad ip address to a network byte order 32-bit
integer.
>>> ip2long('127.0.0.1')
2130706433
>>> ip2long('127.1')
2130706433
>>> ip2long('127')
2130706432
>>> ip2long('127.0.0.256') is None
True
:param ip: Dotted-quad ip address (eg. '127.0.0.1').
:type ip: str
:returns: Network byte order 32-bit integer or ``None`` if ip is invalid.
""" |
if not validate_ip(ip):
return None
quads = ip.split('.')
if len(quads) == 1:
# only a network quad
quads = quads + [0, 0, 0]
elif len(quads) < 4:
# partial form, last supplied quad is host address, rest is network
host = quads[-1:]
quads = quads[:-1] + [0, ] * (4 - len(quads)) + host
lngip = 0
for q in quads:
lngip = (lngip << 8) | int(q)
return lngip |
<SYSTEM_TASK:>
Convert a dotted-quad ip to base network number.
<END_TASK>
<USER_TASK:>
Description:
def ip2network(ip):
"""Convert a dotted-quad ip to base network number.
    This differs from :func:`ip2long` in that partial addresses are treated as
all network instead of network plus host (eg. '127.1' expands to
'127.1.0.0')
    :param ip: dotted-quad ip address (eg. '127.0.0.1').
:type ip: str
:returns: Network byte order 32-bit integer or `None` if ip is invalid.
""" |
if not validate_ip(ip):
return None
quads = ip.split('.')
netw = 0
for i in range(4):
netw = (netw << 8) | int(len(quads) > i and quads[i] or 0)
return netw |
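A quick worked comparison with ip2long for a partial address; the values follow directly from the two functions above:
# ip2network pads the missing quads on the right (all-network), while ip2long
# treats the last supplied quad as the host part.
assert ip2network('127.1') == (127 << 24) | (1 << 16)  # 2130771968, i.e. 127.1.0.0
assert ip2long('127.1') == (127 << 24) | 1             # 2130706433, i.e. 127.0.0.1
assert ip2network('10.0.0.1') == ip2long('10.0.0.1')   # full addresses agree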
<SYSTEM_TASK:>
Convert a network byte order 32-bit integer to a dotted quad ip
<END_TASK>
<USER_TASK:>
Description:
def long2ip(l):
"""Convert a network byte order 32-bit integer to a dotted quad ip
address.
>>> long2ip(2130706433)
'127.0.0.1'
>>> long2ip(MIN_IP)
'0.0.0.0'
>>> long2ip(MAX_IP)
'255.255.255.255'
>>> long2ip(None) #doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
TypeError: unsupported operand type(s) for >>: 'NoneType' and 'int'
>>> long2ip(-1) #doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
TypeError: expected int between 0 and 4294967295 inclusive
>>> long2ip(374297346592387463875) #doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
TypeError: expected int between 0 and 4294967295 inclusive
>>> long2ip(MAX_IP + 1) #doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
TypeError: expected int between 0 and 4294967295 inclusive
:param l: Network byte order 32-bit integer.
:type l: int
:returns: Dotted-quad ip address (eg. '127.0.0.1').
:raises: TypeError
""" |
if MAX_IP < l or l < MIN_IP:
raise TypeError(
"expected int between %d and %d inclusive" % (MIN_IP, MAX_IP))
return '%d.%d.%d.%d' % (
l >> 24 & 255, l >> 16 & 255, l >> 8 & 255, l & 255) |
<SYSTEM_TASK:>
Convert a dotted-quad ip address including a netmask into a tuple
<END_TASK>
<USER_TASK:>
Description:
def subnet2block(subnet):
"""Convert a dotted-quad ip address including a netmask into a tuple
containing the network block start and end addresses.
>>> subnet2block('127.0.0.1/255.255.255.255')
('127.0.0.1', '127.0.0.1')
>>> subnet2block('127/255')
('127.0.0.0', '127.255.255.255')
>>> subnet2block('127.0.1/255.255')
('127.0.0.0', '127.0.255.255')
>>> subnet2block('127.1/255.255.255.0')
('127.1.0.0', '127.1.0.255')
>>> subnet2block('127.0.0.3/255.255.255.248')
('127.0.0.0', '127.0.0.7')
>>> subnet2block('127/0')
('0.0.0.0', '255.255.255.255')
:param subnet: dotted-quad ip address with netmask
(eg. '127.0.0.1/255.0.0.0').
:type subnet: str
:returns: Tuple of block (start, end) or ``None`` if invalid.
:raises: TypeError
""" |
if not validate_subnet(subnet):
return None
ip, netmask = subnet.split('/')
prefix = netmask2prefix(netmask)
# convert dotted-quad ip to base network number
network = ip2network(ip)
return _block_from_ip_and_prefix(network, prefix) |
<SYSTEM_TASK:>
Downloads files shared by all samples in the pipeline
<END_TASK>
<USER_TASK:>
Description:
def download_shared_files(job, samples, config):
"""
Downloads files shared by all samples in the pipeline
:param JobFunctionWrappingJob job: passed automatically by Toil
:param Namespace config: Argparse Namespace object containing argument inputs
:param list[list] samples: A nested list of samples containing sample information
""" |
job.fileStore.logToMaster('Downloaded shared files')
file_names = ['reference', 'phase', 'mills', 'dbsnp', 'cosmic']
urls = [config.reference, config.phase, config.mills, config.dbsnp, config.cosmic]
for name, url in zip(file_names, urls):
if url:
vars(config)[name] = job.addChildJobFn(download_url_job, url=url).rv()
job.addFollowOnJobFn(reference_preprocessing, samples, config) |
<SYSTEM_TASK:>
Spawn the jobs that create index and dict file for reference
<END_TASK>
<USER_TASK:>
Description:
def reference_preprocessing(job, samples, config):
"""
Spawn the jobs that create index and dict file for reference
:param JobFunctionWrappingJob job: passed automatically by Toil
:param Namespace config: Argparse Namespace object containing argument inputs
:param list[list] samples: A nested list of samples containing sample information
""" |
job.fileStore.logToMaster('Processed reference files')
config.fai = job.addChildJobFn(run_samtools_faidx, config.reference).rv()
config.dict = job.addChildJobFn(run_picard_create_sequence_dictionary, config.reference).rv()
job.addFollowOnJobFn(map_job, download_sample, samples, config) |
<SYSTEM_TASK:>
Download sample and store sample specific attributes
<END_TASK>
<USER_TASK:>
Description:
def download_sample(job, sample, config):
"""
Download sample and store sample specific attributes
:param JobFunctionWrappingJob job: passed automatically by Toil
:param list sample: Contains uuid, normal URL, and tumor URL
:param Namespace config: Argparse Namespace object containing argument inputs
""" |
# Create copy of config that is sample specific
config = argparse.Namespace(**vars(config))
uuid, normal_url, tumor_url = sample
job.fileStore.logToMaster('Downloaded sample: ' + uuid)
config.uuid = uuid
config.normal = normal_url
config.tumor = tumor_url
config.cores = min(config.maxCores, int(multiprocessing.cpu_count()))
disk = '1G' if config.ci_test else '20G'
# Download sample bams and launch pipeline
config.normal_bam = job.addChildJobFn(download_url_job, url=config.normal, s3_key_path=config.ssec,
cghub_key_path=config.gtkey, disk=disk).rv()
config.tumor_bam = job.addChildJobFn(download_url_job, url=config.tumor, s3_key_path=config.ssec,
cghub_key_path=config.gtkey, disk=disk).rv()
job.addFollowOnJobFn(index_bams, config) |
<SYSTEM_TASK:>
Convenience job for handling bam indexing to make the workflow declaration cleaner
<END_TASK>
<USER_TASK:>
Description:
def index_bams(job, config):
"""
Convenience job for handling bam indexing to make the workflow declaration cleaner
:param JobFunctionWrappingJob job: passed automatically by Toil
:param Namespace config: Argparse Namespace object containing argument inputs
""" |
job.fileStore.logToMaster('Indexed sample BAMS: ' + config.uuid)
disk = '1G' if config.ci_test else '20G'
config.normal_bai = job.addChildJobFn(run_samtools_index, config.normal_bam, cores=1, disk=disk).rv()
config.tumor_bai = job.addChildJobFn(run_samtools_index, config.tumor_bam, cores=1, disk=disk).rv()
job.addFollowOnJobFn(preprocessing_declaration, config) |
<SYSTEM_TASK:>
Declare jobs related to preprocessing
<END_TASK>
<USER_TASK:>
Description:
def preprocessing_declaration(job, config):
"""
Declare jobs related to preprocessing
:param JobFunctionWrappingJob job: passed automatically by Toil
:param Namespace config: Argparse Namespace object containing argument inputs
""" |
if config.preprocessing:
job.fileStore.logToMaster('Ran preprocessing: ' + config.uuid)
disk = '1G' if config.ci_test else '20G'
mem = '2G' if config.ci_test else '10G'
processed_normal = job.wrapJobFn(run_gatk_preprocessing, config.normal_bam, config.normal_bai,
config.reference, config.dict, config.fai, config.phase, config.mills,
config.dbsnp, mem, cores=1, memory=mem, disk=disk)
processed_tumor = job.wrapJobFn(run_gatk_preprocessing, config.tumor_bam, config.tumor_bai,
config.reference, config.dict, config.fai, config.phase, config.mills,
config.dbsnp, mem, cores=1, memory=mem, disk=disk)
static_workflow = job.wrapJobFn(static_workflow_declaration, config, processed_normal.rv(0),
processed_normal.rv(1), processed_tumor.rv(0), processed_tumor.rv(1))
job.addChild(processed_normal)
job.addChild(processed_tumor)
job.addFollowOn(static_workflow)
else:
job.addFollowOnJobFn(static_workflow_declaration, config, config.normal_bam, config.normal_bai,
config.tumor_bam, config.tumor_bai) |
<SYSTEM_TASK:>
Statically declare workflow so sections can be modularly repurposed
<END_TASK>
<USER_TASK:>
Description:
def static_workflow_declaration(job, config, normal_bam, normal_bai, tumor_bam, tumor_bai):
"""
Statically declare workflow so sections can be modularly repurposed
:param JobFunctionWrappingJob job: passed automatically by Toil
:param Namespace config: Argparse Namespace object containing argument inputs
:param str normal_bam: Normal BAM FileStoreID
:param str normal_bai: Normal BAM index FileStoreID
:param str tumor_bam: Tumor BAM FileStoreID
:param str tumor_bai: Tumor BAM Index FileStoreID
""" |
# Mutation and indel tool wiring
memory = '1G' if config.ci_test else '10G'
disk = '1G' if config.ci_test else '75G'
mutect_results, pindel_results, muse_results = None, None, None
if config.run_mutect:
mutect_results = job.addChildJobFn(run_mutect, normal_bam, normal_bai, tumor_bam, tumor_bai, config.reference,
config.dict, config.fai, config.cosmic, config.dbsnp,
cores=1, memory=memory, disk=disk).rv()
if config.run_pindel:
pindel_results = job.addChildJobFn(run_pindel, normal_bam, normal_bai, tumor_bam, tumor_bai,
config.reference, config.fai,
cores=config.cores, memory=memory, disk=disk).rv()
if config.run_muse:
muse_results = job.addChildJobFn(run_muse, normal_bam, normal_bai, tumor_bam, tumor_bai,
config.reference, config.dict, config.fai, config.dbsnp,
cores=config.cores, memory=memory, disk=disk).rv()
# Pass tool results (whether None or a promised return value) to consolidation step
consolidation = job.wrapJobFn(consolidate_output, config, mutect_results, pindel_results, muse_results)
job.addFollowOn(consolidation) |
<SYSTEM_TASK:>
Combine the contents of separate tarball outputs into one via streaming
<END_TASK>
<USER_TASK:>
Description:
def consolidate_output(job, config, mutect, pindel, muse):
"""
Combine the contents of separate tarball outputs into one via streaming
:param JobFunctionWrappingJob job: passed automatically by Toil
:param Namespace config: Argparse Namespace object containing argument inputs
:param str mutect: MuTect tarball FileStoreID
:param str pindel: Pindel tarball FileStoreID
    :param str muse: MuSE tarball FileStoreID
""" |
work_dir = job.fileStore.getLocalTempDir()
mutect_tar, pindel_tar, muse_tar = None, None, None
if mutect:
mutect_tar = job.fileStore.readGlobalFile(mutect, os.path.join(work_dir, 'mutect.tar.gz'))
if pindel:
pindel_tar = job.fileStore.readGlobalFile(pindel, os.path.join(work_dir, 'pindel.tar.gz'))
if muse:
muse_tar = job.fileStore.readGlobalFile(muse, os.path.join(work_dir, 'muse.tar.gz'))
out_tar = os.path.join(work_dir, config.uuid + '.tar.gz')
# Consolidate separate tarballs into one as streams (avoids unnecessary untaring)
tar_list = [x for x in [mutect_tar, pindel_tar, muse_tar] if x is not None]
with tarfile.open(os.path.join(work_dir, out_tar), 'w:gz') as f_out:
for tar in tar_list:
with tarfile.open(tar, 'r') as f_in:
for tarinfo in f_in:
with closing(f_in.extractfile(tarinfo)) as f_in_file:
if tar is mutect_tar:
tarinfo.name = os.path.join(config.uuid, 'mutect', os.path.basename(tarinfo.name))
elif tar is pindel_tar:
tarinfo.name = os.path.join(config.uuid, 'pindel', os.path.basename(tarinfo.name))
else:
tarinfo.name = os.path.join(config.uuid, 'muse', os.path.basename(tarinfo.name))
f_out.addfile(tarinfo, fileobj=f_in_file)
# Move to output location
if urlparse(config.output_dir).scheme == 's3':
job.fileStore.logToMaster('Uploading {} to S3: {}'.format(config.uuid, config.output_dir))
s3am_upload(job=job, fpath=out_tar, s3_dir=config.output_dir, num_cores=config.cores)
else:
job.fileStore.logToMaster('Moving {} to output dir: {}'.format(config.uuid, config.output_dir))
mkdir_p(config.output_dir)
copy_files(file_paths=[out_tar], output_dir=config.output_dir) |
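The stream-merge pattern above generalizes beyond this pipeline. Below is a minimal, self-contained sketch (file names are hypothetical) of folding several tar.gz archives into one without extracting them to disk first:
import os
import tarfile
from contextlib import closing

def merge_tarballs(out_path, labeled_tars):
    """Merge the tar.gz files in labeled_tars (label -> path) into out_path,
    prefixing each member name with its label to keep the sources separate."""
    with tarfile.open(out_path, 'w:gz') as f_out:
        for label, tar_path in labeled_tars.items():
            with tarfile.open(tar_path, 'r') as f_in:
                for tarinfo in f_in:
                    if not tarinfo.isfile():
                        continue
                    with closing(f_in.extractfile(tarinfo)) as member:
                        tarinfo.name = os.path.join(label, os.path.basename(tarinfo.name))
                        f_out.addfile(tarinfo, fileobj=member)

# Hypothetical usage:
# merge_tarballs('sample.tar.gz', {'mutect': 'mutect.tar.gz', 'pindel': 'pindel.tar.gz'})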
<SYSTEM_TASK:>
Downloads shared files that are used by all samples for alignment, or generates them if they were not provided.
<END_TASK>
<USER_TASK:>
Description:
def download_reference_files(job, inputs, samples):
"""
Downloads shared files that are used by all samples for alignment, or generates them if they were not provided.
:param JobFunctionWrappingJob job: passed automatically by Toil
:param Namespace inputs: Input arguments (see main)
:param list[list[str, list[str, str]]] samples: Samples in the format [UUID, [URL1, URL2]]
""" |
# Create dictionary to store FileStoreIDs of shared input files
shared_ids = {}
urls = [('amb', inputs.amb), ('ann', inputs.ann), ('bwt', inputs.bwt),
('pac', inputs.pac), ('sa', inputs.sa)]
# Alt file is optional and can only be provided, not generated
if inputs.alt:
urls.append(('alt', inputs.alt))
# Download reference
download_ref = job.wrapJobFn(download_url_job, inputs.ref, disk='3G') # Human genomes are typically ~3G
job.addChild(download_ref)
shared_ids['ref'] = download_ref.rv()
# If FAI is provided, download it. Otherwise, generate it
if inputs.fai:
shared_ids['fai'] = job.addChildJobFn(download_url_job, inputs.fai).rv()
else:
faidx = job.wrapJobFn(run_samtools_faidx, download_ref.rv())
shared_ids['fai'] = download_ref.addChild(faidx).rv()
# If all BWA index files are provided, download them. Otherwise, generate them
if all(x[1] for x in urls):
for name, url in urls:
shared_ids[name] = job.addChildJobFn(download_url_job, url).rv()
else:
job.fileStore.logToMaster('BWA index files not provided, creating now')
bwa_index = job.wrapJobFn(run_bwa_index, download_ref.rv())
download_ref.addChild(bwa_index)
for x, name in enumerate(['amb', 'ann', 'bwt', 'pac', 'sa']):
shared_ids[name] = bwa_index.rv(x)
    # Map_job distributes each sample in samples to the download_sample_and_align function
job.addFollowOnJobFn(map_job, download_sample_and_align, samples, inputs, shared_ids) |
<SYSTEM_TASK:>
Downloads the sample and runs BWA-kit
<END_TASK>
<USER_TASK:>
Description:
def download_sample_and_align(job, sample, inputs, ids):
"""
Downloads the sample and runs BWA-kit
:param JobFunctionWrappingJob job: Passed by Toil automatically
:param tuple(str, list) sample: UUID and URLS for sample
:param Namespace inputs: Contains input arguments
:param dict ids: FileStore IDs for shared inputs
""" |
uuid, urls = sample
r1_url, r2_url = urls if len(urls) == 2 else (urls[0], None)
job.fileStore.logToMaster('Downloaded sample: {0}. R1 {1}\nR2 {2}\nStarting BWA Run'.format(uuid, r1_url, r2_url))
# Read fastq samples from file store
ids['r1'] = job.addChildJobFn(download_url_job, r1_url, s3_key_path=inputs.ssec, disk=inputs.file_size).rv()
if r2_url:
ids['r2'] = job.addChildJobFn(download_url_job, r2_url, s3_key_path=inputs.ssec, disk=inputs.file_size).rv()
else:
ids['r2'] = None
# Create config for bwakit
inputs.cores = min(inputs.maxCores, multiprocessing.cpu_count())
inputs.uuid = uuid
config = dict(**vars(inputs)) # Create config as a copy of inputs since it has values we want
config.update(ids) # Overwrite attributes with the FileStoreIDs from ids
config = argparse.Namespace(**config)
# Define and wire job functions
bam_id = job.wrapJobFn(run_bwakit, config, sort=inputs.sort, trim=inputs.trim,
disk=inputs.file_size, cores=inputs.cores)
job.addFollowOn(bam_id)
output_name = uuid + '.bam' + str(inputs.suffix) if inputs.suffix else uuid + '.bam'
if urlparse(inputs.output_dir).scheme == 's3':
bam_id.addChildJobFn(s3am_upload_job, file_id=bam_id.rv(), file_name=output_name, s3_dir=inputs.output_dir,
s3_key_path=inputs.ssec, cores=inputs.cores, disk=inputs.file_size)
else:
        mkdir_p(inputs.output_dir)
bam_id.addChildJobFn(copy_file_job, name=output_name, file_id=bam_id.rv(), output_dir=inputs.output_dir,
disk=inputs.file_size) |
<SYSTEM_TASK:>
Convert an address string to a long.
<END_TASK>
<USER_TASK:>
Description:
def _address2long(address):
"""
Convert an address string to a long.
""" |
parsed = ipv4.ip2long(address)
if parsed is None:
parsed = ipv6.ip2long(address)
return parsed |
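Expected behaviour, assuming the ipv6 helper mirrors the ipv4 functions shown above (returning None for invalid input):
assert _address2long('127.0.0.1') == 2130706433  # parsed by ipv4.ip2long (see doctest above)
assert _address2long('::1') == 1                 # ipv4 parse fails, ipv6.ip2long takes over
assert _address2long('no.such.address!') is None # rejected by both parsers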
<SYSTEM_TASK:>
Return the 0-based position of `item` in this IpRange.
<END_TASK>
<USER_TASK:>
Description:
def index(self, item):
"""
Return the 0-based position of `item` in this IpRange.
>>> r = IpRange('127.0.0.1', '127.255.255.255')
>>> r.index('127.0.0.1')
0
>>> r.index('127.255.255.255')
16777214
>>> r.index('10.0.0.1')
Traceback (most recent call last):
...
ValueError: 10.0.0.1 is not in range
:param item: Dotted-quad ip address.
:type item: str
:returns: Index of ip address in range
""" |
item = self._cast(item)
offset = item - self.startIp
if offset >= 0 and offset < self._len:
return offset
raise ValueError('%s is not in range' % self._ipver.long2ip(item)) |
<SYSTEM_TASK:>
Detach daemon process.
<END_TASK>
<USER_TASK:>
Description:
def _detach_process():
"""
Detach daemon process.
Forks the current process into a parent and a detached child. The
child process resides in its own process group, has no controlling
terminal attached and is cleaned up by the init process.
Returns ``True`` for the parent and ``False`` for the child.
""" |
# To detach from our process group we need to call ``setsid``. We
# can only do that if we aren't a process group leader. Therefore
# we fork once, which makes sure that the new child process is not
# a process group leader.
pid = os.fork()
if pid > 0:
# Parent process
# Use waitpid to "collect" the child process and avoid Zombies
os.waitpid(pid, 0)
return True
os.setsid()
# We now fork a second time and let the second's fork parent exit.
# This makes the second fork's child process an orphan. Orphans are
# cleaned up by the init process, so we won't end up with a zombie.
# In addition, the second fork's child is no longer a session
# leader and can therefore never acquire a controlling terminal.
pid = os.fork()
if pid > 0:
os._exit(os.EX_OK)
return False |
<SYSTEM_TASK:>
Block until a predicate becomes true.
<END_TASK>
<USER_TASK:>
Description:
def _block(predicate, timeout):
"""
Block until a predicate becomes true.
``predicate`` is a function taking no arguments. The call to
``_block`` blocks until ``predicate`` returns a true value. This
is done by polling ``predicate``.
``timeout`` is either ``True`` (block indefinitely) or a timeout
in seconds.
The return value is the value of the predicate after the
timeout.
""" |
if timeout:
if timeout is True:
timeout = float('Inf')
timeout = time.time() + timeout
while not predicate() and time.time() < timeout:
time.sleep(0.1)
return predicate() |
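A small usage sketch of the polling helper; the flag-file path is purely illustrative:
import os
import time

# Wait up to 5 seconds for a flag file to appear (polled every 0.1s).
ready = _block(lambda: os.path.exists('/tmp/ready.flag'), 5)

# Block until a deadline two seconds from now has passed (timeout=True means
# "poll indefinitely", so the predicate itself must eventually become true).
deadline = time.time() + 2
_block(lambda: time.time() > deadline, True)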
<SYSTEM_TASK:>
Return the PID of the process owning the lock.
<END_TASK>
<USER_TASK:>
Description:
def read_pid(self):
"""
Return the PID of the process owning the lock.
Returns ``None`` if no lock is present.
""" |
try:
with open(self._path, 'r') as f:
s = f.read().strip()
if not s:
return None
return int(s)
except IOError as e:
if e.errno == errno.ENOENT:
return None
raise |
<SYSTEM_TASK:>
Check if the daemon is running.
<END_TASK>
<USER_TASK:>
Description:
def is_running(self):
"""
Check if the daemon is running.
""" |
pid = self.get_pid()
if pid is None:
return False
# The PID file may still exist even if the daemon isn't running,
# for example if it has crashed.
try:
os.kill(pid, 0)
except OSError as e:
if e.errno == errno.ESRCH:
# In this case the PID file shouldn't have existed in
# the first place, so we remove it
self.pid_file.release()
return False
# We may also get an exception if we're not allowed to use
# kill on the process, but that means that the process does
# exist, which is all we care about here.
return True |
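The os.kill(pid, 0) probe used above is a general liveness check that works outside this class too; a standalone sketch:
import errno
import os

def pid_alive(pid):
    """Return True if a process with the given PID currently exists."""
    try:
        os.kill(pid, 0)  # signal 0 performs error checking only, nothing is sent
    except OSError as e:
        if e.errno == errno.ESRCH:   # no such process
            return False
        if e.errno == errno.EPERM:   # exists, but we lack permission to signal it
            return True
        raise
    return True

# pid_alive(os.getpid())  -> True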
<SYSTEM_TASK:>
Send a signal to the daemon process.
<END_TASK>
<USER_TASK:>
Description:
def send_signal(self, s):
"""
Send a signal to the daemon process.
The signal must have been enabled using the ``signals``
parameter of :py:meth:`Service.__init__`. Otherwise, a
``ValueError`` is raised.
""" |
self._get_signal_event(s) # Check if signal has been enabled
pid = self.get_pid()
if not pid:
raise ValueError('Daemon is not running.')
os.kill(pid, s) |
<SYSTEM_TASK:>
Tell the daemon process to stop.
<END_TASK>
<USER_TASK:>
Description:
def stop(self, block=False):
"""
Tell the daemon process to stop.
Sends the SIGTERM signal to the daemon process, requesting it
to terminate.
If ``block`` is true then the call blocks until the daemon
process has exited. This may take some time since the daemon
process will complete its on-going backup activities before
shutting down. ``block`` can either be ``True`` (in which case
it blocks indefinitely) or a timeout in seconds.
The return value is ``True`` if the daemon process has been
stopped and ``False`` otherwise.
.. versionadded:: 0.3
The ``block`` parameter
""" |
self.send_signal(signal.SIGTERM)
return _block(lambda: not self.is_running(), block) |
<SYSTEM_TASK:>
Kill the daemon process.
<END_TASK>
<USER_TASK:>
Description:
def kill(self, block=False):
"""
Kill the daemon process.
Sends the SIGKILL signal to the daemon process, killing it. You
probably want to try :py:meth:`stop` first.
If ``block`` is true then the call blocks until the daemon
process has exited. ``block`` can either be ``True`` (in which
case it blocks indefinitely) or a timeout in seconds.
Returns ``True`` if the daemon process has (already) exited and
``False`` otherwise.
The PID file is always removed, whether the process has already
exited or not. Note that this means that subsequent calls to
:py:meth:`is_running` and :py:meth:`get_pid` will behave as if
the process has exited. If you need to be sure that the process
has already exited, set ``block`` to ``True``.
.. versionadded:: 0.5.1
The ``block`` parameter
""" |
pid = self.get_pid()
if not pid:
raise ValueError('Daemon is not running.')
try:
os.kill(pid, signal.SIGKILL)
return _block(lambda: not self.is_running(), block)
except OSError as e:
if e.errno == errno.ESRCH:
raise ValueError('Daemon is not running.')
raise
finally:
self.pid_file.release() |
<SYSTEM_TASK:>
Start the daemon process.
<END_TASK>
<USER_TASK:>
Description:
def start(self, block=False):
"""
Start the daemon process.
The daemon process is started in the background and the calling
process returns.
Once the daemon process is initialized it calls the
:py:meth:`run` method.
If ``block`` is true then the call blocks until the daemon
process has started. ``block`` can either be ``True`` (in which
case it blocks indefinitely) or a timeout in seconds.
The return value is ``True`` if the daemon process has been
started and ``False`` otherwise.
.. versionadded:: 0.3
The ``block`` parameter
""" |
pid = self.get_pid()
if pid:
raise ValueError('Daemon is already running at PID %d.' % pid)
# The default is to place the PID file into ``/var/run``. This
# requires root privileges. Since not having these is a common
# problem we check a priori whether we can create the lock file.
try:
self.pid_file.acquire()
finally:
self.pid_file.release()
# Clear previously received SIGTERMs. This must be done before
# the calling process returns so that the calling process can
# call ``stop`` directly after ``start`` returns without the
# signal being lost.
self.clear_signal(signal.SIGTERM)
if _detach_process():
# Calling process returns
return _block(lambda: self.is_running(), block)
# Daemon process continues here
self._debug('Daemon has detached')
def on_signal(s, frame):
self._debug('Received signal {}'.format(s))
self._signal_events[int(s)].set()
def runner():
try:
# We acquire the PID as late as possible, since its
# existence is used to verify whether the service
# is running.
self.pid_file.acquire()
self._debug('PID file has been acquired')
self._debug('Calling `run`')
self.run()
self._debug('`run` returned without exception')
except Exception as e:
self.logger.exception(e)
except SystemExit:
self._debug('`run` called `sys.exit`')
try:
self.pid_file.release()
self._debug('PID file has been released')
except Exception as e:
self.logger.exception(e)
os._exit(os.EX_OK) # FIXME: This seems redundant
try:
setproctitle.setproctitle(self.name)
self._debug('Process title has been set')
files_preserve = (self.files_preserve +
self._get_logger_file_handles())
signal_map = {s: on_signal for s in self._signal_events}
signal_map.update({
signal.SIGTTIN: None,
signal.SIGTTOU: None,
signal.SIGTSTP: None,
})
with DaemonContext(
detach_process=False,
signal_map=signal_map,
files_preserve=files_preserve):
self._debug('Daemon context has been established')
# Python's signal handling mechanism only forwards signals to
# the main thread and only when that thread is doing something
# (e.g. not when it's waiting for a lock, etc.). If we use the
# main thread for the ``run`` method this means that we cannot
# use the synchronization devices from ``threading`` for
# communicating the reception of SIGTERM to ``run``. Hence we
# use a separate thread for ``run`` and make sure that the
# main loop receives signals. See
# https://bugs.python.org/issue1167930
thread = threading.Thread(target=runner)
thread.start()
while thread.is_alive():
time.sleep(1)
except Exception as e:
self.logger.exception(e)
# We need to shutdown the daemon process at this point, because
# otherwise it will continue executing from after the original
# call to ``start``.
os._exit(os.EX_OK) |
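Putting start/stop together, a minimal daemon sketch. The constructor signature, the pid_dir keyword, and the got_sigterm helper are assumptions about the surrounding Service class, so treat this purely as an illustration:
import time

class Sleeper(Service):  # assumes the Service class these methods belong to
    def run(self):
        # got_sigterm is assumed to report whether SIGTERM was received
        # (i.e. one of the signal events set by on_signal above).
        while not self.got_sigterm():
            time.sleep(1)

if __name__ == '__main__':
    svc = Sleeper('sleeper', pid_dir='/tmp')  # pid_dir kwarg is an assumption
    if svc.is_running():
        svc.stop(block=10)   # SIGTERM, wait up to 10 seconds for shutdown
    else:
        svc.start(block=10)  # wait up to 10 seconds for the daemon to come up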
<SYSTEM_TASK:>
Create a new environment
<END_TASK>
<USER_TASK:>
Description:
def create(opts):
"""Create a new environment
Usage:
datacats create [-bin] [--interactive] [-s NAME] [--address=IP] [--syslog]
[--ckan=CKAN_VERSION] [--no-datapusher] [--site-url SITE_URL]
[--no-init-db] ENVIRONMENT_DIR [PORT]
Options:
--address=IP Address to listen on (Linux-only)
--ckan=CKAN_VERSION Use CKAN version CKAN_VERSION [default: 2.4]
-b --bare Bare CKAN site with no example extension
-i --image-only Create the environment but don't start containers
--interactive Doesn't detach from the web container
--no-datapusher Don't install/enable ckanext-datapusher
--no-init-db Don't initialize the database. Useful for importing CKANs.
-n --no-sysadmin Don't prompt for an initial sysadmin user account
-s --site=NAME Pick a site to create [default: primary]
--site-url SITE_URL The site_url to use in API responses (e.g. http://example.org:{port}/)
--syslog Log to the syslog
ENVIRONMENT_DIR is a path for the new environment directory. The last
part of this path will be used as the environment name.
""" |
if opts['--address'] and is_boot2docker():
raise DatacatsError('Cannot specify address on boot2docker.')
return create_environment(
environment_dir=opts['ENVIRONMENT_DIR'],
port=opts['PORT'],
create_skin=not opts['--bare'],
start_web=not opts['--image-only'],
create_sysadmin=not opts['--no-sysadmin'],
site_name=opts['--site'],
ckan_version=opts['--ckan'],
address=opts['--address'],
log_syslog=opts['--syslog'],
datapusher=not opts['--no-datapusher'],
site_url=opts['--site-url'],
interactive=opts['--interactive'],
init_db=not opts['--no-init-db'],
) |
<SYSTEM_TASK:>
Resets a site to the default state. This will re-initialize the
<END_TASK>
<USER_TASK:>
Description:
def reset(environment, opts):
"""Resets a site to the default state. This will re-initialize the
database and recreate the administrator account.
Usage:
datacats reset [-iyn] [-s NAME] [ENVIRONMENT]
Options:
-i --interactive Don't detach from the web container
-s --site=NAME The site to reset [default: primary]
-y --yes Respond yes to all questions
-n --no-sysadmin Don't prompt for a sysadmin password""" |
# pylint: disable=unused-argument
if not opts['--yes']:
y_or_n_prompt('Reset will remove all data related to the '
'site {} and recreate the database'.format(opts['--site']))
print 'Resetting...'
environment.stop_supporting_containers()
environment.stop_ckan()
clean_pyc(environment)
# Save the port.
saved_port = environment.port
environment.purge_data([opts['--site']], never_delete=True)
init({
'ENVIRONMENT_DIR': opts['ENVIRONMENT'],
'--site': opts['--site'],
'PORT': saved_port,
'--syslog': None,
'--address': None,
'--image-only': False,
'--interactive': opts['--interactive'],
'--no-init-db': False,
'--no-sysadmin': opts['--no-sysadmin'],
'--site-url': None
}, no_install=True) |
<SYSTEM_TASK:>
Initialize a purged environment or copied environment directory
<END_TASK>
<USER_TASK:>
Description:
def init(opts, no_install=False, quiet=False):
"""Initialize a purged environment or copied environment directory
Usage:
datacats init [-in] [--syslog] [-s NAME] [--address=IP] [--interactive]
[--site-url SITE_URL] [ENVIRONMENT_DIR [PORT]] [--no-init-db]
Options:
--address=IP Address to listen on (Linux-only)
--interactive Don't detach from the web container
-i --image-only Create the environment but don't start containers
--no-init-db Don't initialize the database. Useful for importing other CKANs
-n --no-sysadmin Don't prompt for an initial sysadmin user account
-s --site=NAME Pick a site to initialize [default: primary]
--site-url SITE_URL The site_url to use in API responses (e.g. http://example.org:{port}/)
--syslog Log to the syslog
ENVIRONMENT_DIR is an existing datacats environment directory. Defaults to '.'
""" |
if opts['--address'] and is_boot2docker():
raise DatacatsError('Cannot specify address on boot2docker.')
environment_dir = opts['ENVIRONMENT_DIR']
port = opts['PORT']
address = opts['--address']
start_web = not opts['--image-only']
create_sysadmin = not opts['--no-sysadmin']
site_name = opts['--site']
site_url = opts['--site-url']
interactive = opts['--interactive']
init_db = not opts['--no-init-db']
environment_dir = abspath(environment_dir or '.')
log_syslog = opts['--syslog']
environment = Environment.load(environment_dir, site_name)
if address:
environment.address = address
if port:
environment.port = int(port)
if site_url:
environment.site_url = site_url
try:
if environment.sites and site_name in environment.sites:
raise DatacatsError('Site named {0} already exists.'
.format(site_name))
# There are a couple of steps we can/must skip if we're making a sub-site only
making_full_environment = not environment.data_exists()
if not quiet:
write('Creating environment {0}/{1} '
'from existing environment directory "{0}"'
.format(environment.name, environment.site_name))
steps = [
lambda: environment.create_directories(create_project_dir=False)] + ([
environment.save,
environment.create_virtualenv
] if making_full_environment else []) + [
environment.save_site,
environment.start_supporting_containers,
environment.fix_storage_permissions,
]
for fn in steps:
fn()
if not quiet:
write('.')
if not quiet:
write('\n')
except:
if not quiet:
print
raise
return finish_init(environment, start_web, create_sysadmin,
log_syslog=log_syslog, do_install=not no_install,
quiet=quiet, site_url=site_url, interactive=interactive,
init_db=init_db) |
<SYSTEM_TASK:>
Save profile settings into user profile directory
<END_TASK>
<USER_TASK:>
Description:
def save(self):
"""
Save profile settings into user profile directory
""" |
config = self.profiledir + '/config'
if not isdir(self.profiledir):
makedirs(self.profiledir)
cp = SafeConfigParser()
cp.add_section('ssh')
cp.set('ssh', 'private_key', self.ssh_private_key)
cp.set('ssh', 'public_key', self.ssh_public_key)
with open(config, 'w') as cfile:
cp.write(cfile) |
<SYSTEM_TASK:>
Generate a new ssh private and public key
<END_TASK>
<USER_TASK:>
Description:
def generate_ssh_key(self):
"""
Generate a new ssh private and public key
""" |
web_command(
command=["ssh-keygen", "-q", "-t", "rsa", "-N", "", "-C",
"datacats generated {0}@{1}".format(
getuser(), gethostname()),
"-f", "/output/id_rsa"],
rw={self.profiledir: '/output'},
) |
<SYSTEM_TASK:>
Sends "create project" command to the remote server
<END_TASK>
<USER_TASK:>
Description:
def create(self, environment, target_name):
"""
Sends "create project" command to the remote server
""" |
remote_server_command(
["ssh", environment.deploy_target, "create", target_name],
environment, self,
clean_up=True,
) |
<SYSTEM_TASK:>
Return True if password was set successfully
<END_TASK>
<USER_TASK:>
Description:
def admin_password(self, environment, target_name, password):
"""
Return True if password was set successfully
""" |
try:
remote_server_command(
["ssh", environment.deploy_target,
"admin_password", target_name, password],
environment, self,
clean_up=True
)
return True
except WebCommandError:
return False |
<SYSTEM_TASK:>
Return True if deployment was successful
<END_TASK>
<USER_TASK:>
Description:
def deploy(self, environment, target_name, stream_output=None):
"""
Return True if deployment was successful
""" |
try:
remote_server_command(
[
"rsync", "-lrv", "--safe-links", "--munge-links",
"--delete", "--inplace", "--chmod=ugo=rwX",
"--exclude=.datacats-environment",
"--exclude=.git",
"/project/.",
environment.deploy_target + ':' + target_name
],
environment, self,
include_project_dir=True,
stream_output=stream_output,
clean_up=True,
)
except WebCommandError as e:
raise DatacatsError(
"Unable to deploy `{0}` to remote server for some reason:\n"
" datacats was not able to copy data to the remote server"
            .format(target_name),
parent_exception=e
)
try:
remote_server_command(
[
"ssh", environment.deploy_target, "install", target_name,
],
environment, self,
clean_up=True,
)
return True
except WebCommandError as e:
raise DatacatsError(
"Unable to deploy `{0}` to remote server for some reason:\n"
"datacats copied data to the server but failed to register\n"
"(or `install`) the new catalog"
            .format(target_name),
parent_exception=e
) |
<SYSTEM_TASK:>
Compute the number of mini-batches required to cover a data set of
<END_TASK>
<USER_TASK:>
Description:
def num_batches(n, batch_size):
"""Compute the number of mini-batches required to cover a data set of
size `n` using batches of size `batch_size`.
Parameters
----------
n: int
the number of samples in the data set
batch_size: int
the mini-batch size
Returns
-------
int: the number of batches required
""" |
b = n // batch_size
if n % batch_size > 0:
b += 1
return b |
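A few worked values:
assert num_batches(100, 10) == 10  # exact fit
assert num_batches(101, 10) == 11  # one extra, partially-filled batch
assert num_batches(5, 8) == 1      # a single short batch covers everything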
<SYSTEM_TASK:>
Get the number of indices that would be generated by this
<END_TASK>
<USER_TASK:>
Description:
def num_indices_generated(self):
"""
Get the number of indices that would be generated by this
sampler.
Returns
-------
int, `np.inf` or `None`.
An int if the number of samples is known, `np.inf` if it is
infinite or `None` if the number of samples is unknown.
""" |
if self.repeats == -1:
return np.inf
else:
return self.length * self.repeats |
<SYSTEM_TASK:>
Create an iterator that generates in-order mini-batches of sample
<END_TASK>
<USER_TASK:>
Description:
def in_order_indices_batch_iterator(self, batch_size):
"""
Create an iterator that generates in-order mini-batches of sample
indices. The batches will have `batch_size` elements, with the
exception of the final batch which will have less if there are not
enough samples left to fill it.
    The generated mini-batch indices take the form of 1D NumPy integer
arrays.
Parameters
----------
batch_size: int
Mini-batch size
Returns
-------
iterator
An iterator that generates mini-batches in the form of 1D NumPy
integer arrays.
""" |
if self.repeats == 1:
for i in range(0, self.length, batch_size):
yield np.arange(i, min(i + batch_size, self.length))
else:
repeats = self.repeats
i = 0
while True:
j = i + batch_size
if j <= self.length:
# Within size of data
yield np.arange(i, j)
i = j
elif j <= self.length * 2:
# One restart is required
# Reduce the number of remaining repeats
if repeats != -1:
repeats -= 1
if repeats == 0:
# Finished; emit remaining elements
if i < self.length:
yield np.arange(i, self.length)
break
# Wrap over
# Compute number of elements required to make up
# the batch
k = batch_size - (self.length - i)
yield np.append(np.arange(i, self.length),
np.arange(0, k), axis=0)
i = k
else:
# Multiple restarts required to fill the batch
batch_ndx = np.arange(0)
# i = 0
while len(batch_ndx) < batch_size:
# Wrap over
k = min(batch_size - len(batch_ndx), self.length - i)
batch_ndx = np.append(
batch_ndx, np.arange(i, i + k), axis=0)
i += k
if i >= self.length:
i -= self.length
# Reduce the number of remaining repeats
if repeats != -1:
repeats -= 1
if repeats == 0:
break
if len(batch_ndx) > 0:
yield batch_ndx
if repeats == 0:
break |
<SYSTEM_TASK:>
Create an iterator that generates randomly shuffled mini-batches of
<END_TASK>
<USER_TASK:>
Description:
def shuffled_indices_batch_iterator(self, batch_size, shuffle_rng):
"""
Create an iterator that generates randomly shuffled mini-batches of
sample indices. The batches will have `batch_size` elements, with the
exception of the final batch which will have less if there are not
enough samples left to fill it.
    The generated mini-batch indices take the form of 1D NumPy integer
arrays.
Parameters
----------
batch_size: int
Mini-batch size
shuffle_rng: a `numpy.random.RandomState` that will be used to
randomise element order.
Returns
-------
iterator
An iterator that generates mini-batches in the form of 1D NumPy
integer arrays.
""" |
if self.repeats == 1:
indices = shuffle_rng.permutation(self.length)
for i in range(0, self.length, batch_size):
yield indices[i:i + batch_size]
else:
repeats = self.repeats
indices = shuffle_rng.permutation(self.length)
i = 0
while True:
j = i + batch_size
if j <= self.length:
# Within size of data
yield indices[i:j]
i = j
else:
# Multiple restarts required to fill the batch
batch_ndx = np.arange(0)
while len(batch_ndx) < batch_size:
# Wrap over
k = min(batch_size - len(batch_ndx), self.length - i)
batch_ndx = np.append(
batch_ndx, indices[i:i + k], axis=0)
i += k
if i >= self.length:
# Loop over; new permutation
indices = shuffle_rng.permutation(self.length)
i -= self.length
# Reduce the number of remaining repeats
if repeats != -1:
repeats -= 1
if repeats == 0:
break
if len(batch_ndx) > 0:
yield batch_ndx
if repeats == 0:
break |
<SYSTEM_TASK:>
Compute sample weight given an array of sample classes. The weights
<END_TASK>
<USER_TASK:>
Description:
def class_balancing_sample_weights(y):
"""
Compute sample weight given an array of sample classes. The weights
are assigned on a per-class basis and the per-class weights are
inversely proportional to their frequency.
Parameters
----------
y: NumPy array, 1D dtype=int
sample classes, values must be 0 or positive
Returns
-------
NumPy array, 1D dtype=float
per sample weight array
""" |
h = np.bincount(y)
cls_weight = 1.0 / (h.astype(float) * len(np.nonzero(h)[0]))
cls_weight[np.isnan(cls_weight)] = 0.0
sample_weight = cls_weight[y]
return sample_weight |
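A worked example, calling it through the WeightedSampler class as the helper below does: with three samples of class 0 and one of class 1, each class ends up with the same total weight:
import numpy as np

y = np.array([0, 0, 0, 1])
w = WeightedSampler.class_balancing_sample_weights(y)
# h = [3, 1]; two non-empty classes, so per-class weights are 1/(3*2) and 1/(1*2)
# w == [1/6, 1/6, 1/6, 1/2]; both classes sum to 0.5
assert np.isclose(w[:3].sum(), w[3])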
<SYSTEM_TASK:>
Construct a `WeightedSubsetSampler` that compensates for class
<END_TASK>
<USER_TASK:>
Description:
def class_balancing_sampler(y, indices):
"""
Construct a `WeightedSubsetSampler` that compensates for class
imbalance.
Parameters
----------
y: NumPy array, 1D dtype=int
sample classes, values must be 0 or positive
indices: NumPy array, 1D dtype=int
An array of indices that identify the subset of samples drawn
from data that are to be used
Returns
-------
WeightedSubsetSampler instance
Sampler
""" |
weights = WeightedSampler.class_balancing_sample_weights(y[indices])
return WeightedSubsetSampler(weights, indices=indices) |
<SYSTEM_TASK:>
Get the path of the given file within the batchup data directory
<END_TASK>
<USER_TASK:>
Description:
def get_data_path(filename):
"""
Get the path of the given file within the batchup data directory
Parameters
----------
filename: str
The filename to locate within the batchup data directory
Returns
-------
str
The full path of the file
""" |
if os.path.isabs(filename):
return filename
else:
return os.path.join(get_data_dir(), filename) |
<SYSTEM_TASK:>
Download a file to a given path from a given URL, if it does not exist.
<END_TASK>
<USER_TASK:>
Description:
def download(path, source_url):
"""
Download a file to a given path from a given URL, if it does not exist.
Parameters
----------
path: str
The (destination) path of the file on the local filesystem
source_url: str
The URL from which to download the file
Returns
-------
str
The path of the file
""" |
dir_path = os.path.dirname(path)
if not os.path.exists(dir_path):
os.makedirs(dir_path)
if not os.path.exists(path):
print('Downloading {} to {}'.format(source_url, path))
filename = source_url.split('/')[-1]
def _progress(count, block_size, total_size):
sys.stdout.write('\rDownloading {} {:.2%}'.format(
filename, float(count * block_size) / float(total_size)))
sys.stdout.flush()
try:
urlretrieve(source_url, path, reporthook=_progress)
except:
sys.stdout.write('\r')
# Exception; remove any partially downloaded file and re-raise
if os.path.exists(path):
os.remove(path)
raise
sys.stdout.write('\r')
return path |
<SYSTEM_TASK:>
Compute the SHA-256 hash of the file at the given path
<END_TASK>
<USER_TASK:>
Description:
def compute_sha256(path):
"""
Compute the SHA-256 hash of the file at the given path
Parameters
----------
path: str
The path of the file
Returns
-------
str
The SHA-256 HEX digest
""" |
hasher = hashlib.sha256()
with open(path, 'rb') as f:
# 10MB chunks
for chunk in iter(lambda: f.read(10 * 1024 * 1024), b''):
hasher.update(chunk)
return hasher.hexdigest() |
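A quick self-check of the chunked hashing against hashlib on a small temporary file:
import hashlib
import tempfile

with tempfile.NamedTemporaryFile(delete=False) as f:
    f.write(b'batchup test data')
    tmp_path = f.name

assert compute_sha256(tmp_path) == hashlib.sha256(b'batchup test data').hexdigest()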
<SYSTEM_TASK:>
Verify the integrity of a file by checking its SHA-256 hash.
<END_TASK>
<USER_TASK:>
Description:
def verify_file(path, sha256):
"""
Verify the integrity of a file by checking its SHA-256 hash.
If no digest is supplied, the digest is printed to the console.
Closely follows the code in `torchvision.datasets.utils.check_integrity`
Parameters
----------
path: str
The path of the file to check
sha256: str
The expected SHA-256 hex digest of the file, or `None` to print the
digest of the file to the console
Returns
-------
bool
Indicates if the file passes the integrity check or not
""" |
if not os.path.isfile(path):
return False
digest = compute_sha256(path)
if sha256 is None:
        # No digest supplied; report it to the console so a developer can fill
# it in
print('SHA-256 of {}:'.format(path))
print(' "{}"'.format(digest))
else:
if digest != sha256:
return False
return True |
<SYSTEM_TASK:>
Download a file to a given path from a given URL, if it does not exist.
<END_TASK>
<USER_TASK:>
Description:
def download_and_verify(path, source_url, sha256):
"""
Download a file to a given path from a given URL, if it does not exist.
    After downloading it, verify its integrity by checking the SHA-256 hash.
Parameters
----------
path: str
The (destination) path of the file on the local filesystem
source_url: str
The URL from which to download the file
sha256: str
The expected SHA-256 hex digest of the file, or `None` to print the
digest of the file to the console
Returns
-------
str or None
        The path of the file if successfully downloaded, otherwise `None`
""" |
if os.path.exists(path):
# Already exists?
# Nothing to do, except print the SHA-256 if necessary
if sha256 is None:
print('The SHA-256 of {} is "{}"'.format(
path, compute_sha256(path)))
return path
# Compute the path of the unverified file
unverified_path = path + '.unverified'
for i in range(_MAX_DOWNLOAD_TRIES):
# Download it
try:
unverified_path = download(unverified_path, source_url)
except Exception as e:
# Report failure
print(
'Download of {} unsuccessful; error {}; '
'deleting and re-trying...'.format(source_url, e))
# Delete so that we can retry
if os.path.exists(unverified_path):
os.remove(unverified_path)
else:
if os.path.exists(unverified_path):
# Got something...
if verify_file(unverified_path, sha256):
# Success: rename the unverified file to the destination
# filename
os.rename(unverified_path, path)
return path
else:
# Report failure
print(
'Download of {} unsuccessful; verification failed; '
'deleting and re-trying...'.format(source_url))
# Delete so that we can retry
os.remove(unverified_path)
print('Did not succeed in downloading {} (tried {} times)'.format(
source_url, _MAX_DOWNLOAD_TRIES
))
return None |
<SYSTEM_TASK:>
Copy a file to a given path from a given path, if it does not exist.
<END_TASK>
<USER_TASK:>
Description:
def copy_and_verify(path, source_path, sha256):
"""
Copy a file to a given path from a given path, if it does not exist.
    After copying it, verify its integrity by checking the SHA-256 hash.
Parameters
----------
path: str
The (destination) path of the file on the local filesystem
source_path: str
The path from which to copy the file
sha256: str
The expected SHA-256 hex digest of the file, or `None` to print the
digest of the file to the console
Returns
-------
str or None
        The path of the file if successfully copied, otherwise `None`
""" |
if os.path.exists(path):
# Already exists?
# Nothing to do, except print the SHA-256 if necessary
if sha256 is None:
print('The SHA-256 of {} is "{}"'.format(
path, compute_sha256(path)))
return path
if not os.path.exists(source_path):
return None
# Compute the path of the unverified file
unverified_path = path + '.unverified'
# Copy it
dir_path = os.path.dirname(path)
if not os.path.exists(dir_path):
os.makedirs(dir_path)
shutil.copy(source_path, unverified_path)
if os.path.exists(unverified_path):
# Got something...
if verify_file(unverified_path, sha256):
# Success: rename the unverified file to the destination
# filename
os.rename(unverified_path, path)
return path
else:
# Report failure
print('SHA verification of file {} failed'.format(source_path))
# Delete
os.remove(unverified_path)
return None |
<SYSTEM_TASK:>
Run a command or interactive shell within this environment
<END_TASK>
<USER_TASK:>
Description:
def shell(environment, opts):
"""Run a command or interactive shell within this environment
Usage:
datacats [-d] [-s NAME] shell [ENVIRONMENT [COMMAND...]]
Options:
-d --detach Run the resulting container in the background
-s --site=NAME Specify a site to run the shell on [default: primary]
ENVIRONMENT may be an environment name or a path to an environment directory.
Default: '.'
""" |
environment.require_data()
environment.start_supporting_containers()
return environment.interactive_shell(
opts['COMMAND'],
detach=opts['--detach']
) |
<SYSTEM_TASK:>
Run a paster command from the current directory
<END_TASK>
<USER_TASK:>
Description:
def paster(opts):
"""Run a paster command from the current directory
Usage:
datacats paster [-d] [-s NAME] [COMMAND...]
Options:
-s --site=NAME Specify a site to run this paster command on [default: primary]
-d --detach Run the resulting container in the background
You must be inside a datacats environment to run this. The paster command will
run within your current directory inside the environment. You don't need to
specify the --plugin option. The --config option also need not be specified.
""" |
environment = Environment.load('.')
environment.require_data()
environment.start_supporting_containers()
if not opts['COMMAND']:
opts['COMMAND'] = ['--', 'help']
assert opts['COMMAND'][0] == '--'
return environment.interactive_shell(
opts['COMMAND'][1:],
paster=True,
detach=opts['--detach']
) |
<SYSTEM_TASK:>
Add a site's configuration to the source dir and site dir
<END_TASK>
<USER_TASK:>
Description:
def save_new_site(site_name, sitedir, srcdir, port, address, site_url,
passwords):
"""
Add a site's configuration to the source dir and site dir
""" |
cp = ConfigParser.SafeConfigParser()
cp.read([srcdir + '/.datacats-environment'])
section_name = 'site_' + site_name
if not cp.has_section(section_name):
cp.add_section(section_name)
cp.set(section_name, 'port', str(port))
if address:
cp.set(section_name, 'address', address)
if site_url:
cp.set(section_name, 'site_url', site_url)
with open(srcdir + '/.datacats-environment', 'w') as config:
cp.write(config)
# save passwords to datadir
cp = ConfigParser.SafeConfigParser()
cp.add_section('passwords')
for n in sorted(passwords):
cp.set('passwords', n.lower(), passwords[n])
# Write to the sitedir so we maintain separate passwords.
with open(sitedir + '/passwords.ini', 'w') as config:
cp.write(config) |
<SYSTEM_TASK:>
Save an environment's configuration to the source dir and data dir
<END_TASK>
<USER_TASK:>
Description:
def save_new_environment(name, datadir, srcdir, ckan_version,
deploy_target=None, always_prod=False):
"""
Save an environment's configuration to the source dir and data dir
""" |
with open(datadir + '/.version', 'w') as f:
f.write('2')
cp = ConfigParser.SafeConfigParser()
cp.read(srcdir + '/.datacats-environment')
if not cp.has_section('datacats'):
cp.add_section('datacats')
cp.set('datacats', 'name', name)
cp.set('datacats', 'ckan_version', ckan_version)
if deploy_target:
if not cp.has_section('deploy'):
cp.add_section('deploy')
cp.set('deploy', 'target', deploy_target)
if always_prod:
cp.set('datacats', 'always_prod', 'true')
with open(srcdir + '/.datacats-environment', 'w') as config:
cp.write(config)
save_srcdir_location(datadir, srcdir) |
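
Similarly, a hypothetical call (names and paths invented) records the data-format version and the [datacats] / [deploy] sections shown in the code:

# Hypothetical values for illustration only
save_new_environment(
    name='myenv',
    datadir='/home/me/.datacats/myenv',
    srcdir='/home/me/myenv',
    ckan_version='2.4',
    deploy_target='user@deploy.example.com',
    always_prod=False,
)
# /home/me/.datacats/myenv/.version now contains "2", and
# /home/me/myenv/.datacats-environment gains:
#   [datacats]
#   name = myenv
#   ckan_version = 2.4
#   [deploy]
#   target = user@deploy.example.com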
<SYSTEM_TASK:>
Load configuration values for an environment
<END_TASK>
<USER_TASK:>
Description:
def load_environment(srcdir, datadir=None, allow_old=False):
"""
Load configuration values for an environment
    :param srcdir: environment source directory
    :param datadir: environment data directory; if None it will be
        discovered from srcdir
    :param allow_old: don't raise an exception if this is an old-format
        site; this is only valid for sites that you are purging
    Returns (datadir, name, ckan_version, always_prod, deploy_target,
    remote_server_key, extra_containers)
""" |
cp = ConfigParser.SafeConfigParser()
try:
cp.read([srcdir + '/.datacats-environment'])
except ConfigParser.Error:
raise DatacatsError('Error reading environment information')
name = cp.get('datacats', 'name')
if datadir:
        # Update the link in case the user moved their srcdir
save_srcdir_location(datadir, srcdir)
else:
datadir = path.expanduser('~/.datacats/' + name)
# FIXME: check if datadir is sane, project-dir points back to srcdir
if migrate.needs_format_conversion(datadir) and not allow_old:
raise DatacatsError('This environment uses an old format. You must'
' migrate to the new format. To do so, use the'
' "datacats migrate" command.')
if migrate.is_locked(datadir):
raise DatacatsError('Migration in progress, cannot continue.\n'
'If you interrupted a migration, you should'
' attempt manual recovery or contact us by'
' filing an issue at http://github.com/datacats/'
'datacats.\nAs a last resort, you could delete'
' all your stored data and create a new environment'
' by running "datacats purge" followed by'
' "datacats init".')
# FIXME: consider doing data_complete check here
ckan_version = cp.get('datacats', 'ckan_version')
try:
always_prod = cp.getboolean('datacats', 'always_prod')
except ConfigParser.NoOptionError:
always_prod = False
try:
extra_containers = cp.get('datacats', 'extra_containers').split(' ')
except ConfigParser.NoOptionError:
extra_containers = ()
    # If a custom remote_server ssh connection address is defined,
    # it overrides the default datacats.com target
try:
deploy_target = cp.get('deploy', 'remote_server_user') \
+ "@" + cp.get('deploy', 'remote_server')
except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
deploy_target = DEFAULT_REMOTE_SERVER_TARGET
# if remote_server's ssh public key is given,
# we overwrite the default datacats.com one
try:
remote_server_key = cp.get('deploy', 'remote_server_key')
except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
remote_server_key = None
return (datadir, name, ckan_version, always_prod, deploy_target,
remote_server_key, extra_containers) |
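
A sketch of how a caller might unpack the tuple returned above; the source path and printed values are illustrative:

# Illustrative usage of the 7-tuple returned by load_environment()
(datadir, name, ckan_version, always_prod, deploy_target,
 remote_server_key, extra_containers) = load_environment('/home/me/myenv')
print(name)              # e.g. 'myenv'
print(ckan_version)      # e.g. '2.4'
print(extra_containers)  # e.g. () when none are configured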
<SYSTEM_TASK:>
Return True if the directories and containers we're expecting
<END_TASK>
<USER_TASK:>
Description:
def data_complete(datadir, sitedir, get_container_name):
"""
Return True if the directories and containers we're expecting
are present in datadir, sitedir and containers
""" |
if any(not path.isdir(sitedir + x)
for x in ('/files', '/run', '/solr')):
return False
if docker.is_boot2docker():
# Inspect returns None if the container doesn't exist.
return all(docker.inspect_container(get_container_name(x))
for x in ('pgdata', 'venv'))
return path.isdir(datadir + '/venv') and path.isdir(sitedir + '/postgres') |
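
get_container_name is expected to be a callable mapping a role such as 'postgres' to a full docker container name; the naming scheme below is purely illustrative, not the project's real convention.

# Illustrative only: a stand-in container naming scheme
def get_container_name(role):
    return 'datacats_{0}_myenv_primary'.format(role)

complete = data_complete(
    datadir='/home/me/.datacats/myenv',
    sitedir='/home/me/.datacats/myenv/sites/primary',
    get_container_name=get_container_name,
)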
<SYSTEM_TASK:>
Create expected directories in datadir, sitedir
<END_TASK>
<USER_TASK:>
Description:
def create_directories(datadir, sitedir, srcdir=None):
"""
Create expected directories in datadir, sitedir
and optionally srcdir
""" |
# It's possible that the datadir already exists
# (we're making a secondary site)
if not path.isdir(datadir):
os.makedirs(datadir, mode=0o700)
try:
        # This should take care of the 'site' subdir if needed
os.makedirs(sitedir, mode=0o700)
except OSError:
raise DatacatsError("Site already exists.")
# venv isn't site-specific, the rest are.
if not docker.is_boot2docker():
if not path.isdir(datadir + '/venv'):
os.makedirs(datadir + '/venv')
os.makedirs(sitedir + '/postgres')
os.makedirs(sitedir + '/solr')
os.makedirs(sitedir + '/files')
os.makedirs(sitedir + '/run')
if srcdir:
os.makedirs(srcdir) |
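
A hypothetical call for a brand-new primary site (paths invented); srcdir is only passed when the source directory itself still needs to be created:

# Hypothetical paths for illustration only
create_directories(
    datadir='/home/me/.datacats/myenv',
    sitedir='/home/me/.datacats/myenv/sites/primary',
    srcdir='/home/me/myenv',
)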
<SYSTEM_TASK:>
Stop postgres and solr containers, along with any specified extra containers
<END_TASK>
<USER_TASK:>
Description:
def stop_supporting_containers(get_container_name, extra_containers):
"""
Stop postgres and solr containers, along with any specified extra containers
""" |
docker.remove_container(get_container_name('postgres'))
docker.remove_container(get_container_name('solr'))
for container in extra_containers:
docker.remove_container(get_container_name(container)) |