_id | title | partition | text | language | meta_information
---|---|---|---|---|---|
q276300
|
ConnectorDB.reset_apikey
|
test
|
def reset_apikey(self):
"""invalidates the device's current api key, and generates a new one. Resets current auth to use the new apikey,
since the change would have future queries fail if they use the old api key."""
apikey = Device.reset_apikey(self)
self.db.setauth(apikey)
return apikey
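
# Hedged usage sketch (added for illustration, not part of the original row).
# Assumes the client is importable as `connectordb.ConnectorDB`; the API key and
# server URL below are placeholders.
from connectordb import ConnectorDB

cdb = ConnectorDB("my-device-apikey", url="https://cdb.example.com/api/v1")
new_key = cdb.reset_apikey()  # the old key is now invalid; the session now uses new_key
print(new_key)  # persist this value for future logins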
|
python
|
{
"resource": ""
}
|
q276301
|
ConnectorDB.users
|
test
|
def users(self):
"""Returns the list of users in the database"""
result = self.db.read("", {"q": "ls"})
if result is None or result.json() is None:
return []
users = []
for u in result.json():
usr = self(u["name"])
usr.metadata = u
users.append(usr)
return users
|
python
|
{
"resource": ""
}
|
q276302
|
run_bwa_index
|
test
|
def run_bwa_index(job, ref_id):
"""
Use BWA to create reference index files
:param JobFunctionWrappingJob job: passed automatically by Toil
:param str ref_id: FileStoreID for the reference genome
:return: FileStoreIDs for BWA index files
:rtype: tuple(str, str, str, str, str)
"""
job.fileStore.logToMaster('Created BWA index files')
work_dir = job.fileStore.getLocalTempDir()
job.fileStore.readGlobalFile(ref_id, os.path.join(work_dir, 'ref.fa'))
command = ['index', '/data/ref.fa']
dockerCall(job=job, workDir=work_dir, parameters=command,
tool='quay.io/ucsc_cgl/bwa:0.7.12--256539928ea162949d8a65ca5c79a72ef557ce7c')
ids = {}
for output in ['ref.fa.amb', 'ref.fa.ann', 'ref.fa.bwt', 'ref.fa.pac', 'ref.fa.sa']:
ids[output.split('.')[-1]] = (job.fileStore.writeGlobalFile(os.path.join(work_dir, output)))
return ids['amb'], ids['ann'], ids['bwt'], ids['pac'], ids['sa']
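
# Hedged usage sketch (added for illustration): wiring run_bwa_index into a small
# Toil workflow. The reference path, job store location and resource hints are
# placeholders; run_bwa_index itself is the function defined above.
from toil.common import Toil
from toil.job import Job

def bwa_index_workflow(job, ref_id):
    # Returns the five BWA index FileStoreIDs (amb, ann, bwt, pac, sa)
    return job.addChildJobFn(run_bwa_index, ref_id, cores=2, memory='4G', disk='10G').rv()

if __name__ == '__main__':
    options = Job.Runner.getDefaultOptions('./bwa-jobstore')
    with Toil(options) as toil:
        ref_id = toil.importFile('file:///path/to/ref.fa')  # placeholder path
        toil.start(Job.wrapJobFn(bwa_index_workflow, ref_id))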
|
python
|
{
"resource": ""
}
|
q276303
|
Logger.connectordb
|
test
|
def connectordb(self):
"""Returns the ConnectorDB object that the logger uses. Raises an error if Logger isn't able to connect"""
if self.__cdb is None:
logging.debug("Logger: Connecting to " + self.serverurl)
self.__cdb = ConnectorDB(self.apikey, url=self.serverurl)
return self.__cdb
|
python
|
{
"resource": ""
}
|
q276304
|
Logger.addStream
|
test
|
def addStream(self, streamname, schema=None, **kwargs):
"""Adds the given stream to the logger. Requires an active connection to the ConnectorDB database.
If a schema is not specified, loads the stream from the database. If a schema is specified, and the stream
does not exist, creates the stream. You can also add stream properties such as description or nickname to be added
during creation."""
stream = self.connectordb[streamname]
if not stream.exists():
if schema is not None:
stream.create(schema, **kwargs)
else:
raise Exception(
"The stream '%s' was not found" % (streamname, ))
self.addStream_force(streamname, stream.schema)
|
python
|
{
"resource": ""
}
|
q276305
|
Logger.addStream_force
|
test
|
def addStream_force(self, streamname, schema=None):
"""This function adds the given stream to the logger, but does not check with a ConnectorDB database
to make sure that the stream exists. Use at your own risk."""
c = self.database.cursor()
c.execute("INSERT OR REPLACE INTO streams VALUES (?,?);",
(streamname, json.dumps(schema)))
self.streams[streamname] = schema
|
python
|
{
"resource": ""
}
|
q276306
|
Logger.insert
|
test
|
def insert(self, streamname, value):
"""Insert the datapoint into the logger for the given stream name. The logger caches the datapoint
and eventually synchronizes it with ConnectorDB"""
if streamname not in self.streams:
raise Exception("The stream '%s' was not found" % (streamname, ))
# Validate the schema
validate(value, self.streams[streamname])
# Insert the datapoint - it fits the schema
value = json.dumps(value)
logging.debug("Logger: %s <= %s" % (streamname, value))
c = self.database.cursor()
c.execute("INSERT INTO cache VALUES (?,?,?);",
(streamname, time.time(), value))
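
# Hedged usage sketch (added for illustration). Construction of the Logger itself is
# not shown in this dataset, so this helper assumes an already configured instance and
# only exercises the methods defined above; the stream name is a placeholder.
def log_temperature(logger, reading):
    # The stream must be registered first; a JSON-schema dict is accepted.
    if "temperature" not in logger.streams:
        logger.addStream("temperature", {"type": "number"})
    logger.insert("temperature", reading)  # cached in the local sqlite database
    logger.sync()  # attempt to push the cache to ConnectorDB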
|
python
|
{
"resource": ""
}
|
q276307
|
Logger.sync
|
test
|
def sync(self):
"""Attempt to sync with the ConnectorDB server"""
logging.debug("Logger: Syncing...")
failed = False
try:
# Get the connectordb object
cdb = self.connectordb
# Ping the database - most connection errors will happen here
cdb.ping()
with self.synclock:
c = self.database.cursor()
for stream in self.streams:
s = cdb[stream]
c.execute(
"SELECT * FROM cache WHERE stream=? ORDER BY timestamp ASC;",
(stream, ))
datapointArray = []
for dp in c.fetchall():
datapointArray.append(
{"t": dp[1],
"d": json.loads(dp[2])})
# First, check if the data already inserted has newer timestamps,
# and in that case, assume that there was an error, and remove the datapoints
# with an older timestamp, so that we don't have an error when syncing
if len(s) > 0:
newtime = s[-1]["t"]
while (len(datapointArray) > 0 and datapointArray[0]["t"] < newtime):
logging.debug("Datapoint exists with older timestamp. Removing the datapoint.")
datapointArray = datapointArray[1:]
if len(datapointArray) > 0:
logging.debug("%s: syncing %i datapoints" %
(stream, len(datapointArray)))
while (len(datapointArray) > DATAPOINT_INSERT_LIMIT):
# We insert datapoints in chunks of a couple
# thousand so that they fit in the insert size
# limit of ConnectorDB
s.insert_array(
datapointArray[:DATAPOINT_INSERT_LIMIT])
# Clear the written datapoints
datapointArray = datapointArray[
DATAPOINT_INSERT_LIMIT:]
# If there was no error inserting, delete the
# datapoints from the cache
c.execute(
"DELETE FROM cache WHERE stream=? AND timestamp <?",
(stream, datapointArray[0]["t"]))
s.insert_array(datapointArray)
# If there was no error inserting, delete the
# datapoints from the cache
c.execute(
"DELETE FROM cache WHERE stream=? AND timestamp <=?",
(stream, datapointArray[-1]["t"]))
self.lastsynctime = time.time()
if self.onsync is not None:
self.onsync()
except Exception as e:
# Handle the sync failure callback
failed = True
reraise = self.syncraise
if self.onsyncfail is not None:
reraise = self.onsyncfail(e)
if reraise:
raise
|
python
|
{
"resource": ""
}
|
q276308
|
Logger.start
|
test
|
def start(self):
"""Start the logger background synchronization service. This allows you to not need to
worry about syncing with ConnectorDB - you just insert into the Logger, and the Logger
will by synced every syncperiod."""
with self.synclock:
if self.syncthread is not None:
logging.warn(
"Logger: Start called on a syncer that is already running")
return
self.sync() # Attempt a sync right away
self.__setsync()
|
python
|
{
"resource": ""
}
|
q276309
|
Logger.stop
|
test
|
def stop(self):
"""Stops the background synchronization thread"""
with self.synclock:
if self.syncthread is not None:
self.syncthread.cancel()
self.syncthread = None
|
python
|
{
"resource": ""
}
|
q276310
|
download_url_job
|
test
|
def download_url_job(job, url, name=None, s3_key_path=None, cghub_key_path=None):
"""Job version of `download_url`"""
work_dir = job.fileStore.getLocalTempDir()
fpath = download_url(job=job, url=url, work_dir=work_dir, name=name,
s3_key_path=s3_key_path, cghub_key_path=cghub_key_path)
return job.fileStore.writeGlobalFile(fpath)
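
# Hedged usage sketch (added for illustration): using download_url_job as the root
# of a Toil workflow. The URL and job store path are placeholders.
from toil.common import Toil
from toil.job import Job

if __name__ == '__main__':
    options = Job.Runner.getDefaultOptions('./download-jobstore')
    with Toil(options) as toil:
        root = Job.wrapJobFn(download_url_job,
                             'http://example.com/inputs/sample.fastq',
                             name='sample.fastq', disk='10G')
        file_id = toil.start(root)  # FileStoreID of the downloaded file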
|
python
|
{
"resource": ""
}
|
q276311
|
s3am_upload_job
|
test
|
def s3am_upload_job(job, file_id, file_name, s3_dir, s3_key_path=None):
"""Job version of s3am_upload"""
work_dir = job.fileStore.getLocalTempDir()
fpath = job.fileStore.readGlobalFile(file_id, os.path.join(work_dir, file_name))
s3am_upload(job=job, fpath=fpath, s3_dir=s3_dir, num_cores=job.cores, s3_key_path=s3_key_path)
|
python
|
{
"resource": ""
}
|
q276312
|
labels
|
test
|
def labels(ontology, output, ols_base):
"""Output the names to the given file"""
for label in get_labels(ontology=ontology, ols_base=ols_base):
click.echo(label, file=output)
|
python
|
{
"resource": ""
}
|
q276313
|
tree
|
test
|
def tree(ontology, output, ols_base):
"""Output the parent-child relations to the given file"""
for parent, child in get_hierarchy(ontology=ontology, ols_base=ols_base):
click.echo('{}\t{}'.format(parent, child), file=output)
|
python
|
{
"resource": ""
}
|
q276314
|
get_mean_insert_size
|
test
|
def get_mean_insert_size(work_dir, bam_name):
"""Function taken from MC3 Pipeline"""
cmd = "docker run --log-driver=none --rm -v {}:/data quay.io/ucsc_cgl/samtools " \
"view -f66 {}".format(work_dir, os.path.join(work_dir, bam_name))
process = subprocess.Popen(args=cmd, shell=True, stdout=subprocess.PIPE)
b_sum = 0.0
b_count = 0.0
while True:
line = process.stdout.readline()
if not line:
break
tmp = line.split("\t")
if abs(long(tmp[8])) < 10000:
b_sum += abs(long(tmp[8]))
b_count += 1
process.wait()
try:
mean = b_sum / b_count
except ZeroDivisionError:
mean = 150
print "Using insert size: %d" % mean
return int(mean)
|
python
|
{
"resource": ""
}
|
q276315
|
current_docker_container_id
|
test
|
def current_docker_container_id():
"""
Returns a string that represents the container ID of the current Docker container. If this
function is invoked outside of a container a NotInsideContainerError is raised.
>>> import subprocess
>>> import sys
>>> a = subprocess.check_output(['docker', 'run', '-v',
... sys.modules[__name__].__file__ + ':/foo.py',
... 'python:2.7.12','python', '-c',
... 'from foo import current_docker_container_id;\\
... print current_docker_container_id()'])
int call will fail if a is not a valid hex string
>>> int(a, 16) > 0
True
"""
try:
with open('/proc/1/cgroup', 'r') as readable:
raw = readable.read()
ids = set(re.compile('[0-9a-f]{12,}').findall(raw))
assert len(ids) == 1
return ids.pop()
except:
logging.exception('Failed to obtain current container ID')
raise NotInsideContainerError()
|
python
|
{
"resource": ""
}
|
q276316
|
run_star
|
test
|
def run_star(job, r1_id, r2_id, star_index_url, wiggle=False, sort=True):
"""
Performs alignment of fastqs to bam via STAR
--limitBAMsortRAM step added to deal with memory explosion when sorting certain samples.
The value was chosen to complement the recommended amount of memory to have when running STAR (60G)
:param JobFunctionWrappingJob job: passed automatically by Toil
:param str r1_id: FileStoreID of fastq (pair 1)
:param str r2_id: FileStoreID of fastq (pair 2 if applicable, else pass None)
:param str star_index_url: STAR index tarball
:param bool wiggle: If True, also outputs a bedGraph wiggle file and returns its FileStoreID
:param bool sort: If True, the output BAM is sorted by coordinate
:return: FileStoreIDs for the transcriptome BAM, aligned BAM, (wiggle file if requested,) log, and splice junction files
:rtype: tuple
"""
work_dir = job.fileStore.getLocalTempDir()
download_url(job, url=star_index_url, name='starIndex.tar.gz', work_dir=work_dir)
subprocess.check_call(['tar', '-xvf', os.path.join(work_dir, 'starIndex.tar.gz'), '-C', work_dir])
os.remove(os.path.join(work_dir, 'starIndex.tar.gz'))
# Determine tarball structure - star index contains are either in a subdir or in the tarball itself
star_index = os.path.join('/data', os.listdir(work_dir)[0]) if len(os.listdir(work_dir)) == 1 else '/data'
# Parameter handling for paired / single-end data
parameters = ['--runThreadN', str(job.cores),
'--genomeDir', star_index,
'--outFileNamePrefix', 'rna',
'--outSAMunmapped', 'Within',
'--quantMode', 'TranscriptomeSAM',
'--outSAMattributes', 'NH', 'HI', 'AS', 'NM', 'MD',
'--outFilterType', 'BySJout',
'--outFilterMultimapNmax', '20',
'--outFilterMismatchNmax', '999',
'--outFilterMismatchNoverReadLmax', '0.04',
'--alignIntronMin', '20',
'--alignIntronMax', '1000000',
'--alignMatesGapMax', '1000000',
'--alignSJoverhangMin', '8',
'--alignSJDBoverhangMin', '1',
'--sjdbScore', '1',
'--limitBAMsortRAM', '49268954168']
# Modify paramaters based on function arguments
if sort:
parameters.extend(['--outSAMtype', 'BAM', 'SortedByCoordinate'])
aligned_bam = 'rnaAligned.sortedByCoord.out.bam'
else:
parameters.extend(['--outSAMtype', 'BAM', 'Unsorted'])
aligned_bam = 'rnaAligned.out.bam'
if wiggle:
parameters.extend(['--outWigType', 'bedGraph',
'--outWigStrand', 'Unstranded',
'--outWigReferencesPrefix', 'chr'])
if r1_id and r2_id:
job.fileStore.readGlobalFile(r1_id, os.path.join(work_dir, 'R1.fastq'))
job.fileStore.readGlobalFile(r2_id, os.path.join(work_dir, 'R2.fastq'))
parameters.extend(['--readFilesIn', '/data/R1.fastq', '/data/R2.fastq'])
else:
job.fileStore.readGlobalFile(r1_id, os.path.join(work_dir, 'R1.fastq'))
parameters.extend(['--readFilesIn', '/data/R1.fastq'])
# Call: STAR Mapping
dockerCall(job=job, tool='quay.io/ucsc_cgl/star:2.4.2a--bcbd5122b69ff6ac4ef61958e47bde94001cfe80',
workDir=work_dir, parameters=parameters)
# Check output bam isnt size zero if sorted
aligned_bam_path = os.path.join(work_dir, aligned_bam)
if sort:
assert os.stat(aligned_bam_path).st_size > 0, 'Aligned bam failed to sort. Ensure sufficient memory is free.'
# Write to fileStore
aligned_id = job.fileStore.writeGlobalFile(aligned_bam_path)
transcriptome_id = job.fileStore.writeGlobalFile(os.path.join(work_dir, 'rnaAligned.toTranscriptome.out.bam'))
log_id = job.fileStore.writeGlobalFile(os.path.join(work_dir, 'rnaLog.final.out'))
sj_id = job.fileStore.writeGlobalFile(os.path.join(work_dir, 'rnaSJ.out.tab'))
if wiggle:
wiggle_id = job.fileStore.writeGlobalFile(os.path.join(work_dir, 'rnaSignal.UniqueMultiple.str1.out.bg'))
return transcriptome_id, aligned_id, wiggle_id, log_id, sj_id
else:
return transcriptome_id, aligned_id, log_id, sj_id
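
# Hedged usage sketch (added for illustration): chaining STAR alignment into RSEM
# quantification (run_rsem appears later in this dataset). Index/reference URLs and
# resource hints are placeholders.
def align_and_quantify(job, r1_id, r2_id, star_index_url, rsem_ref_url):
    star = job.addChildJobFn(run_star, r1_id, r2_id, star_index_url,
                             wiggle=False, sort=True,
                             cores=8, memory='60G', disk='100G')
    # With wiggle=False, run_star returns (transcriptome_id, aligned_id, log_id, sj_id)
    rsem = star.addChildJobFn(run_rsem, star.rv(0), rsem_ref_url,
                              paired=r2_id is not None, cores=8, disk='50G')
    return star.rv(1), rsem.rv()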
|
python
|
{
"resource": ""
}
|
q276317
|
Stream.create
|
test
|
def create(self, schema="{}", **kwargs):
"""Creates a stream given an optional JSON schema encoded as a python dict. You can also add other properties
of the stream, such as the icon, datatype or description. Create accepts both a string schema and
a dict-encoded schema."""
if isinstance(schema, basestring):
strschema = schema
schema = json.loads(schema)
else:
strschema = json.dumps(schema)
Draft4Validator.check_schema(schema)
kwargs["schema"] = strschema
self.metadata = self.db.create(self.path, kwargs).json()
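
# Hedged usage sketch (added for illustration). Assumes `cdb[path]` resolves a Stream
# object the same way Logger.addStream does elsewhere in this dataset; the path and
# schema are placeholders.
def ensure_numeric_stream(cdb, path):
    stream = cdb[path]
    if not stream.exists():
        # Both a dict and a JSON string are accepted as the schema.
        stream.create({"type": "number"}, description="Room temperature in Celsius")
    return stream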
|
python
|
{
"resource": ""
}
|
q276318
|
Stream.export
|
test
|
def export(self, directory):
"""Exports the stream to the given directory. The directory can't exist.
You can later import this device by running import_stream on a device.
"""
if os.path.exists(directory):
raise FileExistsError(
"The stream export directory already exists")
os.mkdir(directory)
# Write the stream's info
with open(os.path.join(directory, "stream.json"), "w") as f:
json.dump(self.data, f)
# Now write the stream's data
# We sort it first, since older versions of ConnectorDB had a bug
# where sometimes datapoints would be returned out of order.
self[:].sort().writeJSON(os.path.join(directory, "data.json"))
# And if the stream is a downlink, write the downlink data
if self.downlink:
self(i1=0, i2=0, downlink=True).sort().writeJSON(os.path.join(directory, "downlink.json"))
|
python
|
{
"resource": ""
}
|
q276319
|
Stream.device
|
test
|
def device(self):
"""returns the device which owns the given stream"""
splitted_path = self.path.split("/")
return Device(self.db,
splitted_path[0] + "/" + splitted_path[1])
|
python
|
{
"resource": ""
}
|
q276320
|
get_labels
|
test
|
def get_labels(ontology, ols_base=None):
"""Iterates over the labels of terms in the ontology
:param str ontology: The name of the ontology
:param str ols_base: An optional, custom OLS base url
:rtype: iter[str]
"""
client = OlsClient(ols_base=ols_base)
return client.iter_labels(ontology)
|
python
|
{
"resource": ""
}
|
q276321
|
get_hierarchy
|
test
|
def get_hierarchy(ontology, ols_base=None):
"""Iterates over the parent-child relationships in an ontolog
:param str ontology: The name of the ontology
:param str ols_base: An optional, custom OLS base url
:rtype: iter[tuple[str,str]]
"""
client = OlsClient(ols_base=ols_base)
return client.iter_hierarchy(ontology)
|
python
|
{
"resource": ""
}
|
q276322
|
AbstractPipelineWrapper.run
|
test
|
def run(cls, name, desc):
"""
Prepares and runs the pipeline. Note this method must be invoked both from inside a
Docker container and while the docker daemon is reachable.
:param str name: The name of the command to start the workflow.
:param str desc: The description of the workflow.
"""
wrapper = cls(name, desc)
mount_path = wrapper._get_mount_path()
# prepare parser
arg_parser = wrapper._create_argument_parser()
wrapper._extend_argument_parser(arg_parser)
# prepare config file
empty_config = wrapper.__get_empty_config()
config_yaml = ruamel.yaml.load(empty_config)
wrapper.__populate_parser_from_config(arg_parser, config_yaml)
args = arg_parser.parse_args()
for k,v in vars(args).items():
k = k.replace('_', '-')
if k in config_yaml:
config_yaml[k] = v
config_path = wrapper._get_config_path()
with open(config_path, 'w') as writable:
ruamel.yaml.dump(config_yaml, stream=writable)
# prepare workdir
workdir_path = os.path.join(mount_path, 'Toil-' + wrapper._name)
if os.path.exists(workdir_path):
if args.restart:
log.info('Reusing temporary directory: %s', workdir_path)
else:
raise UserError('Temporary directory {} already exists. Run with --restart '
'option or remove directory.'.format(workdir_path))
else:
os.makedirs(workdir_path)
log.info('Temporary directory created: %s', workdir_path)
command = wrapper._create_pipeline_command(args, workdir_path, config_path)
wrapper._extend_pipeline_command(command, args)
# run command
try:
subprocess.check_call(command)
except subprocess.CalledProcessError as e:
print(e, file=sys.stderr)
finally:
stat = os.stat(mount_path)
log.info('Pipeline terminated, changing ownership of output files in %s from root to '
'uid %s and gid %s.', mount_path, stat.st_uid, stat.st_gid)
chown_command = ['chown', '-R', '%s:%s' % (stat.st_uid, stat.st_gid), mount_path]
subprocess.check_call(chown_command)
if args.no_clean:
log.info('Flag "--no-clean" was used, therefore %s was not deleted.', workdir_path)
else:
log.info('Cleaning up temporary directory: %s', workdir_path)
shutil.rmtree(workdir_path)
|
python
|
{
"resource": ""
}
|
q276323
|
AbstractPipelineWrapper.__populate_parser_from_config
|
test
|
def __populate_parser_from_config(self, arg_parser, config_data, prefix=''):
"""
Populates an ArgumentParser object with arguments where each argument is a key from the
given config_data dictionary.
:param str prefix: Prepends the key with this prefix delimited by a single '.' character.
:param argparse.ArgumentParser arg_parser:
:param dict config_data: The parsed yaml data from the config.
>>> pw = AbstractPipelineWrapper('test', 'this is a test')
>>> parser = argparse.ArgumentParser()
>>> pw._PipelineWrapperBuilder__populate_parser_from_config(parser, {'a':None, 'b':2})
>>> vars(parser.parse_args(['--a', '1']))
{'a': '1', 'b': 2}
>>> vars(parser.parse_args(['--b', '3']))
{'a': None, 'b': '3'}
>>> parser = argparse.ArgumentParser()
>>> pw._PipelineWrapperBuilder__populate_parser_from_config(parser, {})
>>> vars(parser.parse_args([]))
{}
>>> parser = argparse.ArgumentParser()
>>> pw._PipelineWrapperBuilder__populate_parser_from_config(parser,
... dict(a={'a':'b', 'c':{'d':'e'}},
... f='g', h={}))
>>> vars(parser.parse_args([]))
{'f': 'g', 'a.a': 'b', 'a.c.d': 'e'}
"""
for k,v in config_data.items():
k = prefix + '.' + k if prefix else k
if isinstance(v, dict):
self.__populate_parser_from_config(arg_parser, v, prefix=k)
else:
self._add_option(arg_parser, name=k, default=v)
|
python
|
{
"resource": ""
}
|
q276324
|
AbstractPipelineWrapper.__get_empty_config
|
test
|
def __get_empty_config(self):
"""
Returns the config file contents as a string. The config file is generated and then deleted.
"""
self._generate_config()
path = self._get_config_path()
with open(path, 'r') as readable:
contents = readable.read()
os.remove(path)
return contents
|
python
|
{
"resource": ""
}
|
q276325
|
AbstractPipelineWrapper._get_mount_path
|
test
|
def _get_mount_path(self):
"""
Returns the path of the mount point of the current container. If this method is invoked
outside of a Docker container a NotInsideContainerError is raised. Likewise if the docker
daemon is unreachable from inside the container a UserError is raised. This method is
idempotent.
"""
if self._mount_path is None:
name = current_docker_container_id()
if dockerd_is_reachable():
# Get name of mounted volume
blob = json.loads(subprocess.check_output(['docker', 'inspect', name]))
mounts = blob[0]['Mounts']
# Ensure docker.sock is mounted correctly
sock_mnt = [x['Source'] == x['Destination']
for x in mounts if 'docker.sock' in x['Source']]
require(len(sock_mnt) == 1,
'Missing socket mount. Requires the following: '
'docker run -v /var/run/docker.sock:/var/run/docker.sock')
# Ensure formatting of command for 2 mount points
if len(mounts) == 2:
require(all(x['Source'] == x['Destination'] for x in mounts),
'Docker Src/Dst mount points, invoked with the -v argument, '
'must be the same if only using one mount point aside from the docker '
'socket.')
work_mount = [x['Source'] for x in mounts if 'docker.sock' not in x['Source']]
else:
# Ensure only one mirror mount exists aside from docker.sock
mirror_mounts = [x['Source'] for x in mounts if x['Source'] == x['Destination']]
work_mount = [x for x in mirror_mounts if 'docker.sock' not in x]
require(len(work_mount) == 1, 'Wrong number of mirror mounts provided, see '
'documentation.')
self._mount_path = work_mount[0]
log.info('The work mount is: %s', self._mount_path)
else:
raise UserError('Docker daemon is not reachable, ensure Docker is being run with: '
'"-v /var/run/docker.sock:/var/run/docker.sock" as an argument.')
return self._mount_path
|
python
|
{
"resource": ""
}
|
q276326
|
AbstractPipelineWrapper._add_option
|
test
|
def _add_option(self, arg_parser, name, *args, **kwargs):
"""
Add an argument to the given arg_parser with the given name.
:param argparse.ArgumentParser arg_parser:
:param str name: The name of the option.
"""
arg_parser.add_argument('--' + name, *args, **kwargs)
|
python
|
{
"resource": ""
}
|
q276327
|
AbstractPipelineWrapper._create_argument_parser
|
test
|
def _create_argument_parser(self):
"""
Creates and returns an ArgumentParser object prepopulated with 'no clean', 'cores' and
'restart' arguments.
"""
parser = argparse.ArgumentParser(description=self._desc,
formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('--no-clean', action='store_true',
help='If this flag is used, temporary work directory is not cleaned.')
parser.add_argument('--restart', action='store_true',
help='If this flag is used, a previously uncleaned workflow in the same'
' directory will be resumed')
parser.add_argument('--cores', type=int, default=None,
help='Will set a cap on number of cores to use, default is all '
'available cores.')
return parser
|
python
|
{
"resource": ""
}
|
q276328
|
AbstractPipelineWrapper._create_pipeline_command
|
test
|
def _create_pipeline_command(self, args, workdir_path, config_path):
"""
Creates and returns a list that represents a command for running the pipeline.
"""
return ([self._name, 'run', os.path.join(workdir_path, 'jobStore'),
'--config', config_path,
'--workDir', workdir_path, '--retryCount', '1']
+ (['--restart'] if args.restart else []))
|
python
|
{
"resource": ""
}
|
q276329
|
DatabaseConnection.setauth
|
test
|
def setauth(self, user_or_apikey=None, user_password=None):
""" setauth sets the authentication header for use in the session.
It is for use when apikey is updated or something of the sort, such that
there is a seamless experience. """
auth = None
if user_or_apikey is not None:
# ConnectorDB allows login using both basic auth or an apikey url param.
# The python client uses basic auth for all logins
if user_password is None:
# Login by api key - the basic auth login uses "" user and
# apikey as password
user_password = user_or_apikey
user_or_apikey = ""
auth = HTTPBasicAuth(user_or_apikey, user_password)
self.r.auth = auth
# Set the websocket's authentication
self.ws.setauth(auth)
|
python
|
{
"resource": ""
}
|
q276330
|
DatabaseConnection.handleresult
|
test
|
def handleresult(self, r):
"""Handles HTTP error codes for the given request
Raises:
AuthenticationError on the appropriate 4** errors
ServerError if the response is not an ok (2**)
Arguments:
r -- The request result
"""
if r.status_code >= 400 and r.status_code < 500:
msg = r.json()
raise AuthenticationError(str(msg["code"]) + ": " + msg["msg"] +
" (" + msg["ref"] + ")")
elif r.status_code > 300:
err = None
try:
msg = r.json()
err = ServerError(str(msg["code"]) + ": " + msg["msg"] + " (" +
msg["ref"] + ")")
except:
raise ServerError(
"Server returned error, but did not give a valid error message")
raise err
return r
|
python
|
{
"resource": ""
}
|
q276331
|
DatabaseConnection.ping
|
test
|
def ping(self):
"""Attempts to ping the server using current credentials, and responds with the path of the currently
authenticated device"""
return self.handleresult(self.r.get(self.url,
params={"q": "this"})).text
|
python
|
{
"resource": ""
}
|
q276332
|
DatabaseConnection.create
|
test
|
def create(self, path, data=None):
"""Send a POST CRUD API request to the given path using the given data which will be converted
to json"""
return self.handleresult(self.r.post(urljoin(self.url + CRUD_PATH,
path),
data=json.dumps(data)))
|
python
|
{
"resource": ""
}
|
q276333
|
DatabaseConnection.update
|
test
|
def update(self, path, data=None):
"""Send an update request to the given path of the CRUD API, with the given data dict, which will be converted
into json"""
return self.handleresult(self.r.put(urljoin(self.url + CRUD_PATH,
path),
data=json.dumps(data)))
|
python
|
{
"resource": ""
}
|
q276334
|
DatabaseConnection.delete
|
test
|
def delete(self, path):
"""Send a delete request to the given path of the CRUD API. This deletes the object. Or at least tries to."""
return self.handleresult(self.r.delete(urljoin(self.url + CRUD_PATH,
path)))
|
python
|
{
"resource": ""
}
|
q276335
|
DatabaseConnection.subscribe
|
test
|
def subscribe(self, stream, callback, transform=""):
"""Subscribe to the given stream with the callback"""
return self.ws.subscribe(stream, callback, transform)
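
# Hedged usage sketch (added for illustration). Assumes `db` exposes subscribe() as
# defined above and is already authenticated; the stream path is a placeholder.
def on_datapoints(stream, data):
    # `data` holds the datapoints delivered for the subscribed stream.
    print("received %d datapoints from %s" % (len(data), stream))
    return False  # returning True would acknowledge downlink datapoints (see __on_message)

def watch_temperature(db):
    return db.subscribe("myuser/mydevice/temperature", on_datapoints)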
|
python
|
{
"resource": ""
}
|
q276336
|
User.create
|
test
|
def create(self, email, password, role="user", public=True, **kwargs):
"""Creates the given user - using the passed in email and password.
You can also set other default properties by passing in the relevant information::
usr.create("my@email","mypass",description="I like trains.")
Furthermore, ConnectorDB permits immediate initialization of an entire user tree,
so that you can create all relevant devices and streams in one go::
usr.create("my@email","mypass",devices={
"device1": {
"nickname": "My train",
"streams": {
"stream1": {
"schema": "{\"type\":\"string\"}",
"datatype": "train.choochoo"
}
},
}
})
The user and meta devices are created by default. If you want to add streams to the user device,
use the "streams" option in place of devices in create.
"""
kwargs["email"] = email
kwargs["password"] = password
kwargs["role"] = role
kwargs["public"] = public
self.metadata = self.db.create(
self.path, kwargs).json()
|
python
|
{
"resource": ""
}
|
q276337
|
User.devices
|
test
|
def devices(self):
"""Returns the list of devices that belong to the user"""
result = self.db.read(self.path, {"q": "ls"})
if result is None or result.json() is None:
return []
devices = []
for d in result.json():
dev = self[d["name"]]
dev.metadata = d
devices.append(dev)
return devices
|
python
|
{
"resource": ""
}
|
q276338
|
run_cutadapt
|
test
|
def run_cutadapt(job, r1_id, r2_id, fwd_3pr_adapter, rev_3pr_adapter):
"""
Adapter trimming for RNA-seq data
:param JobFunctionWrappingJob job: passed automatically by Toil
:param str r1_id: FileStoreID of fastq read 1
:param str r2_id: FileStoreID of fastq read 2 (if paired data)
:param str fwd_3pr_adapter: Adapter sequence for the forward 3' adapter
:param str rev_3pr_adapter: Adapter sequence for the reverse 3' adapter (second fastq pair)
:return: R1 and R2 FileStoreIDs
:rtype: tuple
"""
work_dir = job.fileStore.getLocalTempDir()
if r2_id:
require(rev_3pr_adapter, "Paired end data requires a reverse 3' adapter sequence.")
# Retrieve files
parameters = ['-a', fwd_3pr_adapter,
'-m', '35']
if r1_id and r2_id:
job.fileStore.readGlobalFile(r1_id, os.path.join(work_dir, 'R1.fastq'))
job.fileStore.readGlobalFile(r2_id, os.path.join(work_dir, 'R2.fastq'))
parameters.extend(['-A', rev_3pr_adapter,
'-o', '/data/R1_cutadapt.fastq',
'-p', '/data/R2_cutadapt.fastq',
'/data/R1.fastq', '/data/R2.fastq'])
else:
job.fileStore.readGlobalFile(r1_id, os.path.join(work_dir, 'R1.fastq'))
parameters.extend(['-o', '/data/R1_cutadapt.fastq', '/data/R1.fastq'])
# Call: CutAdapt
dockerCall(job=job, tool='quay.io/ucsc_cgl/cutadapt:1.9--6bd44edd2b8f8f17e25c5a268fedaab65fa851d2',
workDir=work_dir, parameters=parameters)
# Write to fileStore
if r1_id and r2_id:
r1_cut_id = job.fileStore.writeGlobalFile(os.path.join(work_dir, 'R1_cutadapt.fastq'))
r2_cut_id = job.fileStore.writeGlobalFile(os.path.join(work_dir, 'R2_cutadapt.fastq'))
else:
r1_cut_id = job.fileStore.writeGlobalFile(os.path.join(work_dir, 'R1_cutadapt.fastq'))
r2_cut_id = None
return r1_cut_id, r2_cut_id
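
# Hedged usage sketch (added for illustration): trimming adapters and then aligning
# with STAR (run_star is defined earlier in this dataset). The adapter sequences and
# resource hints are placeholders.
def trim_then_align(job, r1_id, r2_id, star_index_url):
    cut = job.addChildJobFn(run_cutadapt, r1_id, r2_id,
                            'AGATCGGAAGAG', 'AGATCGGAAGAG', disk='20G')
    # run_cutadapt returns (r1_trimmed_id, r2_trimmed_id); r2 is None for single-end input
    return cut.addChildJobFn(run_star, cut.rv(0), cut.rv(1), star_index_url,
                             cores=8, memory='60G', disk='100G').rv()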
|
python
|
{
"resource": ""
}
|
q276339
|
run_samtools_faidx
|
test
|
def run_samtools_faidx(job, ref_id):
"""
Use SAMtools to create reference index file
:param JobFunctionWrappingJob job: passed automatically by Toil
:param str ref_id: FileStoreID for the reference genome
:return: FileStoreID for reference index
:rtype: str
"""
job.fileStore.logToMaster('Created reference index')
work_dir = job.fileStore.getLocalTempDir()
job.fileStore.readGlobalFile(ref_id, os.path.join(work_dir, 'ref.fasta'))
command = ['faidx', 'ref.fasta']
dockerCall(job=job, workDir=work_dir, parameters=command,
tool='quay.io/ucsc_cgl/samtools:0.1.19--dd5ac549b95eb3e5d166a5e310417ef13651994e')
return job.fileStore.writeGlobalFile(os.path.join(work_dir, 'ref.fasta.fai'))
|
python
|
{
"resource": ""
}
|
q276340
|
run_samtools_index
|
test
|
def run_samtools_index(job, bam):
"""
Runs SAMtools index to create a BAM index file
:param JobFunctionWrappingJob job: passed automatically by Toil
:param str bam: FileStoreID of the BAM file
:return: FileStoreID for BAM index file
:rtype: str
"""
work_dir = job.fileStore.getLocalTempDir()
job.fileStore.readGlobalFile(bam, os.path.join(work_dir, 'sample.bam'))
# Call: index the bam
parameters = ['index', '/data/sample.bam']
dockerCall(job=job, workDir=work_dir, parameters=parameters,
tool='quay.io/ucsc_cgl/samtools:0.1.19--dd5ac549b95eb3e5d166a5e310417ef13651994e')
# Write to fileStore
return job.fileStore.writeGlobalFile(os.path.join(work_dir, 'sample.bam.bai'))
|
python
|
{
"resource": ""
}
|
q276341
|
run_sambamba_markdup
|
test
|
def run_sambamba_markdup(job, bam):
"""
Marks reads as PCR duplicates using Sambamba
:param JobFunctionWrappingJob job: passed automatically by Toil
:param str bam: FileStoreID for BAM file
:return: FileStoreID for sorted BAM file
:rtype: str
"""
work_dir = job.fileStore.getLocalTempDir()
job.fileStore.readGlobalFile(bam, os.path.join(work_dir, 'input.bam'))
command = ['/usr/local/bin/sambamba',
'markdup',
'-t', str(int(job.cores)),
'/data/input.bam',
'/data/output.bam']
start_time = time.time()
dockerCall(job=job, workDir=work_dir,
parameters=command,
tool='quay.io/biocontainers/sambamba:0.6.6--0')
end_time = time.time()
_log_runtime(job, start_time, end_time, "sambamba mkdup")
return job.fileStore.writeGlobalFile(os.path.join(work_dir, 'output.bam'))
|
python
|
{
"resource": ""
}
|
q276342
|
run_samblaster
|
test
|
def run_samblaster(job, sam):
"""
Marks reads as PCR duplicates using SAMBLASTER
:param JobFunctionWrappingJob job: passed automatically by Toil
:param str sam: FileStoreID for SAM file
:return: FileStoreID for deduped SAM file
:rtype: str
"""
work_dir = job.fileStore.getLocalTempDir()
job.fileStore.readGlobalFile(sam, os.path.join(work_dir, 'input.sam'))
command = ['/usr/local/bin/samblaster',
'-i', '/data/input.sam',
'-o', '/data/output.sam',
'--ignoreUnmated']
start_time = time.time()
dockerCall(job=job, workDir=work_dir,
parameters=command,
tool='quay.io/biocontainers/samblaster:0.1.24--0')
end_time = time.time()
_log_runtime(job, start_time, end_time, "SAMBLASTER")
return job.fileStore.writeGlobalFile(os.path.join(work_dir, 'output.sam'))
|
python
|
{
"resource": ""
}
|
q276343
|
picard_mark_duplicates
|
test
|
def picard_mark_duplicates(job, bam, bai, validation_stringency='LENIENT'):
"""
Runs Picard MarkDuplicates on a BAM file. Requires that the BAM file be coordinate sorted.
:param JobFunctionWrappingJob job: passed automatically by Toil
:param str bam: FileStoreID for BAM file
:param str bai: FileStoreID for BAM index file
:param str validation_stringency: BAM file validation stringency, default is LENIENT
:return: FileStoreIDs for BAM and BAI files
:rtype: tuple
"""
work_dir = job.fileStore.getLocalTempDir()
# Retrieve file path
job.fileStore.readGlobalFile(bam, os.path.join(work_dir, 'sorted.bam'))
job.fileStore.readGlobalFile(bai, os.path.join(work_dir, 'sorted.bai'))
# Call: picardtools
command = ['MarkDuplicates',
'INPUT=sorted.bam',
'OUTPUT=mkdups.bam',
'METRICS_FILE=metrics.txt',
'ASSUME_SORTED=true',
'CREATE_INDEX=true',
'VALIDATION_STRINGENCY=%s' % validation_stringency.upper()]
# picard-tools container doesn't have JAVA_OPTS variable
# Set TMPDIR to /data to prevent writing temporary files to /tmp
docker_parameters = ['--rm',
'--log-driver', 'none',
'-e', 'JAVA_OPTIONS=-Djava.io.tmpdir=/data/ -Xmx{}'.format(job.memory),
'-v', '{}:/data'.format(work_dir)]
start_time = time.time()
dockerCall(job=job, workDir=work_dir,
parameters=command,
tool='quay.io/ucsc_cgl/picardtools:1.95--dd5ac549b95eb3e5d166a5e310417ef13651994e',
dockerParameters=docker_parameters)
end_time = time.time()
_log_runtime(job, start_time, end_time, "Picard MarkDuplicates")
bam = job.fileStore.writeGlobalFile(os.path.join(work_dir, 'mkdups.bam'))
bai = job.fileStore.writeGlobalFile(os.path.join(work_dir, 'mkdups.bai'))
return bam, bai
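
# Hedged usage sketch (added for illustration): indexing a coordinate-sorted BAM with
# run_samtools_index (defined earlier in this dataset) and then marking duplicates.
# Resource hints are placeholders.
def index_and_mark_duplicates(job, sorted_bam_id):
    bai = job.addChildJobFn(run_samtools_index, sorted_bam_id, disk='20G')
    return bai.addChildJobFn(picard_mark_duplicates, sorted_bam_id, bai.rv(),
                             memory='8G', disk='40G').rv()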
|
python
|
{
"resource": ""
}
|
q276344
|
run_picard_sort
|
test
|
def run_picard_sort(job, bam, sort_by_name=False):
"""
Sorts BAM file using Picard SortSam
:param JobFunctionWrappingJob job: passed automatically by Toil
:param str bam: FileStoreID for BAM file
:param boolean sort_by_name: If true, sorts by read name instead of coordinate.
:return: FileStoreID for sorted BAM file
:rtype: str
"""
work_dir = job.fileStore.getLocalTempDir()
job.fileStore.readGlobalFile(bam, os.path.join(work_dir, 'input.bam'))
command = ['SortSam',
'O=/data/output.bam',
'I=/data/input.bam']
# picard-tools container doesn't have JAVA_OPTS variable
# Set TMPDIR to /data to prevent writing temporary files to /tmp
docker_parameters = ['--rm',
'--log-driver', 'none',
'-e', 'JAVA_OPTIONS=-Djava.io.tmpdir=/data/ -Xmx{}'.format(job.memory),
'-v', '{}:/data'.format(work_dir)]
if sort_by_name:
command.append('SO=queryname')
else:
command.append('SO=coordinate')
start_time = time.time()
dockerCall(job=job, workDir=work_dir,
parameters=command,
tool='quay.io/ucsc_cgl/picardtools:1.95--dd5ac549b95eb3e5d166a5e310417ef13651994e',
dockerParameters=docker_parameters)
end_time = time.time()
_log_runtime(job, start_time, end_time, "Picard SortSam")
return job.fileStore.writeGlobalFile(os.path.join(work_dir, 'output.bam'))
|
python
|
{
"resource": ""
}
|
q276345
|
run_base_recalibration
|
test
|
def run_base_recalibration(job, bam, bai, ref, ref_dict, fai, dbsnp, mills, unsafe=False):
"""
Creates recalibration table for Base Quality Score Recalibration
:param JobFunctionWrappingJob job: passed automatically by Toil
:param str bam: FileStoreID for BAM file
:param str bai: FileStoreID for BAM index file
:param str ref: FileStoreID for reference genome fasta file
:param str ref_dict: FileStoreID for reference genome sequence dictionary file
:param str fai: FileStoreID for reference genome fasta index file
:param str dbsnp: FileStoreID for dbSNP VCF file
:param str mills: FileStoreID for Mills VCF file
:param bool unsafe: If True, runs GATK in UNSAFE mode: "-U ALLOW_SEQ_DICT_INCOMPATIBILITY"
:return: FileStoreID for the recalibration table file
:rtype: str
"""
inputs = {'ref.fasta': ref,
'ref.fasta.fai': fai,
'ref.dict': ref_dict,
'input.bam': bam,
'input.bai': bai,
'dbsnp.vcf': dbsnp,
'mills.vcf': mills}
work_dir = job.fileStore.getLocalTempDir()
for name, file_store_id in inputs.iteritems():
job.fileStore.readGlobalFile(file_store_id, os.path.join(work_dir, name))
# Call: GATK -- BaseRecalibrator
parameters = ['-T', 'BaseRecalibrator',
'-nct', str(int(job.cores)),
'-R', '/data/ref.fasta',
'-I', '/data/input.bam',
# Recommended known sites:
# https://software.broadinstitute.org/gatk/guide/article?id=1247
'-knownSites', '/data/dbsnp.vcf',
'-knownSites', '/data/mills.vcf',
'-o', '/data/recal_data.table']
if unsafe:
parameters.extend(['-U', 'ALLOW_SEQ_DICT_INCOMPATIBILITY'])
# Set TMPDIR to /data to prevent writing temporary files to /tmp
docker_parameters = ['--rm',
'--log-driver', 'none',
'-e', 'JAVA_OPTS=-Djava.io.tmpdir=/data/ -Xmx{}'.format(job.memory),
'-v', '{}:/data'.format(work_dir)]
start_time = time.time()
dockerCall(job=job, tool='quay.io/ucsc_cgl/gatk:3.5--dba6dae49156168a909c43330350c6161dc7ecc2',
workDir=work_dir,
parameters=parameters,
dockerParameters=docker_parameters)
end_time = time.time()
_log_runtime(job, start_time, end_time, "GATK3 BaseRecalibrator")
return job.fileStore.writeGlobalFile(os.path.join(work_dir, 'recal_data.table'))
|
python
|
{
"resource": ""
}
|
q276346
|
run_kallisto
|
test
|
def run_kallisto(job, r1_id, r2_id, kallisto_index_url):
"""
RNA quantification via Kallisto
:param JobFunctionWrappingJob job: passed automatically by Toil
:param str r1_id: FileStoreID of fastq (pair 1)
:param str r2_id: FileStoreID of fastq (pair 2 if applicable, otherwise pass None for single-end)
:param str kallisto_index_url: FileStoreID for Kallisto index file
:return: FileStoreID from Kallisto output
:rtype: str
"""
work_dir = job.fileStore.getLocalTempDir()
download_url(job, url=kallisto_index_url, name='kallisto_hg38.idx', work_dir=work_dir)
# Retrieve files
parameters = ['quant',
'-i', '/data/kallisto_hg38.idx',
'-t', str(job.cores),
'-o', '/data/',
'-b', '100',
'--fusion']
if r1_id and r2_id:
job.fileStore.readGlobalFile(r1_id, os.path.join(work_dir, 'R1.fastq'))
job.fileStore.readGlobalFile(r2_id, os.path.join(work_dir, 'R2.fastq'))
parameters.extend(['/data/R1.fastq', '/data/R2.fastq'])
else:
job.fileStore.readGlobalFile(r1_id, os.path.join(work_dir, 'R1.fastq'))
parameters.extend(['--single', '-l', '200', '-s', '15', '/data/R1.fastq'])
# Call: Kallisto
dockerCall(job=job, tool='quay.io/ucsc_cgl/kallisto:0.42.4--35ac87df5b21a8e8e8d159f26864ac1e1db8cf86',
workDir=work_dir, parameters=parameters)
# Tar output files together and store in fileStore
output_files = [os.path.join(work_dir, x) for x in ['run_info.json', 'abundance.tsv', 'abundance.h5', 'fusion.txt']]
tarball_files(tar_name='kallisto.tar.gz', file_paths=output_files, output_dir=work_dir)
return job.fileStore.writeGlobalFile(os.path.join(work_dir, 'kallisto.tar.gz'))
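
# Hedged usage sketch (added for illustration): quantifying a single-end sample with
# Kallisto as a standalone Toil workflow. The fastq path, index URL, job store path
# and resource hints are placeholders.
from toil.common import Toil
from toil.job import Job

if __name__ == '__main__':
    options = Job.Runner.getDefaultOptions('./kallisto-jobstore')
    with Toil(options) as toil:
        r1 = toil.importFile('file:///path/to/R1.fastq')
        root = Job.wrapJobFn(run_kallisto, r1, None,
                             'http://example.com/kallisto_hg38.idx',
                             cores=4, disk='20G')
        tarball_id = toil.start(root)  # FileStoreID of kallisto.tar.gz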
|
python
|
{
"resource": ""
}
|
q276347
|
run_rsem
|
test
|
def run_rsem(job, bam_id, rsem_ref_url, paired=True):
"""
RNA quantification with RSEM
:param JobFunctionWrappingJob job: Passed automatically by Toil
:param str bam_id: FileStoreID of transcriptome bam for quantification
:param str rsem_ref_url: URL of RSEM reference (tarball)
:param bool paired: If True, uses parameters for paired end data
:return: FileStoreIDs for RSEM's gene and isoform output
:rtype: tuple(str, str)
"""
work_dir = job.fileStore.getLocalTempDir()
download_url(job, url=rsem_ref_url, name='rsem_ref.tar.gz', work_dir=work_dir)
subprocess.check_call(['tar', '-xvf', os.path.join(work_dir, 'rsem_ref.tar.gz'), '-C', work_dir])
os.remove(os.path.join(work_dir, 'rsem_ref.tar.gz'))
# Determine tarball structure - based on it, ascertain folder name and rsem reference prefix
rsem_files = []
for root, directories, files in os.walk(work_dir):
rsem_files.extend([os.path.join(root, x) for x in files])
# "grp" is a required RSEM extension that should exist in the RSEM reference
ref_prefix = [os.path.basename(os.path.splitext(x)[0]) for x in rsem_files if 'grp' in x][0]
ref_folder = os.path.join('/data', os.listdir(work_dir)[0]) if len(os.listdir(work_dir)) == 1 else '/data'
# I/O
job.fileStore.readGlobalFile(bam_id, os.path.join(work_dir, 'transcriptome.bam'))
output_prefix = 'rsem'
# Call: RSEM
parameters = ['--quiet',
'--no-qualities',
'-p', str(job.cores),
'--forward-prob', '0.5',
'--seed-length', '25',
'--fragment-length-mean', '-1.0',
'--bam', '/data/transcriptome.bam',
os.path.join(ref_folder, ref_prefix),
output_prefix]
if paired:
parameters = ['--paired-end'] + parameters
dockerCall(job=job, tool='quay.io/ucsc_cgl/rsem:1.2.25--d4275175cc8df36967db460b06337a14f40d2f21',
parameters=parameters, workDir=work_dir)
# Write to FileStore
gene_id = job.fileStore.writeGlobalFile(os.path.join(work_dir, output_prefix + '.genes.results'))
isoform_id = job.fileStore.writeGlobalFile(os.path.join(work_dir, output_prefix + '.isoforms.results'))
return gene_id, isoform_id
|
python
|
{
"resource": ""
}
|
q276348
|
SARPlus.get_user_affinity
|
test
|
def get_user_affinity(self, test):
"""Prepare test set for C++ SAR prediction code.
Find all items the test users have seen in the past.
Arguments:
test (pySpark.DataFrame): input dataframe which contains test users.
"""
test.createOrReplaceTempView(self.f("{prefix}df_test"))
query = self.f(
"SELECT DISTINCT {col_user} FROM {prefix}df_test CLUSTER BY {col_user}"
)
df_test_users = self.spark.sql(query)
df_test_users.write.mode("overwrite").saveAsTable(
self.f("{prefix}df_test_users")
)
query = self.f(
"""
SELECT a.{col_user}, a.{col_item}, CAST(a.{col_rating} AS double) {col_rating}
FROM {prefix}df_train a INNER JOIN {prefix}df_test_users b ON a.{col_user} = b.{col_user}
DISTRIBUTE BY {col_user}
SORT BY {col_user}, {col_item}
"""
)
return self.spark.sql(query)
|
python
|
{
"resource": ""
}
|
q276349
|
WebsocketHandler.send
|
test
|
def send(self, cmd):
"""Send the given command thru the websocket"""
with self.ws_sendlock:
self.ws.send(json.dumps(cmd))
|
python
|
{
"resource": ""
}
|
q276350
|
WebsocketHandler.subscribe
|
test
|
def subscribe(self, stream, callback, transform=""):
"""Given a stream, a callback and an optional transform, sets up the subscription"""
if self.status == "disconnected" or self.status == "disconnecting" or self.status == "connecting":
self.connect()
if self.status != "connected":
return False
logging.debug("Subscribing to %s", stream)
self.send({"cmd": "subscribe", "arg": stream, "transform": transform})
with self.subscription_lock:
self.subscriptions[stream + ":" + transform] = callback
return True
|
python
|
{
"resource": ""
}
|
q276351
|
WebsocketHandler.connect
|
test
|
def connect(self):
"""Attempt to connect to the websocket - and returns either True or False depending on if
the connection was successful or not"""
# Wait for the lock to be available (ie, the websocket is not being used (yet))
self.ws_openlock.acquire()
self.ws_openlock.release()
if self.status == "connected":
return True # Already connected
if self.status == "disconnecting":
# If currently disconnecting, wait a moment, and retry connect
time.sleep(0.1)
return self.connect()
if self.status == "disconnected" or self.status == "reconnecting":
self.ws = websocket.WebSocketApp(self.ws_url,
header=self.headers,
on_message=self.__on_message,
on_ping=self.__on_ping,
on_open=self.__on_open,
on_close=self.__on_close,
on_error=self.__on_error)
self.ws_thread = threading.Thread(target=self.ws.run_forever)
self.ws_thread.daemon = True
self.status = "connecting"
self.ws_openlock.acquire()
self.ws_thread.start()
self.ws_openlock.acquire()
self.ws_openlock.release()
return self.status == "connected"
|
python
|
{
"resource": ""
}
|
q276352
|
WebsocketHandler.__reconnect
|
test
|
def __reconnect(self):
"""This is called when a connection is lost - it attempts to reconnect to the server"""
self.status = "reconnecting"
# Reset the disconnect time after 15 minutes
if self.disconnected_time - self.connected_time > 15 * 60:
self.reconnect_time = self.reconnect_time_starting_seconds
else:
self.reconnect_time *= self.reconnect_time_backoff_multiplier
if self.reconnect_time > self.reconnect_time_max_seconds:
self.reconnect_time = self.reconnect_time_max_seconds
# We want to add some randomness to the reconnect rate - necessary so that we don't pound the server
# if it goes down
self.reconnect_time *= 1 + random.uniform(-0.2, 0.2)
if self.reconnect_time < self.reconnect_time_starting_seconds:
self.reconnect_time = self.reconnect_time_starting_seconds
logging.warn("ConnectorDB:WS: Attempting to reconnect in %fs",
self.reconnect_time)
self.reconnector = threading.Timer(self.reconnect_time,
self.__reconnect_fnc)
self.reconnector.daemon = True
self.reconnector.start()
|
python
|
{
"resource": ""
}
|
q276353
|
WebsocketHandler.__resubscribe
|
test
|
def __resubscribe(self):
"""Send subscribe command for all existing subscriptions. This allows to resume a connection
that was closed"""
with self.subscription_lock:
for sub in self.subscriptions:
logging.debug("Resubscribing to %s", sub)
stream_transform = sub.split(":", 1)
self.send({
"cmd": "subscribe",
"arg": stream_transform[0],
"transform": stream_transform[1]
})
|
python
|
{
"resource": ""
}
|
q276354
|
WebsocketHandler.__on_open
|
test
|
def __on_open(self, ws):
"""Called when the websocket is opened"""
logging.debug("ConnectorDB: Websocket opened")
# Connection success - decrease the wait time for next connection
self.reconnect_time /= self.reconnect_time_backoff_multiplier
self.status = "connected"
self.lastpingtime = time.time()
self.__ensure_ping()
self.connected_time = time.time()
# Release the lock that connect called
self.ws_openlock.release()
|
python
|
{
"resource": ""
}
|
q276355
|
WebsocketHandler.__on_close
|
test
|
def __on_close(self, ws):
"""Called when the websocket is closed"""
if self.status == "disconnected":
return # This can be double-called on disconnect
logging.debug("ConnectorDB:WS: Websocket closed")
# Turn off the ping timer
if self.pingtimer is not None:
self.pingtimer.cancel()
self.disconnected_time = time.time()
if self.status == "disconnecting":
self.status = "disconnected"
elif self.status == "connected":
self.__reconnect()
|
python
|
{
"resource": ""
}
|
q276356
|
WebsocketHandler.__on_error
|
test
|
def __on_error(self, ws, err):
"""Called when there is an error in the websocket"""
logging.debug("ConnectorDB:WS: Connection Error")
if self.status == "connecting":
self.status = "errored"
self.ws_openlock.release()
|
python
|
{
"resource": ""
}
|
q276357
|
WebsocketHandler.__on_message
|
test
|
def __on_message(self, ws, msg):
"""This function is called whenever there is a message received from the server"""
msg = json.loads(msg)
logging.debug("ConnectorDB:WS: Msg '%s'", msg["stream"])
# Build the subscription key
stream_key = msg["stream"] + ":"
if "transform" in msg:
stream_key += msg["transform"]
self.subscription_lock.acquire()
if stream_key in self.subscriptions:
subscription_function = self.subscriptions[stream_key]
self.subscription_lock.release()
fresult = subscription_function(msg["stream"], msg["data"])
if fresult is True:
# This is a special result - if the subscription function of a downlink returns True,
# then the datapoint is acknowledged automatically (ie, reinserted in non-downlink stream)
fresult = msg["data"]
if fresult is not False and fresult is not None and msg["stream"].endswith(
"/downlink") and msg["stream"].count("/") == 3:
# If the above conditions are true, it means that the datapoints were from a downlink,
# and the subscriber function chooses to acknowledge them, so we reinsert them.
self.insert(msg["stream"][:-9], fresult)
else:
self.subscription_lock.release()
logging.warn(
"ConnectorDB:WS: Msg '%s' not subscribed! Subscriptions: %s",
msg["stream"], list(self.subscriptions.keys()))
|
python
|
{
"resource": ""
}
|
q276358
|
WebsocketHandler.__ensure_ping
|
test
|
def __ensure_ping(self):
"""Each time the server sends a ping message, we record the timestamp. If we haven't received a ping
within the given interval, then we assume that the connection was lost, close the websocket and
attempt to reconnect"""
logging.debug("ConnectorDB:WS: pingcheck")
if (time.time() - self.lastpingtime > self.connection_ping_timeout):
logging.warn("ConnectorDB:WS: Websocket ping timed out!")
if self.ws is not None:
self.ws.close()
self.__on_close(self.ws)
else:
# reset the ping timer
self.pingtimer = threading.Timer(self.connection_ping_timeout,
self.__ensure_ping)
self.pingtimer.daemon = True
self.pingtimer.start()
|
python
|
{
"resource": ""
}
|
q276359
|
gatk_select_variants
|
test
|
def gatk_select_variants(job, mode, vcf_id, ref_fasta, ref_fai, ref_dict):
"""
Isolates a particular variant type from a VCF file using GATK SelectVariants
:param JobFunctionWrappingJob job: passed automatically by Toil
:param str mode: variant type (i.e. SNP or INDEL)
:param str vcf_id: FileStoreID for input VCF file
:param str ref_fasta: FileStoreID for reference genome fasta
:param str ref_fai: FileStoreID for reference genome index file
:param str ref_dict: FileStoreID for reference genome sequence dictionary file
:return: FileStoreID for filtered VCF
:rtype: str
"""
job.fileStore.logToMaster('Running GATK SelectVariants to select %ss' % mode)
inputs = {'genome.fa': ref_fasta,
'genome.fa.fai': ref_fai,
'genome.dict': ref_dict,
'input.vcf': vcf_id}
work_dir = job.fileStore.getLocalTempDir()
for name, file_store_id in inputs.iteritems():
job.fileStore.readGlobalFile(file_store_id, os.path.join(work_dir, name))
command = ['-T', 'SelectVariants',
'-R', 'genome.fa',
'-V', 'input.vcf',
'-o', 'output.vcf',
'-selectType', mode]
docker_parameters = ['--rm', '--log-driver', 'none',
'-e', 'JAVA_OPTS=-Djava.io.tmpdir=/data/ -Xmx{}'.format(job.memory)]
dockerCall(job=job, workDir=work_dir,
parameters=command,
tool='quay.io/ucsc_cgl/gatk:3.5--dba6dae49156168a909c43330350c6161dc7ecc2',
dockerParameters=docker_parameters)
return job.fileStore.writeGlobalFile(os.path.join(work_dir, 'output.vcf'))
|
python
|
{
"resource": ""
}
|
q276360
|
gatk_variant_filtration
|
test
|
def gatk_variant_filtration(job, vcf_id, filter_name, filter_expression, ref_fasta, ref_fai, ref_dict):
"""
Filters VCF file using GATK VariantFiltration. Fixes extra pair of quotation marks in VCF header that
may interfere with other VCF tools.
:param JobFunctionWrappingJob job: passed automatically by Toil
:param str vcf_id: FileStoreID for input VCF file
:param str filter_name: Name of filter for VCF header
:param str filter_expression: JEXL filter expression
:param str ref_fasta: FileStoreID for reference genome fasta
:param str ref_fai: FileStoreID for reference genome index file
:param str ref_dict: FileStoreID for reference genome sequence dictionary file
:return: FileStoreID for filtered VCF file
:rtype: str
"""
inputs = {'genome.fa': ref_fasta,
'genome.fa.fai': ref_fai,
'genome.dict': ref_dict,
'input.vcf': vcf_id}
work_dir = job.fileStore.getLocalTempDir()
for name, file_store_id in inputs.iteritems():
job.fileStore.readGlobalFile(file_store_id, os.path.join(work_dir, name))
command = ['-T', 'VariantFiltration',
'-R', 'genome.fa',
'-V', 'input.vcf',
'--filterName', filter_name, # Documents filter name in header
'--filterExpression', filter_expression,
'-o', 'filtered_variants.vcf']
job.fileStore.logToMaster('Running GATK VariantFiltration using {name}: '
'{expression}'.format(name=filter_name, expression=filter_expression))
docker_parameters = ['--rm', '--log-driver', 'none',
'-e', 'JAVA_OPTS=-Djava.io.tmpdir=/data/ -Xmx{}'.format(job.memory)]
dockerCall(job=job, workDir=work_dir,
parameters=command,
tool='quay.io/ucsc_cgl/gatk:3.5--dba6dae49156168a909c43330350c6161dc7ecc2',
dockerParameters=docker_parameters)
# Remove extra quotation marks around filter expression.
malformed_header = os.path.join(work_dir, 'filtered_variants.vcf')
fixed_header = os.path.join(work_dir, 'fixed_header.vcf')
filter_regex = re.escape('"%s"' % filter_expression)
with open(malformed_header, 'r') as f, open(fixed_header, 'w') as g:
for line in f:
g.write(re.sub(filter_regex, filter_expression, line))
return job.fileStore.writeGlobalFile(fixed_header)
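
# Hedged usage sketch (added for illustration): hard-filtering SNPs by chaining
# gatk_select_variants and gatk_variant_filtration. The filter expression follows
# GATK's published SNP recommendations but is only a placeholder here.
def hard_filter_snps(job, vcf_id, ref_fasta, ref_fai, ref_dict):
    snps = job.addChildJobFn(gatk_select_variants, 'SNP', vcf_id,
                             ref_fasta, ref_fai, ref_dict, memory='8G')
    return snps.addChildJobFn(gatk_variant_filtration, snps.rv(),
                              'SNP_HARD_FILTER',
                              'QD < 2.0 || FS > 60.0 || MQ < 40.0',
                              ref_fasta, ref_fai, ref_dict, memory='8G').rv()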
|
python
|
{
"resource": ""
}
|
q276361
|
gatk_variant_recalibrator
|
test
|
def gatk_variant_recalibrator(job,
mode,
vcf,
ref_fasta, ref_fai, ref_dict,
annotations,
hapmap=None, omni=None, phase=None, dbsnp=None, mills=None,
max_gaussians=4,
unsafe_mode=False):
"""
Runs either SNP or INDEL variant quality score recalibration using GATK VariantRecalibrator. Because the VQSR method
models SNPs and INDELs differently, VQSR must be run separately for these variant types.
:param JobFunctionWrappingJob job: passed automatically by Toil
:param str mode: Determines variant recalibration mode (SNP or INDEL)
:param str vcf: FileStoreID for input VCF file
:param str ref_fasta: FileStoreID for reference genome fasta
:param str ref_fai: FileStoreID for reference genome index file
:param str ref_dict: FileStoreID for reference genome sequence dictionary file
:param list[str] annotations: List of GATK variant annotations to filter on
:param str hapmap: FileStoreID for HapMap resource file, required for SNP VQSR
:param str omni: FileStoreID for Omni resource file, required for SNP VQSR
:param str phase: FileStoreID for 1000G resource file, required for SNP VQSR
:param str dbsnp: FilesStoreID for dbSNP resource file, required for SNP and INDEL VQSR
:param str mills: FileStoreID for Mills resource file, required for INDEL VQSR
:param int max_gaussians: Number of Gaussians used during training, default is 4
:param bool unsafe_mode: If True, runs gatk UNSAFE mode: "-U ALLOW_SEQ_DICT_INCOMPATIBILITY"
:return: FileStoreID for the variant recalibration table, tranche file, and plots file
:rtype: tuple
"""
mode = mode.upper()
inputs = {'genome.fa': ref_fasta,
'genome.fa.fai': ref_fai,
'genome.dict': ref_dict,
'input.vcf': vcf}
# Refer to GATK documentation for description of recommended parameters:
# https://software.broadinstitute.org/gatk/documentation/article?id=1259
# https://software.broadinstitute.org/gatk/documentation/article?id=2805
# This base command includes parameters for both INDEL and SNP VQSR.
command = ['-T', 'VariantRecalibrator',
'-R', 'genome.fa',
'-input', 'input.vcf',
'-tranche', '100.0',
'-tranche', '99.9',
'-tranche', '99.0',
'-tranche', '90.0',
'--maxGaussians', str(max_gaussians),
'-recalFile', 'output.recal',
'-tranchesFile', 'output.tranches',
'-rscriptFile', 'output.plots.R']
# Parameters and resource files for SNP VQSR.
if mode == 'SNP':
command.extend(
['-resource:hapmap,known=false,training=true,truth=true,prior=15.0', 'hapmap.vcf',
'-resource:omni,known=false,training=true,truth=true,prior=12.0', 'omni.vcf',
'-resource:dbsnp,known=true,training=false,truth=false,prior=2.0', 'dbsnp.vcf',
'-resource:1000G,known=false,training=true,truth=false,prior=10.0', '1000G.vcf',
'-mode', 'SNP'])
inputs['hapmap.vcf'] = hapmap
inputs['omni.vcf'] = omni
inputs['dbsnp.vcf'] = dbsnp
inputs['1000G.vcf'] = phase
# Parameters and resource files for INDEL VQSR
elif mode == 'INDEL':
command.extend(
['-resource:mills,known=false,training=true,truth=true,prior=12.0', 'mills.vcf',
'-resource:dbsnp,known=true,training=false,truth=false,prior=2.0', 'dbsnp.vcf',
'-mode', 'INDEL'])
inputs['mills.vcf'] = mills
inputs['dbsnp.vcf'] = dbsnp
else:
raise ValueError('Variant filter modes can be SNP or INDEL, got %s' % mode)
for annotation in annotations:
command.extend(['-an', annotation])
if unsafe_mode:
command.extend(['-U', 'ALLOW_SEQ_DICT_INCOMPATIBILITY'])
# Delay reading in files until function is configured
work_dir = job.fileStore.getLocalTempDir()
for name, file_store_id in inputs.iteritems():
job.fileStore.readGlobalFile(file_store_id, os.path.join(work_dir, name))
job.fileStore.logToMaster('Running GATK VariantRecalibrator on {mode}s using the following annotations:\n'
'{annotations}'.format(mode=mode, annotations='\n'.join(annotations)))
docker_parameters = ['--rm', '--log-driver', 'none',
'-e', 'JAVA_OPTS=-Djava.io.tmpdir=/data/ -Xmx{}'.format(job.memory)]
dockerCall(job=job, workDir=work_dir,
parameters=command,
tool='quay.io/ucsc_cgl/gatk:3.5--dba6dae49156168a909c43330350c6161dc7ecc2',
dockerParameters=docker_parameters)
recal_id = job.fileStore.writeGlobalFile(os.path.join(work_dir, 'output.recal'))
tranches_id = job.fileStore.writeGlobalFile(os.path.join(work_dir, 'output.tranches'))
plots_id = job.fileStore.writeGlobalFile(os.path.join(work_dir, 'output.plots.R'))
return recal_id, tranches_id, plots_id
|
python
|
{
"resource": ""
}
|
q276362
|
gatk_apply_variant_recalibration
|
test
|
def gatk_apply_variant_recalibration(job,
mode,
vcf,
recal_table, tranches,
ref_fasta, ref_fai, ref_dict,
ts_filter_level=99.0,
unsafe_mode=False):
"""
Applies variant quality score recalibration to VCF file using GATK ApplyRecalibration
:param JobFunctionWrappingJob job: passed automatically by Toil
:param str mode: Determines variant recalibration mode (SNP or INDEL)
:param str vcf: FileStoreID for input VCF file
:param str recal_table: FileStoreID for recalibration table file
:param str tranches: FileStoreID for tranches file
:param str ref_fasta: FileStoreID for reference genome fasta
:param str ref_fai: FileStoreID for reference genome index file
:param str ref_dict: FileStoreID for reference genome sequence dictionary file
:param float ts_filter_level: Sensitivity expressed as a percentage, default is 99.0
:param bool unsafe_mode: If True, runs gatk UNSAFE mode: "-U ALLOW_SEQ_DICT_INCOMPATIBILITY"
:return: FileStoreID for recalibrated VCF file
:rtype: str
"""
inputs = {'genome.fa': ref_fasta,
'genome.fa.fai': ref_fai,
'genome.dict': ref_dict,
'input.vcf': vcf,
'recal': recal_table,
'tranches': tranches}
work_dir = job.fileStore.getLocalTempDir()
for name, file_store_id in inputs.iteritems():
job.fileStore.readGlobalFile(file_store_id, os.path.join(work_dir, name))
mode = mode.upper()
# GATK recommended parameters:
# https://software.broadinstitute.org/gatk/documentation/article?id=2805
command = ['-T', 'ApplyRecalibration',
'-mode', mode,
'-R', 'genome.fa',
'-input', 'input.vcf',
'-o', 'vqsr.vcf',
'-ts_filter_level', str(ts_filter_level),
'-recalFile', 'recal',
'-tranchesFile', 'tranches']
if unsafe_mode:
command.extend(['-U', 'ALLOW_SEQ_DICT_INCOMPATIBILITY'])
job.fileStore.logToMaster('Running GATK ApplyRecalibration on {mode}s '
'with a sensitivity of {sensitivity}%'.format(mode=mode,
sensitivity=ts_filter_level))
    docker_parameters = ['--rm', '--log-driver', 'none',
'-e', 'JAVA_OPTS=-Djava.io.tmpdir=/data/ -Xmx{}'.format(job.memory)]
dockerCall(job=job, workDir=work_dir,
parameters=command,
tool='quay.io/ucsc_cgl/gatk:3.5--dba6dae49156168a909c43330350c6161dc7ecc2',
dockerParameters=docker_parameters)
return job.fileStore.writeGlobalFile(os.path.join(work_dir, 'vqsr.vcf'))
|
python
|
{
"resource": ""
}
|
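The two GATK steps above are meant to be chained inside a Toil workflow. Below is a minimal, hedged sketch of SNP-mode VQSR wiring: the keyword names for gatk_variant_recalibration follow its docstring, but the exact defaults (for example whether mills may be omitted in SNP mode) are assumptions, and all FileStoreID variables are placeholders.

def snp_vqsr(job, vcf_id, ref_fasta_id, ref_fai_id, ref_dict_id,
             hapmap_id, omni_id, phase_id, dbsnp_id):
    # Illustrative annotation set for SNP recalibration
    annotations = ['QD', 'FS', 'MQ', 'MQRankSum', 'ReadPosRankSum', 'SOR']
    # Keyword names follow the docstring above; mills is left at its (assumed) default for SNP mode
    recal = job.addChildJobFn(gatk_variant_recalibration,
                              mode='SNP', vcf=vcf_id,
                              ref_fasta=ref_fasta_id, ref_fai=ref_fai_id,
                              ref_dict=ref_dict_id, annotations=annotations,
                              hapmap=hapmap_id, omni=omni_id,
                              phase=phase_id, dbsnp=dbsnp_id)
    # Apply the model once the recalibration table (rv(0)) and tranches (rv(1)) exist
    vqsr = recal.addFollowOnJobFn(gatk_apply_variant_recalibration,
                                  'SNP', vcf_id,
                                  recal.rv(0), recal.rv(1),
                                  ref_fasta_id, ref_fai_id, ref_dict_id)
    return vqsr.rv()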
q276363
|
gatk_combine_variants
|
test
|
def gatk_combine_variants(job, vcfs, ref_fasta, ref_fai, ref_dict, merge_option='UNIQUIFY'):
"""
Merges VCF files using GATK CombineVariants
:param JobFunctionWrappingJob job: Toil Job instance
:param dict vcfs: Dictionary of VCF FileStoreIDs {sample identifier: FileStoreID}
:param str ref_fasta: FileStoreID for reference genome fasta
:param str ref_fai: FileStoreID for reference genome index file
:param str ref_dict: FileStoreID for reference genome sequence dictionary file
:param str merge_option: Value for --genotypemergeoption flag (Default: 'UNIQUIFY')
'UNIQUIFY': Multiple variants at a single site are merged into a
single variant record.
'UNSORTED': Used to merge VCFs from the same sample
:return: FileStoreID for merged VCF file
:rtype: str
"""
job.fileStore.logToMaster('Running GATK CombineVariants')
inputs = {'genome.fa': ref_fasta,
'genome.fa.fai': ref_fai,
'genome.dict': ref_dict}
inputs.update(vcfs)
work_dir = job.fileStore.getLocalTempDir()
for name, file_store_id in inputs.iteritems():
job.fileStore.readGlobalFile(file_store_id, os.path.join(work_dir, name))
command = ['-T', 'CombineVariants',
'-R', '/data/genome.fa',
'-o', '/data/merged.vcf',
'--genotypemergeoption', merge_option]
for uuid, vcf_id in vcfs.iteritems():
command.extend(['--variant', os.path.join('/data', uuid)])
    docker_parameters = ['--rm', '--log-driver', 'none',
'-e', 'JAVA_OPTS=-Djava.io.tmpdir=/data/ -Xmx{}'.format(job.memory)]
dockerCall(job=job, workDir=work_dir,
parameters=command,
tool='quay.io/ucsc_cgl/gatk:3.5--dba6dae49156168a909c43330350c6161dc7ecc2',
dockerParameters=docker_parameters)
return job.fileStore.writeGlobalFile(os.path.join(work_dir, 'merged.vcf'))
|
python
|
{
"resource": ""
}
|
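A short, hedged sketch of how the merge step above might be invoked from a parent Toil job once SNP and INDEL VQSR have both finished. The dictionary keys become file names under /data inside the container, so they must be unique; the identifiers below are placeholders.

vcfs = {'snps.vqsr.vcf': snp_vqsr_id, 'indels.vqsr.vcf': indel_vqsr_id}
merged_job = job.addChildJobFn(gatk_combine_variants, vcfs,
                               ref_fasta_id, ref_fai_id, ref_dict_id,
                               merge_option='UNSORTED')  # same-sample merge
merged_vcf_id = merged_job.rv()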
q276364
|
bam_quickcheck
|
test
|
def bam_quickcheck(bam_path):
"""
Perform a quick check on a BAM via `samtools quickcheck`.
This will detect obvious BAM errors such as truncation.
    :param str bam_path: path to the BAM file to be checked
    :rtype: boolean
    :return: True if the BAM is valid, False if the BAM is invalid or the check itself failed
"""
directory, bam_name = os.path.split(bam_path)
exit_code = subprocess.call(['docker', 'run', '-v', directory + ':/data',
'quay.io/ucsc_cgl/samtools:1.3--256539928ea162949d8a65ca5c79a72ef557ce7c',
'quickcheck', '-vv', '/data/' + bam_name])
if exit_code != 0:
return False
return True
|
python
|
{
"resource": ""
}
|
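A minimal usage sketch for the quick check above; it assumes Docker is installed on the host and that the BAM path below (a placeholder) is a local file.

bam_path = '/mnt/data/sample.bam'  # placeholder path
if not bam_quickcheck(bam_path):
    raise RuntimeError('samtools quickcheck failed for %s' % bam_path)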
q276365
|
load_handlers
|
test
|
def load_handlers(handler_mapping):
"""
Given a dictionary mapping which looks like the following, import the
objects based on the dotted path and yield the packet type and handler as
pairs.
If the special string '*' is passed, don't process that, pass it on as it
is a wildcard.
    If a non-string object is given for either packet or handler (key or
value) assume these are the objects to use and yield them.
::
{
'rfxcom.protocol.Status': 'home.collect.logging_handler',
'rfxcom.protocol.Elec': 'home.collect.elec_handler',
'rfxcom.protocol.TempHumidity': 'home.collect.temp_humidity_handler',
'*': 'home.collect.logging_handler'
}
"""
handlers = {}
for packet_type, handler in handler_mapping.items():
if packet_type == '*':
Packet = packet_type
elif isinstance(packet_type, str):
Packet = importer(packet_type)
else:
Packet = packet_type
if isinstance(handler, str):
Handler = importer(handler)
else:
Handler = handler
if Packet in handlers:
raise HandlerConfigError(
"Handler already provided for packet %s" % Packet)
handlers[Packet] = Handler
return handlers
|
python
|
{
"resource": ""
}
|
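The mapping format is the one shown in the docstring above. The dotted paths below are the docstring's own examples and are assumed to be importable in a real deployment; in tests you could pass the classes and callables directly instead of strings.

handler_mapping = {
    'rfxcom.protocol.Status': 'home.collect.logging_handler',
    'rfxcom.protocol.Elec': 'home.collect.elec_handler',
    '*': 'home.collect.logging_handler',
}
handlers = load_handlers(handler_mapping)
for packet_type, handler in handlers.items():
    print('%r -> %r' % (packet_type, handler))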
q276366
|
write_config
|
test
|
def write_config(configuration):
"""Helper to write the JSON configuration to a file"""
with open(CONFIG_PATH, 'w') as f:
json.dump(configuration, f, indent=2, sort_keys=True)
|
python
|
{
"resource": ""
}
|
q276367
|
get_config
|
test
|
def get_config():
"""Gets the configuration for this project from the default JSON file, or writes one if it doesn't exist
:rtype: dict
"""
if not os.path.exists(CONFIG_PATH):
write_config({})
with open(CONFIG_PATH) as f:
return json.load(f)
|
python
|
{
"resource": ""
}
|
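A small round-trip sketch for the two helpers above; CONFIG_PATH is the module-level constant they both use, and the key written below is just an illustrative placeholder.

config = get_config()          # creates an empty config file on first run
config['api_key'] = 'example'  # placeholder setting
write_config(config)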
q276368
|
OlsClient.get_term
|
test
|
def get_term(self, ontology, iri):
"""Gets the data for a given term
:param str ontology: The name of the ontology
:param str iri: The IRI of a term
:rtype: dict
"""
url = self.ontology_term_fmt.format(ontology, iri)
response = requests.get(url)
return response.json()
|
python
|
{
"resource": ""
}
|
q276369
|
OlsClient.search
|
test
|
def search(self, name, query_fields=None):
"""Searches the OLS with the given term
:param str name:
:param list[str] query_fields: Fields to query
:return: dict
"""
params = {'q': name}
if query_fields is not None:
params['queryFields'] = '{{{}}}'.format(','.join(query_fields))
response = requests.get(self.ontology_search, params=params)
return response.json()
|
python
|
{
"resource": ""
}
|
q276370
|
OlsClient.suggest
|
test
|
def suggest(self, name, ontology=None):
"""Suggest terms from an optional list of ontologies
:param str name:
:param list[str] ontology:
:rtype: dict
.. seealso:: https://www.ebi.ac.uk/ols/docs/api#_suggest_term
"""
params = {'q': name}
if ontology:
params['ontology'] = ','.join(ontology)
response = requests.get(self.ontology_suggest, params=params)
return response.json()
|
python
|
{
"resource": ""
}
|
q276371
|
OlsClient.iter_descendants
|
test
|
def iter_descendants(self, ontology, iri, size=None, sleep=None):
"""Iterates over the descendants of a given term
:param str ontology: The name of the ontology
:param str iri: The IRI of a term
:param int size: The size of each page. Defaults to 500, which is the maximum allowed by the EBI.
:param int sleep: The amount of time to sleep between pages. Defaults to 0 seconds.
:rtype: iter[dict]
"""
url = self.ontology_term_descendants_fmt.format(ontology=ontology, iri=iri)
log.info('getting %s', url)
for term in self._iter_terms_helper(url, size=size, sleep=sleep):
yield term
|
python
|
{
"resource": ""
}
|
q276372
|
OlsClient.iter_descendants_labels
|
test
|
def iter_descendants_labels(self, ontology, iri, size=None, sleep=None):
"""Iterates over the labels for the descendants of a given term
:param str ontology: The name of the ontology
:param str iri: The IRI of a term
:param int size: The size of each page. Defaults to 500, which is the maximum allowed by the EBI.
:param int sleep: The amount of time to sleep between pages. Defaults to 0 seconds.
:rtype: iter[str]
"""
for label in _help_iterate_labels(self.iter_descendants(ontology, iri, size=size, sleep=sleep)):
yield label
|
python
|
{
"resource": ""
}
|
q276373
|
OlsClient.iter_labels
|
test
|
def iter_labels(self, ontology, size=None, sleep=None):
"""Iterates over the labels of terms in the ontology. Automatically wraps the pager returned by the OLS.
:param str ontology: The name of the ontology
:param int size: The size of each page. Defaults to 500, which is the maximum allowed by the EBI.
:param int sleep: The amount of time to sleep between pages. Defaults to 0 seconds.
:rtype: iter[str]
"""
for label in _help_iterate_labels(self.iter_terms(ontology=ontology, size=size, sleep=sleep)):
yield label
|
python
|
{
"resource": ""
}
|
q276374
|
OlsClient.iter_hierarchy
|
test
|
def iter_hierarchy(self, ontology, size=None, sleep=None):
"""Iterates over parent-child relations
:param str ontology: The name of the ontology
:param int size: The size of each page. Defaults to 500, which is the maximum allowed by the EBI.
:param int sleep: The amount of time to sleep between pages. Defaults to 0 seconds.
:rtype: iter[tuple[str,str]]
"""
for term in self.iter_terms(ontology=ontology, size=size, sleep=sleep):
try:
hierarchy_children_link = term['_links'][HIERARCHICAL_CHILDREN]['href']
except KeyError: # there's no children for this one
continue
response = requests.get(hierarchy_children_link).json()
for child_term in response['_embedded']['terms']:
yield term['label'], child_term['label']
|
python
|
{
"resource": ""
}
|
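A hedged sketch of the client methods above. It assumes OlsClient can be constructed with its defaults (pointing at the public EBI OLS) and that 'go' is a valid ontology name on that instance; network access is required.

client = OlsClient()
results = client.search('apoptosis', query_fields=['label'])
suggestions = client.suggest('apoptos', ontology=['go'])
for parent_label, child_label in client.iter_hierarchy('go', size=500, sleep=1):
    print('%s -> %s' % (parent_label, child_label))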
q276375
|
run_fastqc
|
test
|
def run_fastqc(job, r1_id, r2_id):
"""
Run Fastqc on the input reads
:param JobFunctionWrappingJob job: passed automatically by Toil
:param str r1_id: FileStoreID of fastq read 1
:param str r2_id: FileStoreID of fastq read 2
:return: FileStoreID of fastQC output (tarball)
:rtype: str
"""
work_dir = job.fileStore.getLocalTempDir()
job.fileStore.readGlobalFile(r1_id, os.path.join(work_dir, 'R1.fastq'))
parameters = ['/data/R1.fastq']
output_names = ['R1_fastqc.html', 'R1_fastqc.zip']
if r2_id:
job.fileStore.readGlobalFile(r2_id, os.path.join(work_dir, 'R2.fastq'))
parameters.extend(['-t', '2', '/data/R2.fastq'])
output_names.extend(['R2_fastqc.html', 'R2_fastqc.zip'])
dockerCall(job=job, tool='quay.io/ucsc_cgl/fastqc:0.11.5--be13567d00cd4c586edf8ae47d991815c8c72a49',
workDir=work_dir, parameters=parameters)
output_files = [os.path.join(work_dir, x) for x in output_names]
tarball_files(tar_name='fastqc.tar.gz', file_paths=output_files, output_dir=work_dir)
return job.fileStore.writeGlobalFile(os.path.join(work_dir, 'fastqc.tar.gz'))
|
python
|
{
"resource": ""
}
|
q276376
|
Merge.addStream
|
test
|
def addStream(self, stream, t1=None, t2=None, limit=None, i1=None, i2=None, transform=None):
"""Adds the given stream to the query construction. The function supports both stream
names and Stream objects."""
params = query_maker(t1, t2, limit, i1, i2, transform)
params["stream"] = get_stream(self.cdb, stream)
# Now add the stream to the query parameters
self.query.append(params)
|
python
|
{
"resource": ""
}
|
q276377
|
create_app
|
test
|
def create_app(config=None):
""" This needs some tidying up. To avoid circular imports we import
everything here but it makes this method a bit more gross.
"""
# Initialise the app
from home.config import TEMPLATE_FOLDER, STATIC_FOLDER
app = Flask(__name__, static_folder=STATIC_FOLDER,
template_folder=TEMPLATE_FOLDER)
app.config['SECRET_KEY'] = 'ssh, its a secret.'
# Load the default config, the specified config file and then any
# overwrites that are manually passed in.
app.config.from_object('home.config')
if 'HOME_SETTINGS' in environ:
app.config.from_envvar('HOME_SETTINGS')
app.config.from_object(config)
# Register the web front end and the API.
from home.dash.web import web
from home.dash.api import api
app.register_blueprint(web)
app.register_blueprint(api, url_prefix='/api')
login_manager.init_app(app)
login_manager.login_view = 'Dashboard Web.login'
from home.dash.models import User
@login_manager.user_loader
def load_user(user_id):
return User.query.get(int(user_id))
# Initialise the migrations app, we want to store all migrations within
# the project directory for easier packaging.
Migrate(app, db, directory=app.config['MIGRATE_DIRECTORY'])
admin = Admin(app)
from home.dash.admin import setup_admin
setup_admin(admin)
# Wire up the database to the app so it gets the config.
db.init_app(app)
return app
|
python
|
{
"resource": ""
}
|
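A typical local entry point for the factory above, assuming the package is importable; the debug flag is only for development.

if __name__ == '__main__':
    app = create_app()
    app.run(debug=True)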
q276378
|
SparkService.start
|
test
|
def start(self, job):
"""
Start spark and hdfs master containers
:param job: The underlying job.
"""
if self.hostname is None:
self.hostname = subprocess.check_output(["hostname", "-f",])[:-1]
_log.info("Started Spark master container.")
self.sparkContainerID = dockerCheckOutput(job=job,
defer=STOP,
workDir=os.getcwd(),
tool="quay.io/ucsc_cgl/apache-spark-master:1.5.2",
dockerParameters=["--net=host",
"-d",
"-v", "/mnt/ephemeral/:/ephemeral/:rw",
"-e", "SPARK_MASTER_IP=" + self.hostname,
"-e", "SPARK_LOCAL_DIRS=/ephemeral/spark/local",
"-e", "SPARK_WORKER_DIR=/ephemeral/spark/work"],
parameters=[self.hostname])[:-1]
_log.info("Started HDFS Datanode.")
self.hdfsContainerID = dockerCheckOutput(job=job,
defer=STOP,
workDir=os.getcwd(),
tool="quay.io/ucsc_cgl/apache-hadoop-master:2.6.2",
dockerParameters=["--net=host",
"-d"],
parameters=[self.hostname])[:-1]
return self.hostname
|
python
|
{
"resource": ""
}
|
q276379
|
WorkerService.start
|
test
|
def start(self, job):
"""
Start spark and hdfs worker containers
:param job: The underlying job.
"""
# start spark and our datanode
self.sparkContainerID = dockerCheckOutput(job=job,
defer=STOP,
workDir=os.getcwd(),
tool="quay.io/ucsc_cgl/apache-spark-worker:1.5.2",
dockerParameters=["--net=host",
"-d",
"-v", "/mnt/ephemeral/:/ephemeral/:rw",
"-e",
"\"SPARK_MASTER_IP=" + self.masterIP + ":" + _SPARK_MASTER_PORT + "\"",
"-e", "SPARK_LOCAL_DIRS=/ephemeral/spark/local",
"-e", "SPARK_WORKER_DIR=/ephemeral/spark/work"],
parameters=[self.masterIP + ":" + _SPARK_MASTER_PORT])[:-1]
self.__start_datanode(job)
# fake do/while to check if HDFS is up
hdfs_down = True
retries = 0
while hdfs_down and (retries < 5):
_log.info("Sleeping 30 seconds before checking HDFS startup.")
time.sleep(30)
clusterID = ""
try:
clusterID = subprocess.check_output(["docker",
"exec",
self.hdfsContainerID,
"grep",
"clusterID",
"-R",
"/opt/apache-hadoop/logs"])
            except subprocess.CalledProcessError:
                # grep returns a non-zero exit code if the pattern is not found
                # we expect to not find the pattern, so a non-zero code is OK
                pass
if "Incompatible" in clusterID:
_log.warning("Hadoop Datanode failed to start with: %s", clusterID)
_log.warning("Retrying container startup, retry #%d.", retries)
retries += 1
_log.warning("Removing ephemeral hdfs directory.")
subprocess.check_call(["docker",
"exec",
self.hdfsContainerID,
"rm",
"-rf",
"/ephemeral/hdfs"])
_log.warning("Killing container %s.", self.hdfsContainerID)
subprocess.check_call(["docker",
"kill",
self.hdfsContainerID])
# todo: this is copied code. clean up!
_log.info("Restarting datanode.")
self.__start_datanode(job)
else:
_log.info("HDFS datanode started up OK!")
hdfs_down = False
if retries >= 5:
raise RuntimeError("Failed %d times trying to start HDFS datanode." % retries)
return
|
python
|
{
"resource": ""
}
|
q276380
|
WorkerService.__start_datanode
|
test
|
def __start_datanode(self, job):
"""
Launches the Hadoop datanode.
:param job: The underlying job.
"""
self.hdfsContainerID = dockerCheckOutput(job=job,
defer=STOP,
workDir=os.getcwd(),
tool="quay.io/ucsc_cgl/apache-hadoop-worker:2.6.2",
dockerParameters=["--net=host",
"-d",
"-v", "/mnt/ephemeral/:/ephemeral/:rw"],
parameters=[self.masterIP])[:-1]
|
python
|
{
"resource": ""
}
|
q276381
|
WorkerService.stop
|
test
|
def stop(self, fileStore):
"""
Stop spark and hdfs worker containers
        :param fileStore: The job's file store (unused here).
"""
subprocess.call(["docker", "exec", self.sparkContainerID, "rm", "-r", "/ephemeral/spark"])
subprocess.call(["docker", "stop", self.sparkContainerID])
subprocess.call(["docker", "rm", self.sparkContainerID])
_log.info("Stopped Spark worker.")
subprocess.call(["docker", "exec", self.hdfsContainerID, "rm", "-r", "/ephemeral/hdfs"])
subprocess.call(["docker", "stop", self.hdfsContainerID])
subprocess.call(["docker", "rm", self.hdfsContainerID])
_log.info("Stopped HDFS datanode.")
return
|
python
|
{
"resource": ""
}
|
q276382
|
WorkerService.check
|
test
|
def check(self):
"""
Checks to see if Spark worker and HDFS datanode are still running.
"""
status = _checkContainerStatus(self.sparkContainerID,
self.hdfsContainerID,
sparkNoun='worker',
hdfsNoun='datanode')
return status
|
python
|
{
"resource": ""
}
|
q276383
|
base_tokenizer
|
test
|
def base_tokenizer(fp):
    'Tokenizer. Generates a stream of tokens from the given text.'
if isinstance(fp, StringIO):
template_file = fp
size = template_file.len
else:
#empty file check
if os.fstat(fp.fileno()).st_size == 0:
yield TOKEN_EOF, 'EOF', 0, 0
return
template_file = mmap.mmap(fp.fileno(), 0, access=mmap.ACCESS_READ)
size = template_file.size()
lineno = 0
while 1:
lineno += 1
pos = 1
# end of file
if template_file.tell() == size:
yield TOKEN_EOF, 'EOF', lineno, 0
break
        # now we tokenize line by line
line = template_file.readline().decode('utf-8')
line = line.replace('\r\n', '')
line = line.replace('\n', '')
# ignoring non XML comments
if re_comment.match(line):
continue
last_text = deque()
while line:
line_len = len(line)
for token in tokens:
m = token.regex.match(line)
if m:
if last_text:
yield TOKEN_TEXT, ''.join(last_text), lineno, pos
pos += len(last_text)
last_text.clear()
offset, value = m.end(), m.group()
line = line[offset:]
yield token, value, lineno, pos
pos += offset
break
# we did not get right in tokens list, so next char is text
if line_len == len(line):
last_text.append(line[0])
line = line[1:]
if last_text:
yield TOKEN_TEXT, ''.join(last_text), lineno, pos
pos += len(last_text)
last_text.clear()
yield TOKEN_NEWLINE, '\n', lineno, pos
# all work is done
template_file.close()
|
python
|
{
"resource": ""
}
|
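A hedged usage sketch for the tokenizer above, driving it from an in-memory buffer. It assumes the surrounding module's Python 2 StringIO import and its module-level token constants and tokens table are available.

from StringIO import StringIO  # Python 2, matching the isinstance check above

buf = StringIO('<div>{{ value }}</div>\n')
for token, value, lineno, pos in base_tokenizer(buf):
    print('%r %r line=%d pos=%d' % (token, value, lineno, pos))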
q276384
|
lookup_zone
|
test
|
def lookup_zone(conn, zone):
"""Look up a zone ID for a zone string.
Args: conn: boto.route53.Route53Connection
zone: string eg. foursquare.com
Returns: zone ID eg. ZE2DYFZDWGSL4.
Raises: ZoneNotFoundError if zone not found."""
all_zones = conn.get_all_hosted_zones()
for resp in all_zones['ListHostedZonesResponse']['HostedZones']:
if resp['Name'].rstrip('.') == zone.rstrip('.'):
return resp['Id'].replace('/hostedzone/', '')
raise ZoneNotFoundError('zone %s not found in response' % zone)
|
python
|
{
"resource": ""
}
|
q276385
|
fetch_config
|
test
|
def fetch_config(zone, conn):
"""Fetch all pieces of a Route 53 config from Amazon.
Args: zone: string, hosted zone id.
conn: boto.route53.Route53Connection
Returns: list of ElementTrees, one for each piece of config."""
more_to_fetch = True
cfg_chunks = []
next_name = None
next_type = None
next_identifier = None
while more_to_fetch == True:
more_to_fetch = False
getstr = '/%s/hostedzone/%s/rrset' % (R53_API_VERSION, zone)
if next_name is not None:
getstr += '?name=%s&type=%s' % (next_name, next_type)
if next_identifier is not None:
getstr += '&identifier=%s' % next_identifier
log.debug('requesting %s' % getstr)
resp = conn.make_request('GET', getstr)
etree = lxml.etree.parse(resp)
cfg_chunks.append(etree)
root = etree.getroot()
truncated = root.find('{%s}IsTruncated' % R53_XMLNS)
if truncated is not None and truncated.text == 'true':
more_to_fetch = True
next_name = root.find('{%s}NextRecordName' % R53_XMLNS).text
next_type = root.find('{%s}NextRecordType' % R53_XMLNS).text
try:
next_identifier = root.find('{%s}NextRecordIdentifier' % R53_XMLNS).text
except AttributeError: # may not have next_identifier
next_identifier = None
return cfg_chunks
|
python
|
{
"resource": ""
}
|
q276386
|
merge_config
|
test
|
def merge_config(cfg_chunks):
"""Merge a set of fetched Route 53 config Etrees into a canonical form.
Args: cfg_chunks: [ lxml.etree.ETree ]
Returns: lxml.etree.Element"""
root = lxml.etree.XML('<ResourceRecordSets xmlns="%s"></ResourceRecordSets>' % R53_XMLNS, parser=XML_PARSER)
for chunk in cfg_chunks:
for rrset in chunk.iterfind('.//{%s}ResourceRecordSet' % R53_XMLNS):
root.append(rrset)
return root
|
python
|
{
"resource": ""
}
|
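An end-to-end sketch of the three Route 53 helpers above. It assumes boto 2 credentials are configured in the environment and that 'example.com' (a placeholder) is a hosted zone in the account.

import boto
import lxml.etree

conn = boto.connect_route53()
zone_id = lookup_zone(conn, 'example.com')
cfg_chunks = fetch_config(zone_id, conn)
merged = merge_config(cfg_chunks)
print(lxml.etree.tostring(merged, pretty_print=True))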
q276387
|
validate_changeset
|
test
|
def validate_changeset(changeset):
"""Validate a changeset is compatible with Amazon's API spec.
Args: changeset: lxml.etree.Element (<ChangeResourceRecordSetsRequest>)
Returns: [ errors ] list of error strings or []."""
errors = []
changes = changeset.findall('.//{%s}Change' % R53_XMLNS)
num_changes = len(changes)
if num_changes == 0:
errors.append('changeset must have at least one <Change> element')
if num_changes > 100:
errors.append('changeset has %d <Change> elements: max is 100' % num_changes)
rrs = changeset.findall('.//{%s}ResourceRecord' % R53_XMLNS)
num_rrs = len(rrs)
if num_rrs > 1000:
errors.append('changeset has %d ResourceRecord elements: max is 1000' % num_rrs)
values = changeset.findall('.//{%s}Value' % R53_XMLNS)
num_chars = 0
for value in values:
num_chars += len(value.text)
if num_chars > 10000:
errors.append('changeset has %d chars in <Value> text: max is 10000' % num_chars)
return errors
|
python
|
{
"resource": ""
}
|
q276388
|
minimize_best_n
|
test
|
def minimize_best_n(Members):
'''
    Orders population members from highest fitness score to lowest fitness score
Args:
Members (list): list of PyGenetics Member objects
Returns:
        list: ordered list of Members, from highest fitness to lowest fitness
'''
return(list(reversed(sorted(
Members, key=lambda Member: Member.fitness_score
))))
|
python
|
{
"resource": ""
}
|
q276389
|
Population.fitness
|
test
|
def fitness(self):
'''Population fitness == average member fitness score'''
if len(self.__members) != 0:
if self.__num_processes > 1:
members = [m.get() for m in self.__members]
else:
members = self.__members
return sum(m.fitness_score for m in members) / len(members)
else:
return None
|
python
|
{
"resource": ""
}
|
q276390
|
Population.ave_cost_fn_val
|
test
|
def ave_cost_fn_val(self):
'''Returns average cost function return value for all members'''
if len(self.__members) != 0:
if self.__num_processes > 1:
members = [m.get() for m in self.__members]
else:
members = self.__members
return sum(m.cost_fn_val for m in members) / len(members)
else:
return None
|
python
|
{
"resource": ""
}
|
q276391
|
Population.med_cost_fn_val
|
test
|
def med_cost_fn_val(self):
'''Returns median cost function return value for all members'''
if len(self.__members) != 0:
if self.__num_processes > 1:
members = [m.get() for m in self.__members]
else:
members = self.__members
return median([m.cost_fn_val for m in members])
else:
return None
|
python
|
{
"resource": ""
}
|
q276392
|
Population.parameters
|
test
|
def parameters(self):
'''Population parameter vals == average member parameter vals'''
if len(self.__members) != 0:
if self.__num_processes > 1:
members = [m.get() for m in self.__members]
else:
members = self.__members
params = {}
for p in self.__parameters:
params[p.name] = sum(
m.parameters[p.name] for m in members
) / len(members)
return params
else:
return None
|
python
|
{
"resource": ""
}
|
q276393
|
Population.members
|
test
|
def members(self):
'''Returns Member objects of population'''
if self.__num_processes > 1:
return [m.get() for m in self.__members]
else:
return self.__members
|
python
|
{
"resource": ""
}
|
q276394
|
Population.add_parameter
|
test
|
def add_parameter(self, name, min_val, max_val):
        '''Adds a parameter to the Population
Args:
name (str): name of the parameter
min_val (int or float): minimum value for the parameter
max_val (int or float): maximum value for the parameter
'''
self.__parameters.append(Parameter(name, min_val, max_val))
|
python
|
{
"resource": ""
}
|
q276395
|
Population.next_generation
|
test
|
def next_generation(self, mut_rate=0, max_mut_amt=0, log_base=10):
'''Generates the next population from a previously evaluated generation
Args:
mut_rate (float): mutation rate for new members (0.0 - 1.0)
max_mut_amt (float): how much the member is allowed to mutate
(0.0 - 1.0, proportion change of mutated parameter)
log_base (int): the higher this number, the more likely the first
Members (chosen with supplied selection function) are chosen
as parents for the next generation
'''
if self.__num_processes > 1:
process_pool = Pool(processes=self.__num_processes)
members = [m.get() for m in self.__members]
else:
members = self.__members
if len(members) == 0:
raise Exception(
'Generation 0 not found: use generate_population() first'
)
selected_members = self.__select_fn(members)
reproduction_probs = list(reversed(logspace(0.0, 1.0,
num=len(selected_members), base=log_base)))
reproduction_probs = reproduction_probs / sum(reproduction_probs)
self.__members = []
for _ in range(self.__pop_size):
parent_1 = nrandom.choice(selected_members, p=reproduction_probs)
parent_2 = nrandom.choice(selected_members, p=reproduction_probs)
feed_dict = {}
for param in self.__parameters:
which_parent = uniform(0, 1)
if which_parent < 0.5:
feed_dict[param.name] = parent_1.parameters[param.name]
else:
feed_dict[param.name] = parent_2.parameters[param.name]
feed_dict[param.name] = self.__mutate_parameter(
feed_dict[param.name], param, mut_rate, max_mut_amt
)
if self.__num_processes > 1:
self.__members.append(process_pool.apply_async(
self._start_process,
[self.__cost_fn, feed_dict, self.__cost_fn_args])
)
else:
self.__members.append(
Member(
feed_dict,
self.__cost_fn(feed_dict, self.__cost_fn_args)
)
)
if self.__num_processes > 1:
process_pool.close()
process_pool.join()
self.__determine_best_member()
|
python
|
{
"resource": ""
}
|
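A hedged sketch of a full run using the Population methods shown here. The constructor keyword names (size, cost_fn, select_fn) are assumptions about the PyGenetics API and may differ from the real signature; generate_population() is taken from the exception message in next_generation(), and ave_cost_fn_val is assumed to be a property, as its docstring suggests.

from math import cos, pi

def rastrigin_1d(params, args):
    # cost function with its minimum at x == 0
    x = params['x']
    return 10 + x * x - 10 * cos(2 * pi * x)

pop = Population(size=50, cost_fn=rastrigin_1d, select_fn=minimize_best_n)  # assumed signature
pop.add_parameter('x', -5.12, 5.12)
pop.generate_population()
for _ in range(20):
    pop.next_generation(mut_rate=0.05, max_mut_amt=0.1)
    print('average cost: %f' % pop.ave_cost_fn_val)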
q276396
|
ConfigOptionParser.normalize_keys
|
test
|
def normalize_keys(self, items):
"""Return a config dictionary with normalized keys regardless of
whether the keys were specified in environment variables or in config
files"""
normalized = {}
for key, val in items:
key = key.replace('_', '-')
if not key.startswith('--'):
key = '--%s' % key # only prefer long opts
normalized[key] = val
return normalized
|
python
|
{
"resource": ""
}
|
q276397
|
ConfigOptionParser.get_environ_vars
|
test
|
def get_environ_vars(self):
"""Returns a generator with all environmental vars with prefix PIP_"""
for key, val in os.environ.items():
if _environ_prefix_re.search(key):
yield (_environ_prefix_re.sub("", key).lower(), val)
|
python
|
{
"resource": ""
}
|
q276398
|
throws_exception
|
test
|
def throws_exception(callable, *exceptions):
"""
Return True if the callable throws the specified exception
>>> throws_exception(lambda: int('3'))
False
>>> throws_exception(lambda: int('a'))
True
>>> throws_exception(lambda: int('a'), KeyError)
False
"""
with context.ExceptionTrap():
with context.ExceptionTrap(*exceptions) as exc:
callable()
return bool(exc)
|
python
|
{
"resource": ""
}
|
q276399
|
transform_hits
|
test
|
def transform_hits(hits):
"""
The list from pypi is really a list of versions. We want a list of
packages with the list of versions stored inline. This converts the
list from pypi into one we can use.
"""
packages = {}
for hit in hits:
name = hit['name']
summary = hit['summary']
version = hit['version']
score = hit['_pypi_ordering']
if score is None:
score = 0
if name not in packages.keys():
packages[name] = {
'name': name,
'summary': summary,
'versions': [version],
'score': score,
}
else:
packages[name]['versions'].append(version)
# if this is the highest version, replace summary and score
if version == highest_version(packages[name]['versions']):
packages[name]['summary'] = summary
packages[name]['score'] = score
# each record has a unique name now, so we will convert the dict into a
# list sorted by score
package_list = sorted(
packages.values(),
key=lambda x: x['score'],
reverse=True,
)
return package_list
|
python
|
{
"resource": ""
}
|
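A small illustration of the transformation above, assuming the module's highest_version helper is available (it decides which version's summary and score win when a package appears more than once):

hits = [
    {'name': 'requests', 'summary': 'HTTP for Humans', 'version': '2.0.0',
     '_pypi_ordering': 2},
    {'name': 'requests', 'summary': 'older summary', 'version': '1.0.0',
     '_pypi_ordering': 1},
]
print(transform_hits(hits))
# -> [{'name': 'requests', 'summary': 'HTTP for Humans',
#      'versions': ['2.0.0', '1.0.0'], 'score': 2}]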