<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def flatten_urlinfo(urlinfo, shorter_keys=True):
""" Takes a urlinfo object and returns a flat dictionary.""" |
def flatten(value, prefix=""):
if is_string(value):
_result[prefix[1:]] = value
return
try:
len(value)
except (AttributeError, TypeError): # a leaf
_result[prefix[1:]] = value
return
try:
items = value.items()
except AttributeError: # an iterable, but not a dict
last_prefix = prefix.split(".")[-1]
if shorter_keys:
prefix = "." + last_prefix
if last_prefix == "Country":
for v in value:
country = v.pop("@Code")
flatten(v, ".".join([prefix, country]))
elif last_prefix in ["RelatedLink", "CategoryData"]:
for i, v in enumerate(value):
flatten(v, ".".join([prefix, str(i)]))
elif value[0].get("TimeRange"):
for v in value:
time_range = ".".join(tuple(v.pop("TimeRange").items())[0])
# python 3 odict_items don't support indexing
if v.get("DataUrl"):
time_range = ".".join([v.pop("DataUrl"), time_range])
flatten(v, ".".join([prefix, time_range]))
else:
msg = prefix + " contains a list we don't know how to flatten."
raise NotImplementedError(msg)
else: # a dict, go one level deeper
for k, v in items:
flatten(v, ".".join([prefix, k]))
_result = {}
info = xmltodict.parse(str(urlinfo))
flatten(info["aws:UrlInfoResponse"]["Response"]["UrlInfoResult"]["Alexa"])
_result["OutputTimestamp"] = datetime.datetime.utcnow().strftime('%Y%m%dT%H%M%SZ')
return _result |
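For orientation, here is a minimal, self-contained sketch of the same dotted-key flattening idea applied to a plain nested dictionary. It is an illustration only; it does not use xmltodict, is_string, or the Alexa UrlInfo structure handled above, and the sample data is made up.

def flatten_dict(value, prefix=""):
    """Flatten nested dicts into {'a.b.c': leaf} pairs (simplified sketch)."""
    result = {}
    if isinstance(value, dict):
        for k, v in value.items():
            result.update(flatten_dict(v, ".".join([prefix, k]) if prefix else k))
    else:
        result[prefix] = value
    return result

print(flatten_dict({"Alexa": {"Rank": {"Global": 42, "Country": {"US": 7}}}}))
# {'Alexa.Rank.Global': 42, 'Alexa.Rank.Country.US': 7}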
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def response_minify(self, response):
""" minify response html to decrease traffic """ |
if response.content_type == u'text/html; charset=utf-8':
endpoint = request.endpoint or ''
view_func = current_app.view_functions.get(endpoint, None)
name = (
'%s.%s' % (view_func.__module__, view_func.__name__)
if view_func else ''
)
if name in self._exempt_routes:
return response
response.direct_passthrough = False
response.set_data(
self._html_minify.minify(response.get_data(as_text=True))
)
return response
return response |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def exempt(self, obj):
""" decorator to mark a view as exempt from htmlmin. """ |
name = '%s.%s' % (obj.__module__, obj.__name__)
@wraps(obj)
def __inner(*a, **k):
return obj(*a, **k)
self._exempt_routes.add(name)
return __inner |
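A hedged usage sketch of the two methods above, assuming they belong to the HTMLMIN extension from the flask-htmlmin package (the route bodies below are made up; enabling minification may also require a config flag depending on the extension version):

from flask import Flask
from flask_htmlmin import HTMLMIN  # assumption: response_minify/exempt come from this extension

app = Flask(__name__)
htmlmin = HTMLMIN(app)  # hooks response_minify in via Flask's after_request

@app.route('/')
def index():
    return '<html> <body>   minified   </body> </html>'

@app.route('/raw')
@htmlmin.exempt          # recorded in self._exempt_routes, so response_minify skips it
def raw():
    return '<html> <body>   left as-is   </body> </html>'

# app.run()  # uncomment to serve locally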
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def dmql(query):
"""Client supplied raw DMQL, ensure quote wrap.""" |
if isinstance(query, dict):
raise ValueError("You supplied a dictionary to the dmql_query parameter, but a string is required."
" Did you mean to pass this to the search_filter parameter? ")
# automatically surround the given query with parentheses if it doesn't have them already
if len(query) > 0 and query != "*" and query[0] != '(' and query[-1] != ')':
query = '({})'.format(query)
return query |
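A few illustrative calls (the DMQL strings themselves are hypothetical):

print(dmql('Status=A'))                # '(Status=A)'  -- gets wrapped
print(dmql('(Status=A),(Beds=3+)'))    # unchanged: already starts with '('
print(dmql('*'))                       # '*' passes through untouched
try:
    dmql({'Status': 'A'})
except ValueError as e:
    print(e)                           # dicts belong in search_filter, not dmql_query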
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def ids(self, content_ids, object_ids):
"""Appends the content and object ids how RETS expects them""" |
result = []
content_ids = self.split(content_ids, False)
object_ids = self.split(object_ids)
for cid in content_ids:
result.append('{}:{}'.format(cid, ':'.join(object_ids)))
return result |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def set_value(self, key, value):
""" Set key value to the file. The fuction will be make the key and value to dictinary formate. If its exist then it will update the current new key value to the file. Arg: key : cache key value : cache value """ |
file_cache = self.read_file()
if file_cache:
file_cache[key] = value
else:
file_cache = {}
file_cache[key] = value
self.update_file(file_cache) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def delete_value(self, key):
""" Delete the key if the token is expired. Arg: key : cache key """ |
response = {}
response['status'] = False
response['msg'] = "key does not exist"
file_cache = self.read_file()
if key in file_cache:
del file_cache[key]
self.update_file(file_cache)
response['status'] = True
response['msg'] = "success"
return response |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def update_file(self, content):
""" It will convert json content to json string and update into file. Return: Boolean True/False """ |
updated_content = json.dumps(content)
# open in write mode so any previous, longer content is truncated
file_obj = open(self.file, 'w')
file_obj.write(updated_content)
file_obj.close()
return True |
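A minimal, self-contained sketch of the same read-modify-write cycle the cache methods above implement, using only json and a temporary file (the key name is made up):

import json, os, tempfile

path = os.path.join(tempfile.mkdtemp(), 'cache.json')

def read_file():
    try:
        with open(path) as fh:
            return json.load(fh)
    except (IOError, ValueError):
        return {}

def update_file(content):
    with open(path, 'w') as fh:      # 'w' truncates any stale, longer content
        fh.write(json.dumps(content))

cache = read_file()
cache['access_token'] = 'abc123'     # set_value
update_file(cache)
cache = read_file()
cache.pop('access_token', None)      # delete_value
update_file(cache)
print(read_file())                   # {}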
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def auth(self):
""" Auth is used to call the AUTH API of CricketAPI. Access token required for every request call to CricketAPI. Auth functional will post user Cricket API app details to server and return the access token. Return: Access token """ |
if not self.store_handler.has_value('access_token'):
params = {}
params["access_key"] = self.access_key
params["secret_key"] = self.secret_key
params["app_id"] = self.app_id
params["device_id"] = self.device_id
auth_url = self.api_path + "auth/"
response = self.get_response(auth_url, params, "post")
if 'auth' in response:
self.store_handler.set_value("access_token", response['auth']['access_token'])
self.store_handler.set_value("expires", response['auth']['expires'])
logger.info('Getting new access token')
else:
msg = "Error getting access_token, " + \
"please verify your access_key, secret_key and app_id"
logger.error(msg)
raise Exception("Auth Failed, please check your access details") |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_active_token(self):
""" Getting the valid access token. Access token expires every 24 hours, It will expires then it will generate a new token. Return: active access token """ |
expire_time = self.store_handler.has_value("expires")
access_token = self.store_handler.has_value("access_token")
if expire_time and access_token:
expire_time = self.store_handler.get_value("expires")
if not datetime.now() < datetime.fromtimestamp(float(expire_time)):
self.store_handler.delete_value("access_token")
self.store_handler.delete_value("expires")
logger.info('Access token expired, going to get new token')
self.auth()
else:
logger.info('Access token not expired yet')
else:
self.auth()
return self.store_handler.get_value("access_token") |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_recent_matches(self, card_type="micro_card"):
""" Calling the Recent Matches API. Arg: card_type: optional, default to micro_card. Accepted values are micro_card & summary_card. Return: json data """ |
recent_matches_url = self.api_path + "recent_matches/"
params = {}
params["card_type"] = card_type
response = self.get_response(recent_matches_url, params)
return response |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_recent_season_matches(self, season_key):
""" Calling specific season recent matches. Arg: season_key: key of the season. Return: json date """ |
season_recent_matches_url = self.api_path + "season/" + season_key + "/recent_matches/"
response = self.get_response(season_recent_matches_url)
return response |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_recent_seasons(self):
""" Calling the Recent Season API. Return: json data """ |
recent_seasons_url = self.api_path + "recent_seasons/"
response = self.get_response(recent_seasons_url)
return response |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_schedule(self, date=None):
""" Calling the Schedule API. Return: json data """ |
schedule_url = self.api_path + "schedule/"
params = {}
if date:
params['date'] = date
response = self.get_response(schedule_url, params)
return response |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_season_schedule(self, season_key):
""" Calling specific season schedule Arg: season_key: key of the season Return: json data """ |
schedule_url = self.api_path + "season/" + season_key + "/schedule/"
response = self.get_response(schedule_url)
return response |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_season(self, season_key, card_type="micro_card"):
""" Calling Season API. Arg: season_key: key of the season card_type: optional, default to micro_card. Accepted values are micro_card & summary_card Return: json data """ |
season_url = self.api_path + "season/" + season_key + "/"
params = {}
params["card_type"] = card_type
response = self.get_response(season_url, params)
return response |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_season_stats(self, season_key):
""" Calling Season Stats API. Arg: season_key: key of the season Return: json data """ |
season_stats_url = self.api_path + "season/" + season_key + "/stats/"
response = self.get_response(season_stats_url)
return response |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_season_team(self, season_key, season_team_key,stats_type=None):
""" Calling Season teams API Arg: season_key: key of the season Return: json data """ |
params = {"stats_type": stats_type}
season_team_url = self.api_path + 'season/' + season_key + '/team/' + season_team_key + '/'
response = self.get_response(season_team_url, params=params)
return response |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_season_points(self, season_key):
""" Calling Season Points API. Arg: season_key: key of the season Return: json data """ |
season_points_url = self.api_path + "season/" + season_key + "/points/"
response = self.get_response(season_points_url)
return response |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_season_player_stats(self, season_key, player_key):
""" Calling Season Player Stats API. Arg: season_key: key of the season player_key: key of the player Return: json data """ |
season_player_stats_url = self.api_path + "season/" + season_key + "/player/" + player_key + "/stats/"
response = self.get_response(season_player_stats_url)
return response |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_overs_summary(self, match_key):
""" Calling Overs Summary API Arg: match_key: key of the match Return: json data """ |
overs_summary_url = self.api_path + "match/" + match_key + "/overs_summary/"
response = self.get_response(overs_summary_url)
return response |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_news_aggregation(self):
""" Calling News Aggregation API Return: json data """ |
news_aggregation_url = self.api_path + "news_aggregation" + "/"
response = self.get_response(news_aggregation_url)
return response |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_fantasy_credits(self, match_key):
""" Calling Fantasy Credit API Arg: match_key: key of the match Return: json data """ |
fantasy_credit_url = self.api_path_v3 + "fantasy-match-credits/" + match_key + "/"
response = self.get_response(fantasy_credit_url)
return response |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_fantasy_points(self, match_key):
""" Calling Fantasy Points API Arg: match_key: key of the match Return: json data """ |
fantasy_points_url = self.api_path_v3 + "fantasy-match-points/" + match_key + "/"
response = self.get_response(fantasy_points_url)
return response |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def main(self):
""" Generates an output string by replacing the keywords in the format string with the corresponding values from a submission dictionary. """ |
self.manage_submissions()
out_string = self.options['format']
# Pop submissions until we find one whose title length is <= max_chars
length = float('inf')
while length > self.options['max_chars']:
self.selected_submission = self.submissions.pop()
length = len(self.selected_submission['title'])
for k, v in self.selected_submission.items():
out_string = out_string.replace(k, self.h.unescape(str(v)))
return self.output(out_string, out_string) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def login(self):
""" Logs into Reddit in order to display a personalised front page. """ |
data = {'user': self.options['username'], 'passwd':
self.options['password'], 'api_type': 'json'}
response = self.client.post('http://www.reddit.com/api/login', data=data)
self.client.modhash = response.json()['json']['data']['modhash'] |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def manage_submissions(self):
""" If there are no or only one submissions left, get new submissions. This function manages URL creation and the specifics for front page or subreddit mode. """ |
if not hasattr(self, 'submissions') or len(self.submissions) == 1:
self.submissions = []
if self.options['mode'] == 'front':
# If there are no login details, the standard front
# page will be displayed.
if self.options['password'] and self.options['username']:
self.login()
url = 'http://reddit.com/.json?sort={0}'.format(self.options['sort'])
self.submissions = self.get_submissions(url)
elif self.options['mode'] == 'subreddit':
for subreddit in self.options['subreddits']:
url = 'http://reddit.com/r/{0}/.json?sort={1}'.format(
subreddit, self.options['sort'])
self.submissions += self.get_submissions(url)
else:
return |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_submissions(self, url):
""" Connects to Reddit and gets a JSON representation of submissions. This JSON data is then processed and returned. url: A url that requests for submissions should be sent to. """ |
response = self.client.get(url, params={'limit': self.options['limit']})
submissions = [x['data'] for x in response.json()['data']['children']]
return submissions |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def main(self):
""" A compulsary function that gets the output of the cmus-remote -Q command and converts it to unicode in order for it to be processed and finally output. """ |
try:
# Setting stderr to subprocess.STDOUT seems to stop the error
# message returned by the process from being output to STDOUT.
cmus_output = subprocess.check_output(['cmus-remote', '-Q'],
stderr=subprocess.STDOUT).decode('utf-8')
except subprocess.CalledProcessError:
return self.output(None, None)
if 'duration' in cmus_output:
status = self.convert_cmus_output(cmus_output)
out_string = self.options['format']
for k, v in status.items():
out_string = out_string.replace(k, v)
else:
out_string = None
return self.output(out_string, out_string) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def convert_cmus_output(self, cmus_output):
""" Change the newline separated string of output data into a dictionary which can then be used to replace the strings in the config format. cmus_output: A string with information about cmus that is newline seperated. Running cmus-remote -Q in a terminal will show you what you're dealing with. """ |
cmus_output = cmus_output.split('\n')
cmus_output = [x.replace('tag ', '') for x in cmus_output if x]
cmus_output = [x.replace('set ', '') for x in cmus_output]
status = {}
partitioned = (item.partition(' ') for item in cmus_output)
status = {item[0]: item[2] for item in partitioned}
status['duration'] = self.convert_time(status['duration'])
status['position'] = self.convert_time(status['position'])
return status |
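A standalone sketch of the same parsing steps on a sample of the newline-separated cmus-remote -Q output described above; the sample lines are an approximation of the real format and convert_time is not applied here:

sample = (
    "status playing\n"
    "file /home/user/music/track.flac\n"
    "duration 245\n"
    "position 17\n"
    "tag artist Some Artist\n"
    "tag title Some Title\n"
    "set shuffle true\n"
)
lines = [l.replace('tag ', '').replace('set ', '') for l in sample.split('\n') if l]
status = {key: value for key, _, value in (l.partition(' ') for l in lines)}
print(status['artist'], '-', status['title'], status['position'] + '/' + status['duration'])
# Some Artist - Some Title 17/245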
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def output(self, full_text, short_text):
""" Output all of the options and data for a segment. full_text: A string representing the data that should be output to i3bar. short_text: A more concise version of full_text, in case there is minimal room on the i3bar. """ |
full_text = full_text.replace('\n', '')
short_text = short_text.replace('\n', '')
self.output_options.update({'full_text': full_text, 'short_text': short_text})
self.output_options = {k: v for k, v in self.output_options.items() if v}
return self.output_options |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def on_click(self, event):
""" A function that should be overwritten by a plugin that wishes to react to events, if it wants to perform any action other than running the supplied command related to a button. event: A dictionary passed from i3bar (after being decoded from JSON) that has the folowing format: event = {'name': 'my_plugin', 'x': 231, 'y': 423} Note: It is also possible to have an instance key, but i3situation doesn't set it. """ |
if event['button'] == 1 and 'button1' in self.options:
subprocess.call(self.options['button1'].split())
elif event['button'] == 2 and 'button2' in self.options:
subprocess.call(self.options['button2'].split())
elif event['button'] == 3 and 'button3' in self.options:
subprocess.call(self.options['button3'].split()) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def url_image(request, image_id, thumb_options=None, width=None, height=None):
""" Converts a filer image ID in a complete path :param request: Request object :param image_id: Filer image ID :param thumb_options: ThumbnailOption ID :param width: user-provided width :param height: user-provided height :return: JSON serialized URL components ('url', 'width', 'height') """ |
image = File.objects.get(pk=image_id)
if getattr(image, 'canonical_url', None):
url = image.canonical_url
else:
url = image.url
thumb = _return_thumbnail(image, thumb_options, width, height)
if thumb:
image = thumb
url = image.url
data = {
'url': url,
'width': image.width,
'height': image.height,
}
return http.HttpResponse(json.dumps(data), content_type='application/json') |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def thumbnail_options(request):
""" Returns the requested ThumbnailOption as JSON :param request: Request object :return: JSON serialized ThumbnailOption """ |
response_data = [{'id': opt.pk, 'name': opt.name} for opt in ThumbnailOption.objects.all()]
return http.HttpResponse(json.dumps(response_data), content_type="application/json") |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def serve_image(request, image_id, thumb_options=None, width=None, height=None):
""" returns the content of an image sized according to the parameters :param request: Request object :param image_id: Filer image ID :param thumb_options: ThumbnailOption ID :param width: user-provided width :param height: user-provided height :return: JSON serialized URL components ('url', 'width', 'height') """ |
image = File.objects.get(pk=image_id)
if getattr(image, 'canonical_url', None):
url = image.canonical_url
else:
url = image.url
thumb = _return_thumbnail(image, thumb_options, width, height)
if thumb:
return server.serve(request, file_obj=thumb, save_as=False)
else:
return HttpResponseRedirect(url) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _touch_dir(self, path):
""" A helper function to create a directory if it doesn't exist. path: A string containing a full path to the directory to be created. """ |
try:
os.makedirs(path)
except OSError as e:
if e.errno != errno.EEXIST:
raise |
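On Python 3.2+ the same "create if missing" behaviour is available directly from os.makedirs; a hedged standalone equivalent of the helper above:

import os

def touch_dir(path):
    os.makedirs(path, exist_ok=True)   # no error if the directory already exists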
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def reload(self):
""" Reload the configuration from the file. This is in its own function so that it can be called at any time by another class. """ |
self._conf = configparser.ConfigParser()
# Preserve the case of sections and keys.
self._conf.optionxform = str
self._conf.read(self.config_file_path)
if 'general' not in self._conf.keys():
raise IncompleteConfigurationFile('Missing the general section')
general = self._replace_data_types(dict(self._conf.items('general')))
self._conf.remove_section('general')
plugin = []
for section in self._conf.sections():
plugin.append(dict(self._conf.items(section)))
plugin[-1].update({'name': section})
plugin[-1] = self._replace_data_types(plugin[-1])
return (plugin, general) |
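A runnable sketch of the parsing done above on a hypothetical config: a [general] section plus one section per plugin instance (section and option names below are made up, and values stay strings here since _replace_data_types is not applied):

import configparser

sample = """
[general]
interval = 1
logging_level = DEBUG

[clock]
plugin = dateTime
interval = 5
"""

conf = configparser.ConfigParser()
conf.optionxform = str                      # preserve key case, as in reload()
conf.read_string(sample)
general = dict(conf.items('general'))
conf.remove_section('general')
plugins = [dict(conf.items(s), name=s) for s in conf.sections()]
print(general)   # {'interval': '1', 'logging_level': 'DEBUG'}
print(plugins)   # [{'plugin': 'dateTime', 'interval': '5', 'name': 'clock'}]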
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def coerce_retention_period(value):
""" Coerce a retention period to a Python value. :param value: A string containing the text 'always', a number or an expression that can be evaluated to a number. :returns: A number or the string 'always'. :raises: :exc:`~exceptions.ValueError` when the string can't be coerced. """ |
# Numbers pass through untouched.
if not isinstance(value, numbers.Number):
# Other values are expected to be strings.
if not isinstance(value, string_types):
msg = "Expected string, got %s instead!"
raise ValueError(msg % type(value))
# Check for the literal string `always'.
value = value.strip()
if value.lower() == 'always':
value = 'always'
else:
# Evaluate other strings as expressions.
value = simple_eval(value)
if not isinstance(value, numbers.Number):
msg = "Expected numeric result, got %s instead!"
raise ValueError(msg % type(value))
return value |
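A few illustrative calls, assuming the surrounding module imports simple_eval (presumably from the simpleeval package) and string_types (presumably from six), as the function body implies:

print(coerce_retention_period(7))          # 7        -- numbers pass through
print(coerce_retention_period('always'))   # 'always' -- case-insensitive literal
print(coerce_retention_period(' 4 '))      # 4        -- stripped and evaluated
print(coerce_retention_period('24 * 7'))   # 168      -- expressions are allowed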
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def load_config_file(configuration_file=None, expand=True):
""" Load a configuration file with backup directories and rotation schemes. :param configuration_file: Override the pathname of the configuration file to load (a string or :data:`None`). :param expand: :data:`True` to expand filename patterns to their matches, :data:`False` otherwise. :returns: A generator of tuples with four values each: 1. An execution context created using :mod:`executor.contexts`. 2. The pathname of a directory with backups (a string). 3. A dictionary with the rotation scheme. 4. A dictionary with additional options. :raises: :exc:`~exceptions.ValueError` when `configuration_file` is given but doesn't exist or can't be loaded. This function is used by :class:`RotateBackups` to discover user defined rotation schemes and by :mod:`rotate_backups.cli` to discover directories for which backup rotation is configured. When `configuration_file` isn't given :class:`~update_dotdee.ConfigLoader` is used to search for configuration files in the following locations: - ``/etc/rotate-backups.ini`` and ``/etc/rotate-backups.d/*.ini`` - ``~/.rotate-backups.ini`` and ``~/.rotate-backups.d/*.ini`` - ``~/.config/rotate-backups.ini`` and ``~/.config/rotate-backups.d/*.ini`` All of the available configuration files are loaded in the order given above, so that sections in user-specific configuration files override sections by the same name in system-wide configuration files. """ |
expand_notice_given = False
if configuration_file:
loader = ConfigLoader(available_files=[configuration_file], strict=True)
else:
loader = ConfigLoader(program_name='rotate-backups', strict=False)
for section in loader.section_names:
items = dict(loader.get_options(section))
context_options = {}
if coerce_boolean(items.get('use-sudo')):
context_options['sudo'] = True
if items.get('ssh-user'):
context_options['ssh_user'] = items['ssh-user']
location = coerce_location(section, **context_options)
rotation_scheme = dict((name, coerce_retention_period(items[name]))
for name in SUPPORTED_FREQUENCIES
if name in items)
options = dict(include_list=split(items.get('include-list', '')),
exclude_list=split(items.get('exclude-list', '')),
io_scheduling_class=items.get('ionice'),
strict=coerce_boolean(items.get('strict', 'yes')),
prefer_recent=coerce_boolean(items.get('prefer-recent', 'no')))
# Don't override the value of the 'removal_command' property unless the
# 'removal-command' configuration file option has a value set.
if items.get('removal-command'):
options['removal_command'] = shlex.split(items['removal-command'])
# Expand filename patterns?
if expand and location.have_wildcards:
logger.verbose("Expanding filename pattern %s on %s ..", location.directory, location.context)
if location.is_remote and not expand_notice_given:
logger.notice("Expanding remote filename patterns (may be slow) ..")
expand_notice_given = True
for match in sorted(location.context.glob(location.directory)):
if location.context.is_directory(match):
logger.verbose("Matched directory: %s", match)
expanded = Location(context=location.context, directory=match)
yield expanded, rotation_scheme, options
else:
logger.verbose("Ignoring match (not a directory): %s", match)
else:
yield location, rotation_scheme, options |
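A hedged usage sketch: iterate over the generator to see which backup directories are configured and how they will be rotated. The configuration path below is one of the locations named in the docstring and is assumed to exist.

for location, rotation_scheme, options in load_config_file('/etc/rotate-backups.ini'):
    print(location)           # e.g. an execution-context Location for a backup directory
    print(rotation_scheme)    # e.g. {'daily': 7, 'weekly': 4, 'monthly': 'always'}
    print(options['include_list'], options['strict'])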
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def rotate_concurrent(self, *locations, **kw):
""" Rotate the backups in the given locations concurrently. :param locations: One or more values accepted by :func:`coerce_location()`. :param kw: Any keyword arguments are passed on to :func:`rotate_backups()`. This function uses :func:`rotate_backups()` to prepare rotation commands for the given locations and then it removes backups in parallel, one backup per mount point at a time. The idea behind this approach is that parallel rotation is most useful when the files to be removed are on different disks and so multiple devices can be utilized at the same time. Because mount points are per system :func:`rotate_concurrent()` will also parallelize over backups located on multiple remote systems. """ |
timer = Timer()
pool = CommandPool(concurrency=10)
logger.info("Scanning %s ..", pluralize(len(locations), "backup location"))
for location in locations:
for cmd in self.rotate_backups(location, prepare=True, **kw):
pool.add(cmd)
if pool.num_commands > 0:
backups = pluralize(pool.num_commands, "backup")
logger.info("Preparing to rotate %s (in parallel) ..", backups)
pool.run()
logger.info("Successfully rotated %s in %s.", backups, timer) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def load_config_file(self, location):
""" Load a rotation scheme and other options from a configuration file. :param location: Any value accepted by :func:`coerce_location()`. :returns: The configured or given :class:`Location` object. """ |
location = coerce_location(location)
for configured_location, rotation_scheme, options in load_config_file(self.config_file, expand=False):
if configured_location.match(location):
logger.verbose("Loading configuration for %s ..", location)
if rotation_scheme:
self.rotation_scheme = rotation_scheme
for name, value in options.items():
if value:
setattr(self, name, value)
# Create a new Location object based on the directory of the
# given location and the execution context of the configured
# location, because:
#
# 1. The directory of the configured location may be a filename
# pattern whereas we are interested in the expanded name.
#
# 2. The execution context of the given location may lack some
# details of the configured location.
return Location(
context=configured_location.context,
directory=location.directory,
)
logger.verbose("No configuration found for %s.", location)
return location |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def collect_backups(self, location):
""" Collect the backups at the given location. :param location: Any value accepted by :func:`coerce_location()`. :returns: A sorted :class:`list` of :class:`Backup` objects (the backups are sorted by their date). :raises: :exc:`~exceptions.ValueError` when the given directory doesn't exist or isn't readable. """ |
backups = []
location = coerce_location(location)
logger.info("Scanning %s for backups ..", location)
location.ensure_readable()
for entry in natsort(location.context.list_entries(location.directory)):
match = TIMESTAMP_PATTERN.search(entry)
if match:
if self.exclude_list and any(fnmatch.fnmatch(entry, p) for p in self.exclude_list):
logger.verbose("Excluded %s (it matched the exclude list).", entry)
elif self.include_list and not any(fnmatch.fnmatch(entry, p) for p in self.include_list):
logger.verbose("Excluded %s (it didn't match the include list).", entry)
else:
try:
backups.append(Backup(
pathname=os.path.join(location.directory, entry),
timestamp=datetime.datetime(*(int(group, 10) for group in match.groups('0'))),
))
except ValueError as e:
logger.notice("Ignoring %s due to invalid date (%s).", entry, e)
else:
logger.debug("Failed to match time stamp in filename: %s", entry)
if backups:
logger.info("Found %i timestamped backups in %s.", len(backups), location)
return sorted(backups) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def find_preservation_criteria(self, backups_by_frequency):
""" Collect the criteria used to decide which backups to preserve. :param backups_by_frequency: A :class:`dict` in the format generated by :func:`group_backups()` which has been processed by :func:`apply_rotation_scheme()`. :returns: A :class:`dict` with :class:`Backup` objects as keys and :class:`list` objects containing strings (rotation frequencies) as values. """ |
backups_to_preserve = collections.defaultdict(list)
for frequency, delta in ORDERED_FREQUENCIES:
for period in backups_by_frequency[frequency].values():
for backup in period:
backups_to_preserve[backup].append(frequency)
return backups_to_preserve |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def match(self, location):
""" Check if the given location "matches". :param location: The :class:`Location` object to try to match. :returns: :data:`True` if the two locations are on the same system and the :attr:`directory` can be matched as a filename pattern or a literal match on the normalized pathname. """ |
if self.ssh_alias != location.ssh_alias:
# Never match locations on other systems.
return False
elif self.have_wildcards:
# Match filename patterns using fnmatch().
return fnmatch.fnmatch(location.directory, self.directory)
else:
# Compare normalized directory pathnames.
ours = os.path.normpath(self.directory)
theirs = os.path.normpath(location.directory)
return ours == theirs |
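A standalone sketch of the two comparison modes used above (the directory names are made up):

import fnmatch, os.path

# wildcard mode: the configured directory is treated as a filename pattern
print(fnmatch.fnmatch('/backups/host-a', '/backups/host-*'))     # True

# literal mode: both sides are normalized before comparing
print(os.path.normpath('/backups/host-a/') == os.path.normpath('/backups//host-a'))  # True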
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def setup_file_logger(filename, formatting, log_level):
""" A helper function for creating a file logger. Accepts arguments, as it is used in Status and LoggingWriter. """ |
logger = logging.getLogger()
# If a stream handler has been attached, remove it.
if logger.handlers:
logger.removeHandler(logger.handlers[0])
handler = logging.FileHandler(filename)
logger.addHandler(handler)
formatter = logging.Formatter(*formatting)
handler.setFormatter(formatter)
logger.setLevel(log_level)
handler.setLevel(log_level)
return logger |
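A hedged usage sketch, assuming setup_file_logger above is in scope: the formatting argument is unpacked into logging.Formatter, so a one-element tuple with a format string is enough. The log path is hypothetical.

import logging, os, tempfile

log_path = os.path.join(tempfile.mkdtemp(), 'i3situation.log')
log = setup_file_logger(log_path,
                        ('%(asctime)s %(levelname)s %(message)s',),
                        logging.DEBUG)
log.info('file logger configured')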
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def output_to_bar(self, message, comma=True):
""" Outputs data to stdout, without buffering. message: A string containing the data to be output. comma: Whether or not a comma should be placed at the end of the output. """ |
if comma:
message += ','
sys.stdout.write(message + '\n')
sys.stdout.flush() |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def reload(self):
""" Reload the installed plugins and the configuration file. This is called when either the plugins or config get updated. """ |
logging.debug('Reloading config file as files have been modified.')
self.config.plugin, self.config.general = self.config.reload()
logging.debug('Reloading plugins as files have been modified.')
self.loader = plugin_manager.PluginLoader(
self._plugin_path, self.config.plugin)
self._plugin_mod_time = os.path.getmtime(self._plugin_path)
self._config_mod_time = os.path.getmtime(self._config_file_path) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def run_plugins(self):
""" Creates a thread for each plugin and lets the thread_manager handle it. """ |
for obj in self.loader.objects:
# Reserve a slot in the output_dict in order to ensure that the
# items are in the correct order.
self.output_dict[obj.output_options['name']] = None
self.thread_manager.add_thread(obj.main, obj.options['interval']) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def run(self):
""" Monitors if the config file or plugins are updated. Also outputs the JSON data generated by the plugins, without needing to poll the threads. """ |
self.run_plugins()
while True:
# Reload plugins and config if either the config file or plugin
# directory are modified.
if self._config_mod_time != os.path.getmtime(self._config_file_path) or \
self._plugin_mod_time != os.path.getmtime(self._plugin_path):
self.thread_manager.kill_all_threads()
self.output_dict.clear()
self.reload()
self.run_plugins()
self.output_to_bar(json.dumps(self._remove_empty_output()))
time.sleep(self.config.general['interval']) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _remove_empty_output(self):
""" If plugins haven't been initialised and therefore not sending output or their output is None, there is no reason to take up extra room on the bar. """ |
output = []
for key in self.output_dict:
if self.output_dict[key] is not None and 'full_text' in self.output_dict[key]:
output.append(self.output_dict[key])
return output |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def handle_events(self):
""" An event handler that processes events from stdin and calls the on_click function of the respective object. This function is run in another thread, so as to not stall the main thread. """ |
for event in sys.stdin:
if event.startswith('['):
continue
name = json.loads(event.lstrip(','))['name']
for obj in self.loader.objects:
if obj.output_options['name'] == name:
obj.on_click(json.loads(event.lstrip(','))) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def field_dict(self, model):
""" Helper function that returns a dictionary of all fields in the given model. If self.field_filter is set, it only includes the fields that match the filter. """ |
if self.field_filter:
return dict(
[(f.name, f) for f in model._meta.fields
if self.field_filter(f)]
)
else:
return dict(
[(f.name, f) for f in model._meta.fields
if not f.rel and
not f.primary_key and
not f.unique and
not isinstance(f, (models.AutoField, models.TextField))]
) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def run(self):
""" Calls the main function of a plugin and mutates the output dict with its return value. Provides an easy way to change the output whilst not needing to constantly poll a queue in another thread and allowing plugin's to manage their own intervals. """ |
self.running = True
while self.running:
ret = self.func()
self.output_dict[ret['name']] = ret
time.sleep(self.interval)
return |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def add_thread(self, func, interval):
""" Creates a thread, starts it and then adds it to the thread pool. Func: Same as in the Thread class. Interval: Same as in the Thread class. """ |
t = Thread(func, interval, self.output_dict)
t.start()
self._thread_pool.append(t) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _compile_files(self):
""" Compiles python plugin files in order to be processed by the loader. It compiles the plugins if they have been updated or haven't yet been compiled. """ |
for f in glob.glob(os.path.join(self.dir_path, '*.py')):
# Check for compiled Python files that aren't in the __pycache__.
if not os.path.isfile(os.path.join(self.dir_path, f + 'c')):
compileall.compile_dir(self.dir_path, quiet=True)
logging.debug('Compiled plugins as a new plugin has been added.')
return
# Recompile if there are newer plugins.
elif os.path.getmtime(os.path.join(self.dir_path, f)) > os.path.getmtime(
os.path.join(self.dir_path, f + 'c')):
compileall.compile_dir(self.dir_path, quiet=True)
logging.debug('Compiled plugins as a plugin has been changed.')
return |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _load_compiled(self, file_path):
""" Accepts a path to a compiled plugin and returns a module object. file_path: A string that represents a complete file path to a compiled plugin. """ |
name = os.path.splitext(os.path.split(file_path)[-1])[0]
plugin_directory = os.sep.join(os.path.split(file_path)[0:-1])
compiled_directory = os.path.join(plugin_directory, '__pycache__')
# Use glob to autocomplete the filename.
compiled_file = glob.glob(os.path.join(compiled_directory, (name + '.*')))[0]
plugin = imp.load_compiled(name, compiled_file)
return plugin |
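A rough Python 3 equivalent of imp.load_compiled(), since the imp module is deprecated and removed in Python 3.12; name and compiled_file stand for the same values computed above. This is a sketch, not a drop-in replacement for the method.

import importlib.util

def load_compiled(name, compiled_file):
    spec = importlib.util.spec_from_file_location(name, compiled_file)
    module = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(module)          # a sourceless loader handles .pyc files
    return module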
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def load_objects(self):
""" Matches the plugins that have been specified in the config file with the available plugins. Returns instantiated objects based upon the classes defined in the plugins. """ |
objects = []
for settings in self._config:
if settings['plugin'] in self.plugins:
module = self.plugins[settings['plugin']]
# Trusts that the only item in __all__ is the name of the
# plugin class.
plugin_class = getattr(module, module.__all__)
objects.append(plugin_class(settings))
logging.debug('Loaded a plugin object based upon {0}'.format(
settings['plugin']))
else:
logging.critical('Missing plugin {0} was not found in {1}'.format(
settings['plugin'], self.dir_path))
raise MissingPlugin('The plugin {0} was not found in {1}'.format(
settings['plugin'], self.dir_path))
return objects |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def refresh_files(self):
""" Discovers the available plugins and turns each into a module object. This is a seperate function to allow plugins to be updated dynamically by other parts of the application. """ |
plugins = {}
_plugin_files = glob.glob(os.path.join(self.dir_path, '[!_]*.pyc'))
for f in glob.glob(os.path.join(self.dir_path, '[!_]*.py')):
if not any(os.path.splitext(f)[0] == os.path.splitext(x)[0]
for x in _plugin_files):
logging.debug('Adding plugin {0}'.format(f))
_plugin_files.append(f)
for f in _plugin_files:
plugin = self._load_compiled(f)
plugins[plugin.__name__] = plugin
logging.debug('Loaded module object for plugin: {0}'.format(f))
return plugins |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def model_page(self, request, app_label, model_name, rest_of_url=None):
""" Handles the model-specific functionality of the databrowse site, delegating<to the appropriate ModelDatabrowse class. """ |
try:
model = get_model(app_label, model_name)
except LookupError:
model = None
if model is None:
raise http.Http404("App %r, model %r, not found." %
(app_label, model_name))
try:
databrowse_class = self.registry[model]
except KeyError:
raise http.Http404("This model exists but has not been registered "
"with databrowse.")
return databrowse_class(model, self).root(request, rest_of_url) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def values(self):
""" Returns a list of values for this field for this instance. It's a list so we can accomodate many-to-many fields. """ |
# This import is deliberately inside the function because it causes
# some settings to be imported, and we don't want to do that at the
# module level.
if self.field.rel:
if isinstance(self.field.rel, models.ManyToOneRel):
objs = getattr(self.instance.instance, self.field.name)
elif isinstance(self.field.rel,
models.ManyToManyRel): # ManyToManyRel
return list(getattr(self.instance.instance,
self.field.name).all())
elif self.field.choices:
objs = dict(self.field.choices).get(self.raw_value, EMPTY_VALUE)
elif isinstance(self.field, models.DateField) or \
isinstance(self.field, models.TimeField):
if self.raw_value:
if isinstance(self.field, models.DateTimeField):
objs = capfirst(formats.date_format(self.raw_value,
'DATETIME_FORMAT'))
elif isinstance(self.field, models.TimeField):
objs = capfirst(formats.time_format(self.raw_value,
'TIME_FORMAT'))
else:
objs = capfirst(formats.date_format(self.raw_value,
'DATE_FORMAT'))
else:
objs = EMPTY_VALUE
elif isinstance(self.field, models.BooleanField) or \
isinstance(self.field, models.NullBooleanField):
objs = {True: 'Yes', False: 'No', None: 'Unknown'}[self.raw_value]
else:
objs = self.raw_value
return [objs] |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def field_dict(self, model):
""" Helper function that returns a dictionary of all DateFields or DateTimeFields in the given model. If self.field_names is set, it takes that into account when building the dictionary. """ |
if self.field_names is None:
return dict([(f.name, f) for f in model._meta.fields
if isinstance(f, models.DateField)])
else:
return dict([(f.name, f)
for f in model._meta.fields
if isinstance(f, models.DateField) and
(f.name in self.field_names)]) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def kmer_dag(job,
input_file,
output_path,
kmer_length,
spark_conf,
workers,
cores,
memory,
sudo):
'''
Optionally launches a Spark cluster and then runs ADAM to count k-mers on an
input file.
:param job: Toil job
:param input_file: URL/path to input file to count k-mers on
:param output_path: URL/path to save k-mer counts at
:param kmer_length: The length of k-mer substrings to count.
:param spark_conf: Optional Spark configuration. If set, workers should \
not be set.
:param workers: Optional number of Spark workers to launch. If set, \
spark_conf should not be set, and cores and memory should be set.
:param cores: Number of cores per Spark worker. Must be set if workers is \
set.
:param memory: Amount of memory to provided to Spark workers. Must be set \
if workers is set.
:param sudo: Whether or not to run Spark containers with sudo.
:type job: toil.Job
:type input_file: string
:type output_path: string
:type kmer_length: int or string
:type spark_conf: string or None
:type workers: int or None
:type cores: int or None
:type memory: int or None
:type sudo: boolean
'''
require((spark_conf is not None and workers is None) or
(workers is not None and cores is not None and memory is not None and spark_conf is None),
"Either worker count (--workers) must be defined or user must pass in Spark configuration (--spark-conf).")
# if we do not have a spark configuration, then we must spawn a cluster
if spark_conf is None:
master_hostname = spawn_spark_cluster(job,
sudo,
workers,
cores)
else:
spark_conf = shlex.split(spark_conf)
# no cluster is spawned in this branch; the master comes from the Spark configuration
master_hostname = None
job.addChildJobFn(download_count_upload,
master_hostname,
input_file, output_path, kmer_length,
spark_conf, memory, sudo) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def download_count_upload(job,
master_ip,
input_file,
output_file,
kmer_length,
spark_conf,
memory,
sudo):
'''
Runs k-mer counting.
1. If the input file is located in S3, the file is copied into HDFS.
2. If the input file is not in Parquet format, the file is converted into Parquet.
3. The k-mers are counted and saved as text.
4. If the output path is an S3 URL, the file is copied back to S3.
:param job: Toil job
:param input_file: URL/path to input file to count k-mers on
:param output_file: URL/path to save k-mer counts at
:param kmer_length: The length of k-mer substrings to count.
:param spark_conf: Optional Spark configuration. If set, memory should \
not be set.
:param memory: Amount of memory to provided to Spark workers. Must be set \
if spark_conf is not set.
:param sudo: Whether or not to run Spark containers with sudo.
:type job: toil.Job
:type input_file: string
:type output_file: string
:type kmer_length: int or string
:type spark_conf: list of string or None
:type memory: int or None
:type sudo: boolean
'''
if master_ip is not None:
hdfs_dir = "hdfs://{0}:{1}/".format(master_ip, HDFS_MASTER_PORT)
else:
_log.warn('Master IP is not set. If default filesystem is not set, jobs may fail.')
hdfs_dir = ""
# if the file isn't already in hdfs, copy it in
hdfs_input_file = hdfs_dir
if input_file.startswith("s3://"):
# append the s3 file name to our hdfs path
hdfs_input_file += input_file.split("/")[-1]
# run the download
_log.info("Downloading input file %s to %s.", input_file, hdfs_input_file)
call_conductor(job, master_ip, input_file, hdfs_input_file,
memory=memory, override_parameters=spark_conf)
else:
if not input_file.startswith("hdfs://"):
_log.warn("If not in S3, input file (%s) expected to be in HDFS (%s).",
input_file, hdfs_dir)
# where are we writing the output to? is it going to a location in hdfs or not?
run_upload = True
hdfs_output_file = hdfs_dir + "kmer_output.txt"
if output_file.startswith(hdfs_dir):
run_upload = False
hdfs_output_file = output_file
# do we need to convert to adam?
if (hdfs_input_file.endswith('.bam') or
hdfs_input_file.endswith('.sam') or
hdfs_input_file.endswith('.fq') or
hdfs_input_file.endswith('.fastq')):
hdfs_tmp_file = hdfs_input_file
# change the file extension to adam
hdfs_input_file = '.'.join(hdfs_input_file.split('.')[:-1] + ['adam'])
# convert the file
_log.info('Converting %s into ADAM format at %s.', hdfs_tmp_file, hdfs_input_file)
call_adam(job, master_ip,
['transform',
hdfs_tmp_file, hdfs_input_file],
memory=memory, override_parameters=spark_conf)
# run k-mer counting
_log.info('Counting %d-mers in %s, and saving to %s.',
kmer_length, hdfs_input_file, hdfs_output_file)
call_adam(job, master_ip,
['count_kmers',
hdfs_input_file, hdfs_output_file,
str(kmer_length)],
memory=memory, override_parameters=spark_conf)
# do we need to upload the file back? if so, run upload
if run_upload:
_log.info("Uploading output file %s to %s.", hdfs_output_file, output_file)
call_conductor(job, master_ip, hdfs_output_file, output_file,
memory=memory, override_parameters=spark_conf) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def run_gatk_germline_pipeline(job, samples, config):
""" Downloads shared files and calls the GATK best practices germline pipeline for a cohort of samples :param JobFunctionWrappingJob job: passed automatically by Toil :param list[GermlineSample] samples: List of GermlineSample namedtuples :param Namespace config: Configuration options for pipeline Requires the following config attributes: config.preprocess_only If True, then stops pipeline after preprocessing steps config.joint_genotype If True, then joint genotypes cohort config.run_oncotator If True, then adds Oncotator to pipeline Additional parameters are needed for downstream steps. Refer to pipeline README for more information. """ |
# Determine the available disk space on a worker node before any jobs have been run.
work_dir = job.fileStore.getLocalTempDir()
st = os.statvfs(work_dir)
config.available_disk = st.f_bavail * st.f_frsize
# Check that there is a reasonable number of samples for joint genotyping
num_samples = len(samples)
if config.joint_genotype and not 30 < num_samples < 200:
job.fileStore.logToMaster('WARNING: GATK recommends batches of '
'30 to 200 samples for joint genotyping. '
'The current cohort has %d samples.' % num_samples)
shared_files = Job.wrapJobFn(download_shared_files, config).encapsulate()
job.addChild(shared_files)
if config.preprocess_only:
for sample in samples:
shared_files.addChildJobFn(prepare_bam,
sample.uuid,
sample.url,
shared_files.rv(),
paired_url=sample.paired_url,
rg_line=sample.rg_line)
else:
run_pipeline = Job.wrapJobFn(gatk_germline_pipeline,
samples,
shared_files.rv()).encapsulate()
shared_files.addChild(run_pipeline)
if config.run_oncotator:
annotate = Job.wrapJobFn(annotate_vcfs, run_pipeline.rv(), shared_files.rv())
run_pipeline.addChild(annotate) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def gatk_germline_pipeline(job, samples, config):
""" Runs the GATK best practices pipeline for germline SNP and INDEL discovery. Steps in Pipeline 0: Generate and preprocess BAM - Uploads processed BAM to output directory 1: Call Variants using HaplotypeCaller - Uploads GVCF 2: Genotype VCF - Uploads VCF 3: Filter Variants using either "hard filters" or VQSR - Uploads filtered VCF :param JobFunctionWrappingJob job: passed automatically by Toil :param list[GermlineSample] samples: List of GermlineSample namedtuples :param Namespace config: Input parameters and reference FileStoreIDs Requires the following config attributes: config.genome_fasta FilesStoreID for reference genome fasta file config.genome_fai FilesStoreID for reference genome fasta index file config.genome_dict FilesStoreID for reference genome sequence dictionary file config.cores Number of cores for each job config.xmx Java heap size in bytes config.suffix Suffix added to output filename config.output_dir URL or local path to output directory config.ssec Path to key file for SSE-C encryption config.joint_genotype If True, then joint genotype and filter cohort config.hc_output URL or local path to HaplotypeCaller output for testing :return: Dictionary of filtered VCF FileStoreIDs :rtype: dict """ |
require(len(samples) > 0, 'No samples were provided!')
# Get total size of genome reference files. This is used for configuring disk size.
genome_ref_size = config.genome_fasta.size + config.genome_fai.size + config.genome_dict.size
# 0: Generate processed BAM and BAI files for each sample
# group preprocessing and variant calling steps in empty Job instance
group_bam_jobs = Job()
gvcfs = {}
for sample in samples:
# 0: Generate processed BAM and BAI files for each sample
get_bam = group_bam_jobs.addChildJobFn(prepare_bam,
sample.uuid,
sample.url,
config,
paired_url=sample.paired_url,
rg_line=sample.rg_line)
# 1: Generate per sample gvcfs {uuid: gvcf_id}
# The HaplotypeCaller disk requirement depends on the input bam, bai, the genome reference
# files, and the output GVCF file. The output GVCF is smaller than the input BAM file.
hc_disk = PromisedRequirement(lambda bam, bai, ref_size:
2 * bam.size + bai.size + ref_size,
get_bam.rv(0),
get_bam.rv(1),
genome_ref_size)
get_gvcf = get_bam.addFollowOnJobFn(gatk_haplotype_caller,
get_bam.rv(0),
get_bam.rv(1),
config.genome_fasta, config.genome_fai, config.genome_dict,
annotations=config.annotations,
cores=config.cores,
disk=hc_disk,
memory=config.xmx,
hc_output=config.hc_output)
# Store cohort GVCFs in dictionary
gvcfs[sample.uuid] = get_gvcf.rv()
# Upload individual sample GVCF before genotyping to a sample specific output directory
vqsr_name = '{}{}.g.vcf'.format(sample.uuid, config.suffix)
get_gvcf.addChildJobFn(output_file_job,
vqsr_name,
get_gvcf.rv(),
os.path.join(config.output_dir, sample.uuid),
s3_key_path=config.ssec,
disk=PromisedRequirement(lambda x: x.size, get_gvcf.rv()))
# VQSR requires many variants in order to train a decent model. GATK recommends a minimum of
# 30 exomes or one large WGS sample:
# https://software.broadinstitute.org/gatk/documentation/article?id=3225
filtered_vcfs = {}
if config.joint_genotype:
# Need to configure joint genotype in a separate function to resolve promises
filtered_vcfs = group_bam_jobs.addFollowOnJobFn(joint_genotype_and_filter,
gvcfs,
config).rv()
# If not joint genotyping, then iterate over cohort and genotype and filter individually.
else:
for uuid, gvcf_id in gvcfs.iteritems():
filtered_vcfs[uuid] = group_bam_jobs.addFollowOnJobFn(genotype_and_filter,
{uuid: gvcf_id},
config).rv()
job.addChild(group_bam_jobs)
return filtered_vcfs |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def joint_genotype_and_filter(job, gvcfs, config):
""" Checks for enough disk space for joint genotyping, then calls the genotype and filter pipeline function. :param JobFunctionWrappingJob job: passed automatically by Toil :param dict gvcfs: Dictionary of GVCFs {Sample ID: FileStoreID} :param Namespace config: Input parameters and reference FileStoreIDs Requires the following config attributes: config.genome_fasta FilesStoreID for reference genome fasta file config.genome_fai FilesStoreID for reference genome fasta index file config.genome_dict FilesStoreID for reference genome sequence dictionary file config.available_disk Total available disk space :returns: FileStoreID for the joint genotyped and filtered VCF file :rtype: str """ |
# Get the total size of genome reference files
genome_ref_size = config.genome_fasta.size + config.genome_fai.size + config.genome_dict.size
# Require at least 2.5x the sum of the individual GVCF files
cohort_size = sum(gvcf.size for gvcf in gvcfs.values())
require(int(2.5 * cohort_size + genome_ref_size) < config.available_disk,
'There is not enough disk space to joint '
'genotype samples:\n{}'.format('\n'.join(gvcfs.keys())))
job.fileStore.logToMaster('Merging cohort into a single GVCF file')
return job.addChildJobFn(genotype_and_filter, gvcfs, config).rv() |
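The guard above is plain arithmetic; a standalone restatement with invented sizes, in case the scaling factor is easier to see outside the Toil machinery:

def enough_disk_for_joint_genotyping(gvcf_sizes, genome_ref_size, available_disk):
    # Mirrors the require() above: 2.5x the combined GVCF size plus the reference must fit
    return int(2.5 * sum(gvcf_sizes) + genome_ref_size) < available_disk


# Illustrative numbers only: three 2 GiB GVCFs, a 3 GiB reference, 100 GiB of free disk
print(enough_disk_for_joint_genotyping([2 << 30] * 3, 3 << 30, 100 << 30))  # True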
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def genotype_and_filter(job, gvcfs, config):
""" Genotypes one or more GVCF files and runs either the VQSR or hard filtering pipeline. Uploads the genotyped VCF file to the config output directory. :param JobFunctionWrappingJob job: passed automatically by Toil :param dict gvcfs: Dictionary of GVCFs {Sample ID: FileStoreID} :param Namespace config: Input parameters and shared FileStoreIDs Requires the following config attributes: config.genome_fasta FilesStoreID for reference genome fasta file config.genome_fai FilesStoreID for reference genome fasta index file config.genome_dict FilesStoreID for reference genome sequence dictionary file config.suffix Suffix added to output filename config.output_dir URL or local path to output directory config.ssec Path to key file for SSE-C encryption config.cores Number of cores for each job config.xmx Java heap size in bytes config.unsafe_mode If True, then run GATK tools in UNSAFE mode :return: FileStoreID for genotyped and filtered VCF file :rtype: str """ |
# Get the total size of the genome reference
genome_ref_size = config.genome_fasta.size + config.genome_fai.size + config.genome_dict.size
# GenotypeGVCF disk requirement depends on the input GVCF, the genome reference files, and
# the output VCF file. The output VCF is smaller than the input GVCF.
genotype_gvcf_disk = PromisedRequirement(lambda gvcf_ids, ref_size:
2 * sum(gvcf_.size for gvcf_ in gvcf_ids) + ref_size,
gvcfs.values(),
genome_ref_size)
genotype_gvcf = job.addChildJobFn(gatk_genotype_gvcfs,
gvcfs,
config.genome_fasta,
config.genome_fai,
config.genome_dict,
annotations=config.annotations,
unsafe_mode=config.unsafe_mode,
cores=config.cores,
disk=genotype_gvcf_disk,
memory=config.xmx)
# Determine if output GVCF has multiple samples
if len(gvcfs) == 1:
uuid = gvcfs.keys()[0]
else:
uuid = 'joint_genotyped'
genotyped_filename = '%s.genotyped%s.vcf' % (uuid, config.suffix)
genotype_gvcf.addChildJobFn(output_file_job,
genotyped_filename,
genotype_gvcf.rv(),
os.path.join(config.output_dir, uuid),
s3_key_path=config.ssec,
disk=PromisedRequirement(lambda x: x.size, genotype_gvcf.rv()))
if config.run_vqsr:
if not config.joint_genotype:
job.fileStore.logToMaster('WARNING: Running VQSR without joint genotyping.')
joint_genotype_vcf = genotype_gvcf.addFollowOnJobFn(vqsr_pipeline,
uuid,
genotype_gvcf.rv(),
config)
else:
joint_genotype_vcf = genotype_gvcf.addFollowOnJobFn(hard_filter_pipeline,
uuid,
genotype_gvcf.rv(),
config)
return joint_genotype_vcf.rv() |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def annotate_vcfs(job, vcfs, config):
""" Runs Oncotator for a group of VCF files. Each sample is annotated individually. :param JobFunctionWrappingJob job: passed automatically by Toil :param dict vcfs: Dictionary of VCF FileStoreIDs {Sample identifier: FileStoreID} :param Namespace config: Input parameters and shared FileStoreIDs Requires the following config attributes: config.oncotator_db FileStoreID to Oncotator database config.suffix Suffix added to output filename config.output_dir URL or local path to output directory config.ssec Path to key file for SSE-C encryption config.cores Number of cores for each job config.xmx Java heap size in bytes """ |
job.fileStore.logToMaster('Running Oncotator on the following samples:\n%s' % '\n'.join(vcfs.keys()))
for uuid, vcf_id in vcfs.iteritems():
# The Oncotator disk requirement depends on the input VCF, the Oncotator database
# and the output VCF. The annotated VCF will be significantly larger than the input VCF.
onco_disk = PromisedRequirement(lambda vcf, db: 3 * vcf.size + db.size,
vcf_id,
config.oncotator_db)
annotated_vcf = job.addChildJobFn(run_oncotator,
vcf_id,
config.oncotator_db,
disk=onco_disk,
cores=config.cores,
memory=config.xmx)
output_dir = os.path.join(config.output_dir, uuid)
filename = '{}.oncotator{}.vcf'.format(uuid, config.suffix)
annotated_vcf.addChildJobFn(output_file_job,
filename,
annotated_vcf.rv(),
output_dir,
s3_key_path=config.ssec,
disk=PromisedRequirement(lambda x: x.size, annotated_vcf.rv())) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def parse_manifest(path_to_manifest):
""" Parses manifest file for Toil Germline Pipeline :param str path_to_manifest: Path to sample manifest file :return: List of GermlineSample namedtuples :rtype: list[GermlineSample] """ |
bam_re = r"^(?P<uuid>\S+)\s(?P<url>\S+[bsc][r]?am)"
fq_re = r"^(?P<uuid>\S+)\s(?P<url>\S+)\s(?P<paired_url>\S+)?\s?(?P<rg_line>@RG\S+)"
samples = []
with open(path_to_manifest, 'r') as f:
for line in f.readlines():
line = line.strip()
if line.startswith('#'):
continue
bam_match = re.match(bam_re, line)
fastq_match = re.match(fq_re, line)
if bam_match:
uuid = bam_match.group('uuid')
url = bam_match.group('url')
paired_url = None
rg_line = None
require('.bam' in url.lower(),
'Expected .bam extension:\n{}:\t{}'.format(uuid, url))
elif fastq_match:
uuid = fastq_match.group('uuid')
url = fastq_match.group('url')
paired_url = fastq_match.group('paired_url')
rg_line = fastq_match.group('rg_line')
require('.fq' in url.lower() or '.fastq' in url.lower(),
'Expected .fq extension:\n{}:\t{}'.format(uuid, url))
else:
raise ValueError('Could not parse entry in manifest: %s\n%s' % (f.name, line))
# Checks that URL has a scheme
require(urlparse(url).scheme, 'Invalid URL passed for {}'.format(url))
samples.append(GermlineSample(uuid, url, paired_url, rg_line))
return samples |
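A hedged illustration of the manifest format implied by the regular expressions above: one whitespace-separated line per sample, either a UUID and a BAM URL, or a UUID, one or two FASTQ URLs, and an @RG line written with literal backslash-t separators (the rg_line field cannot contain real whitespace). The UUIDs and URLs below are invented, and parse_manifest is assumed to be importable from this module.

import tempfile

example_manifest = '\n'.join([
    '# uuid url [paired_url rg_line]',
    'sample1 s3://example-bucket/reads/NA0001.bam',
    'sample2 s3://example-bucket/reads/NA0002_1.fastq.gz s3://example-bucket/reads/NA0002_2.fastq.gz @RG\\tID:NA0002\\tSM:NA0002',
])

with tempfile.NamedTemporaryFile(mode='w', suffix='.tsv', delete=False) as f:
    f.write(example_manifest)

for germline_sample in parse_manifest(f.name):
    print(germline_sample)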
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def download_shared_files(job, config):
""" Downloads shared reference files for Toil Germline pipeline :param JobFunctionWrappingJob job: passed automatically by Toil :param Namespace config: Pipeline configuration options :return: Updated config with shared fileStoreIDS :rtype: Namespace """ |
job.fileStore.logToMaster('Downloading shared reference files')
shared_files = {'genome_fasta', 'genome_fai', 'genome_dict'}
nonessential_files = {'genome_fai', 'genome_dict'}
# Download necessary files for pipeline configuration
if config.run_bwa:
shared_files |= {'amb', 'ann', 'bwt', 'pac', 'sa', 'alt'}
nonessential_files.add('alt')
if config.preprocess:
shared_files |= {'g1k_indel', 'mills', 'dbsnp'}
if config.run_vqsr:
shared_files |= {'g1k_snp', 'mills', 'dbsnp', 'hapmap', 'omni'}
if config.run_oncotator:
shared_files.add('oncotator_db')
for name in shared_files:
try:
url = getattr(config, name, None)
if url is None:
continue
setattr(config, name, job.addChildJobFn(download_url_job,
url,
name=name,
s3_key_path=config.ssec,
disk='15G' # Estimated reference file size
).rv())
finally:
if getattr(config, name, None) is None and name not in nonessential_files:
raise ValueError("Necessary configuration parameter is missing:\n{}".format(name))
return job.addFollowOnJobFn(reference_preprocessing, config).rv() |
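The config object consumed here is a flat Namespace whose attribute names match the keys checked above. A minimal hand-built sketch follows; every URL is a placeholder, only the attributes this function inspects are shown, and the real pipeline presumably populates this object from its own configuration file rather than by hand.

from argparse import Namespace

# Placeholder values only; attribute names follow the checks in download_shared_files
config = Namespace(run_bwa=False,
                   preprocess=True,
                   run_vqsr=False,
                   run_oncotator=False,
                   ssec=None,
                   genome_fasta='s3://example-bucket/hg19.fa',
                   genome_fai='s3://example-bucket/hg19.fa.fai',
                   genome_dict='s3://example-bucket/hg19.dict',
                   g1k_indel='s3://example-bucket/1000G.indels.vcf',
                   mills='s3://example-bucket/Mills.indels.vcf',
                   dbsnp='s3://example-bucket/dbsnp.vcf')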
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def reference_preprocessing(job, config):
""" Creates a genome fasta index and sequence dictionary file if not already present in the pipeline config. :param JobFunctionWrappingJob job: passed automatically by Toil :param Namespace config: Pipeline configuration options and shared files. Requires FileStoreID for genome fasta file as config.genome_fasta :return: Updated config with reference index files :rtype: Namespace """ |
job.fileStore.logToMaster('Preparing Reference Files')
genome_id = config.genome_fasta
if getattr(config, 'genome_fai', None) is None:
config.genome_fai = job.addChildJobFn(run_samtools_faidx,
genome_id,
cores=config.cores).rv()
if getattr(config, 'genome_dict', None) is None:
config.genome_dict = job.addChildJobFn(run_picard_create_sequence_dictionary,
genome_id,
cores=config.cores,
memory=config.xmx).rv()
return config |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def prepare_bam(job, uuid, url, config, paired_url=None, rg_line=None):
""" Prepares BAM file for Toil germline pipeline. Steps in pipeline 0: Download and align BAM or FASTQ sample 1: Sort BAM 2: Index BAM 3: Run GATK preprocessing pipeline (Optional) - Uploads preprocessed BAM to output directory :param JobFunctionWrappingJob job: passed automatically by Toil :param str uuid: Unique identifier for the sample :param str url: URL or local path to BAM file or FASTQs :param Namespace config: Configuration options for pipeline Requires the following config attributes: config.genome_fasta FilesStoreID for reference genome fasta file config.genome_fai FilesStoreID for reference genome fasta index file config.genome_dict FilesStoreID for reference genome sequence dictionary file config.g1k_indel FileStoreID for 1000G INDEL resource file config.mills FileStoreID for Mills resource file config.dbsnp FileStoreID for dbSNP resource file config.suffix Suffix added to output filename config.output_dir URL or local path to output directory config.ssec Path to key file for SSE-C encryption config.cores Number of cores for each job config.xmx Java heap size in bytes :param str|None paired_url: URL or local path to paired FASTQ file, default is None :param str|None rg_line: RG line for BWA alignment (i.e. @RG\tID:foo\tSM:bar), default is None :return: BAM and BAI FileStoreIDs :rtype: tuple """ |
# 0: Align FASTQ or realign BAM
if config.run_bwa:
get_bam = job.wrapJobFn(setup_and_run_bwakit,
uuid,
url,
rg_line,
config,
paired_url=paired_url).encapsulate()
# 0: Download BAM
elif '.bam' in url.lower():
job.fileStore.logToMaster("Downloading BAM: %s" % uuid)
get_bam = job.wrapJobFn(download_url_job,
url,
name='toil.bam',
s3_key_path=config.ssec,
disk=config.file_size).encapsulate()
else:
raise ValueError('Could not generate BAM file for %s\n'
'Provide a FASTQ URL and set run-bwa or '
'provide a BAM URL that includes .bam extension.' % uuid)
# 1: Sort BAM file if necessary
# Realigning BAM file shuffles read order
if config.sorted and not config.run_bwa:
sorted_bam = get_bam
else:
# The samtools sort disk requirement depends on the input bam, the tmp files, and the
# sorted output bam.
sorted_bam_disk = PromisedRequirement(lambda bam: 3 * bam.size, get_bam.rv())
sorted_bam = get_bam.addChildJobFn(run_samtools_sort,
get_bam.rv(),
cores=config.cores,
disk=sorted_bam_disk)
# 2: Index BAM
# The samtools index disk requirement depends on the input bam and the output bam index
index_bam_disk = PromisedRequirement(lambda bam: bam.size, sorted_bam.rv())
index_bam = job.wrapJobFn(run_samtools_index, sorted_bam.rv(), disk=index_bam_disk)
job.addChild(get_bam)
sorted_bam.addChild(index_bam)
if config.preprocess:
preprocess = job.wrapJobFn(run_gatk_preprocessing,
sorted_bam.rv(),
index_bam.rv(),
config.genome_fasta,
config.genome_dict,
config.genome_fai,
config.g1k_indel,
config.mills,
config.dbsnp,
memory=config.xmx,
cores=config.cores).encapsulate()
sorted_bam.addChild(preprocess)
index_bam.addChild(preprocess)
# Update output BAM promises
output_bam_promise = preprocess.rv(0)
output_bai_promise = preprocess.rv(1)
# Save processed BAM
output_dir = os.path.join(config.output_dir, uuid)
filename = '{}.preprocessed{}.bam'.format(uuid, config.suffix)
output_bam = job.wrapJobFn(output_file_job,
filename,
preprocess.rv(0),
output_dir,
s3_key_path=config.ssec)
preprocess.addChild(output_bam)
else:
output_bam_promise = sorted_bam.rv()
output_bai_promise = index_bam.rv()
return output_bam_promise, output_bai_promise |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def setup_and_run_bwakit(job, uuid, url, rg_line, config, paired_url=None):
""" Downloads and runs bwakit for BAM or FASTQ files :param JobFunctionWrappingJob job: passed automatically by Toil :param str uuid: Unique sample identifier :param str url: FASTQ or BAM file URL. BAM alignment URL must have .bam extension. :param Namespace config: Input parameters and shared FileStoreIDs Requires the following config attributes: config.genome_fasta FilesStoreID for reference genome fasta file config.genome_fai FilesStoreID for reference genome fasta index file config.cores Number of cores for each job config.trim If True, trim adapters using bwakit config.amb FileStoreID for BWA index file prefix.amb config.ann FileStoreID for BWA index file prefix.ann config.bwt FileStoreID for BWA index file prefix.bwt config.pac FileStoreID for BWA index file prefix.pac config.sa FileStoreID for BWA index file prefix.sa config.alt FileStoreID for alternate contigs file or None :param str|None paired_url: URL to paired FASTQ :param str|None rg_line: Read group line (i.e. @RG\tID:foo\tSM:bar) :return: BAM FileStoreID :rtype: str """ |
bwa_config = deepcopy(config)
bwa_config.uuid = uuid
bwa_config.rg_line = rg_line
# bwa_alignment uses a different naming convention
bwa_config.ref = config.genome_fasta
bwa_config.fai = config.genome_fai
# Determine if sample is a FASTQ or BAM file using the file extension
basename, ext = os.path.splitext(url)
ext = ext.lower()
if ext == '.gz':
_, ext = os.path.splitext(basename)
ext = ext.lower()
# The pipeline currently supports FASTQ and BAM files
require(ext in ['.fq', '.fastq', '.bam'],
            'Please use .fq, .fastq, or .bam file extensions:\n%s' % url)
# Download fastq files
samples = []
input1 = job.addChildJobFn(download_url_job,
url,
name='file1',
s3_key_path=config.ssec,
disk=config.file_size)
samples.append(input1.rv())
# If the extension is for a BAM file, then configure bwakit to realign the BAM file.
if ext == '.bam':
bwa_config.bam = input1.rv()
else:
bwa_config.r1 = input1.rv()
# Download the paired FASTQ URL
if paired_url:
input2 = job.addChildJobFn(download_url_job,
paired_url,
name='file2',
s3_key_path=config.ssec,
disk=config.file_size)
samples.append(input2.rv())
bwa_config.r2 = input2.rv()
# The bwakit disk requirement depends on the size of the input files and the index
# Take the sum of the input files and scale it by a factor of 4
bwa_index_size = sum([getattr(config, index_file).size
for index_file in ['amb', 'ann', 'bwt', 'pac', 'sa', 'alt']
if getattr(config, index_file, None) is not None])
bwakit_disk = PromisedRequirement(lambda lst, index_size:
int(4 * sum(x.size for x in lst) + index_size),
samples,
bwa_index_size)
return job.addFollowOnJobFn(run_bwakit,
bwa_config,
sort=False, # BAM files are sorted later in the pipeline
trim=config.trim,
cores=config.cores,
disk=bwakit_disk).rv() |
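The rg_line argument is just the @RG string shown in the docstring. Below is a small, purely illustrative helper for building one; whether the separators should be literal backslash-t sequences (as required by the manifest regex earlier) or real tab characters (as used in the hard-coded bwa call later in these snippets) depends on where the string is supplied, so the default separator here is an assumption to check.

def build_rg_line(sample_id, platform='ILLUMINA', library='LIB1', sep='\\t'):
    # Illustrative only; field layout follows the @RG\tID:foo\tSM:bar examples above
    fields = ['@RG',
              'ID:{}'.format(sample_id),
              'SM:{}'.format(sample_id),
              'PL:{}'.format(platform),
              'LB:{}'.format(library)]
    return sep.join(fields)


print(build_rg_line('sample1'))
# @RG\tID:sample1\tSM:sample1\tPL:ILLUMINA\tLB:LIB1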
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def gatk_haplotype_caller(job, bam, bai, ref, fai, ref_dict, annotations=None, emit_threshold=10.0, call_threshold=30.0, unsafe_mode=False, hc_output=None):
""" Uses GATK HaplotypeCaller to identify SNPs and INDELs. Outputs variants in a Genomic VCF file. :param JobFunctionWrappingJob job: passed automatically by Toil :param str bam: FileStoreID for BAM file :param str bai: FileStoreID for BAM index file :param str ref: FileStoreID for reference genome fasta file :param str ref_dict: FileStoreID for reference sequence dictionary file :param str fai: FileStoreID for reference fasta index file :param list[str] annotations: List of GATK variant annotations, default is None :param float emit_threshold: Minimum phred-scale confidence threshold for a variant to be emitted, default is 10.0 :param float call_threshold: Minimum phred-scale confidence threshold for a variant to be called, default is 30.0 :param bool unsafe_mode: If True, runs gatk UNSAFE mode: "-U ALLOW_SEQ_DICT_INCOMPATIBILITY" :param str hc_output: URL or local path to pre-cooked VCF file, default is None :return: FileStoreID for GVCF file :rtype: str """ |
job.fileStore.logToMaster('Running GATK HaplotypeCaller')
inputs = {'genome.fa': ref,
'genome.fa.fai': fai,
'genome.dict': ref_dict,
'input.bam': bam,
'input.bam.bai': bai}
work_dir = job.fileStore.getLocalTempDir()
for name, file_store_id in inputs.iteritems():
job.fileStore.readGlobalFile(file_store_id, os.path.join(work_dir, name))
# Call GATK -- HaplotypeCaller with parameters to produce a genomic VCF file:
# https://software.broadinstitute.org/gatk/documentation/article?id=2803
command = ['-T', 'HaplotypeCaller',
'-nct', str(job.cores),
'-R', 'genome.fa',
'-I', 'input.bam',
'-o', 'output.g.vcf',
'-stand_call_conf', str(call_threshold),
'-stand_emit_conf', str(emit_threshold),
'-variant_index_type', 'LINEAR',
'-variant_index_parameter', '128000',
'--genotyping_mode', 'Discovery',
'--emitRefConfidence', 'GVCF']
if unsafe_mode:
command = ['-U', 'ALLOW_SEQ_DICT_INCOMPATIBILITY'] + command
if annotations:
for annotation in annotations:
command.extend(['-A', annotation])
# Uses docker_call mock mode to replace output with hc_output file
outputs = {'output.g.vcf': hc_output}
docker_call(job=job, work_dir=work_dir,
env={'JAVA_OPTS': '-Djava.io.tmpdir=/data/ -Xmx{}'.format(job.memory)},
parameters=command,
tool='quay.io/ucsc_cgl/gatk:3.5--dba6dae49156168a909c43330350c6161dc7ecc2',
inputs=inputs.keys(),
outputs=outputs,
mock=True if outputs['output.g.vcf'] else False)
return job.fileStore.writeGlobalFile(os.path.join(work_dir, 'output.g.vcf')) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def static_dag(job, uuid, rg_line, inputs):
""" Prefer this here as it allows us to pull the job functions from other jobs without rewrapping the job functions back together. bwa_inputs: Input arguments to be passed to BWA. adam_inputs: Input arguments to be passed to ADAM. gatk_preprocess_inputs: Input arguments to be passed to GATK preprocessing. gatk_adam_call_inputs: Input arguments to be passed to GATK haplotype caller for the result of ADAM preprocessing. gatk_gatk_call_inputs: Input arguments to be passed to GATK haplotype caller for the result of GATK preprocessing. """ |
# get work directory
work_dir = job.fileStore.getLocalTempDir()
inputs.cpu_count = cpu_count()
inputs.maxCores = sys.maxint
args = {'uuid': uuid,
's3_bucket': inputs.s3_bucket,
'sequence_dir': inputs.sequence_dir,
'dir_suffix': inputs.dir_suffix}
# get head BWA alignment job function and encapsulate it
inputs.rg_line = rg_line
inputs.output_dir = 's3://{s3_bucket}/alignment{dir_suffix}'.format(**args)
bwa = job.wrapJobFn(download_reference_files,
inputs,
[[uuid,
['s3://{s3_bucket}/{sequence_dir}/{uuid}_1.fastq.gz'.format(**args),
's3://{s3_bucket}/{sequence_dir}/{uuid}_2.fastq.gz'.format(**args)]]]).encapsulate()
# get head ADAM preprocessing job function and encapsulate it
adam_preprocess = job.wrapJobFn(static_adam_preprocessing_dag,
inputs,
's3://{s3_bucket}/alignment{dir_suffix}/{uuid}.bam'.format(**args),
's3://{s3_bucket}/analysis{dir_suffix}/{uuid}'.format(**args),
suffix='.adam').encapsulate()
# Configure options for Toil Germline pipeline. This function call only runs the preprocessing steps.
gatk_preprocessing_inputs = copy.deepcopy(inputs)
gatk_preprocessing_inputs.suffix = '.gatk'
gatk_preprocessing_inputs.preprocess = True
gatk_preprocessing_inputs.preprocess_only = True
gatk_preprocessing_inputs.output_dir = 's3://{s3_bucket}/analysis{dir_suffix}'.format(**args)
# get head GATK preprocessing job function and encapsulate it
gatk_preprocess = job.wrapJobFn(run_gatk_germline_pipeline,
                                    [GermlineSample(uuid,
                                                     's3://{s3_bucket}/alignment{dir_suffix}/{uuid}.bam'.format(**args),
                                                     None, # Does not require second URL or RG_Line
                                                     None)],
gatk_preprocessing_inputs).encapsulate()
# Configure options for Toil Germline pipeline for preprocessed ADAM BAM file.
adam_call_inputs = inputs
adam_call_inputs.suffix = '.adam'
adam_call_inputs.sorted = True
adam_call_inputs.preprocess = False
adam_call_inputs.run_vqsr = False
adam_call_inputs.joint_genotype = False
adam_call_inputs.output_dir = 's3://{s3_bucket}/analysis{dir_suffix}'.format(**args)
# get head GATK haplotype caller job function for the result of ADAM preprocessing and encapsulate it
gatk_adam_call = job.wrapJobFn(run_gatk_germline_pipeline,
                                   [GermlineSample(uuid,
                                                    's3://{s3_bucket}/analysis{dir_suffix}/{uuid}/{uuid}.adam.bam'.format(**args),
                                                    None,
                                                    None)],
adam_call_inputs).encapsulate()
# Configure options for Toil Germline pipeline for preprocessed GATK BAM file.
gatk_call_inputs = copy.deepcopy(inputs)
gatk_call_inputs.sorted = True
gatk_call_inputs.preprocess = False
gatk_call_inputs.run_vqsr = False
gatk_call_inputs.joint_genotype = False
gatk_call_inputs.output_dir = 's3://{s3_bucket}/analysis{dir_suffix}'.format(**args)
# get head GATK haplotype caller job function for the result of GATK preprocessing and encapsulate it
gatk_gatk_call = job.wrapJobFn(run_gatk_germline_pipeline,
                                   [GermlineSample(uuid,
                                                    's3://{s3_bucket}/analysis{dir_suffix}/{uuid}/{uuid}.gatk.bam'.format(**args),
                                                    None, None)],
gatk_call_inputs).encapsulate()
# wire up dag
if not inputs.skip_alignment:
job.addChild(bwa)
if (inputs.pipeline_to_run == "adam" or
inputs.pipeline_to_run == "both"):
if inputs.skip_preprocessing:
job.addChild(gatk_adam_call)
else:
if inputs.skip_alignment:
job.addChild(adam_preprocess)
else:
bwa.addChild(adam_preprocess)
adam_preprocess.addChild(gatk_adam_call)
if (inputs.pipeline_to_run == "gatk" or
inputs.pipeline_to_run == "both"):
if inputs.skip_preprocessing:
job.addChild(gatk_gatk_call)
else:
if inputs.skip_alignment:
job.addChild(gatk_preprocess)
else:
bwa.addChild(gatk_preprocess)
gatk_preprocess.addChild(gatk_gatk_call) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def download_encrypted_file(work_dir, url, key_path, name):
""" Downloads encrypted file from S3 Input1: Working directory Input2: S3 URL to be downloaded Input3: Path to key necessary for decryption Input4: name of file to be downloaded """ |
file_path = os.path.join(work_dir, name)
key = generate_unique_key(key_path, url)
encoded_key = base64.b64encode(key)
encoded_key_md5 = base64.b64encode(hashlib.md5(key).digest())
h1 = 'x-amz-server-side-encryption-customer-algorithm:AES256'
h2 = 'x-amz-server-side-encryption-customer-key:{}'.format(encoded_key)
h3 = 'x-amz-server-side-encryption-customer-key-md5:{}'.format(encoded_key_md5)
try:
subprocess.check_call(['curl', '-fs', '--retry', '5', '-H', h1, '-H', h2, '-H', h3, url, '-o', file_path])
except OSError:
raise RuntimeError('Failed to find "curl". Install via "apt-get install curl"')
assert os.path.exists(file_path) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def return_input_paths(job, work_dir, ids, *args):
""" Returns the paths of files from the FileStore Input1: Toil job instance Input2: Working directory Input3: jobstore id dictionary Input4: names of files to be returned from the jobstore Returns: path(s) to the file(s) requested -- unpack these! """ |
paths = OrderedDict()
for name in args:
if not os.path.exists(os.path.join(work_dir, name)):
file_path = job.fileStore.readGlobalFile(ids[name], os.path.join(work_dir, name))
else:
file_path = os.path.join(work_dir, name)
paths[name] = file_path
if len(args) == 1:
return file_path
return paths.values() |
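A short usage sketch inside a hypothetical job function, mirroring the calls made elsewhere in these snippets; note the asymmetry that a single requested name returns a bare path while multiple names return a list to unpack.

def example_io_job(job, ids):
    # Hypothetical job body; 'ids' maps symbolic names to FileStoreIDs
    work_dir = job.fileStore.getLocalTempDir()
    # One name -> a single path
    ref = return_input_paths(job, work_dir, ids, 'ref.fa')
    # Several names -> a list of paths in the order requested
    ref, fai = return_input_paths(job, work_dir, ids, 'ref.fa', 'ref.fa.fai')
    return ref, fai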
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def move_to_output_dir(work_dir, output_dir, uuid=None, files=list()):
""" Moves files from work_dir to output_dir Input1: Working directory Input2: Output directory Input3: UUID to be preprended onto file name Input4: list of file names to be moved from working dir to output dir """ |
for fname in files:
if uuid is None:
shutil.move(os.path.join(work_dir, fname), os.path.join(output_dir, fname))
else:
shutil.move(os.path.join(work_dir, fname), os.path.join(output_dir, '{}.{}'.format(uuid, fname))) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def batch_start(job, input_args):
""" Downloads shared files that are used by all samples for alignment and places them in the jobstore. """ |
shared_files = ['ref.fa', 'ref.fa.amb', 'ref.fa.ann', 'ref.fa.bwt', 'ref.fa.pac', 'ref.fa.sa', 'ref.fa.fai']
shared_ids = {}
for fname in shared_files:
url = input_args[fname]
shared_ids[fname] = job.addChildJobFn(download_from_url, url, fname).rv()
job.addFollowOnJobFn(spawn_batch_jobs, shared_ids, input_args) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def spawn_batch_jobs(job, shared_ids, input_args):
""" Spawns an alignment job for every sample in the input configuration file """ |
samples = []
config = input_args['config']
with open(config, 'r') as f_in:
for line in f_in:
line = line.strip().split(',')
uuid = line[0]
urls = line[1:]
samples.append((uuid, urls))
for sample in samples:
job.addChildJobFn(alignment, shared_ids, input_args, sample, cores=32, memory='20 G', disk='100 G') |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def alignment(job, ids, input_args, sample):
""" Runs BWA and then Bamsort on the supplied fastqs for this sample Input1: Toil Job instance Input2: jobstore id dictionary Input3: Input arguments dictionary Input4: Sample tuple -- contains uuid and urls for the sample """ |
uuid, urls = sample
# ids['bam'] = job.fileStore.getEmptyFileStoreID()
work_dir = job.fileStore.getLocalTempDir()
output_dir = input_args['output_dir']
key_path = input_args['ssec']
cores = multiprocessing.cpu_count()
# I/O
return_input_paths(job, work_dir, ids, 'ref.fa', 'ref.fa.amb', 'ref.fa.ann',
'ref.fa.bwt', 'ref.fa.pac', 'ref.fa.sa', 'ref.fa.fai')
# Get fastqs associated with this sample
for url in urls:
download_encrypted_file(work_dir, url, key_path, os.path.basename(url))
# Parameters for BWA and Bamsort
docker_cmd = ['docker', 'run', '--rm', '-v', '{}:/data'.format(work_dir)]
bwa_command = ["jvivian/bwa",
"mem",
"-R", "@RG\tID:{0}\tPL:Illumina\tSM:{0}\tLB:KapaHyper".format(uuid),
"-T", str(0),
"-t", str(cores),
"/data/ref.fa"] + [os.path.join('/data/', os.path.basename(x)) for x in urls]
bamsort_command = ["jeltje/biobambam",
"/usr/local/bin/bamsort",
"inputformat=sam",
"level=1",
"inputthreads={}".format(cores),
"outputthreads={}".format(cores),
"calmdnm=1",
"calmdnmrecompindetonly=1",
"calmdnmreference=/data/ref.fa",
"I=/data/{}".format(uuid + '.sam')]
# Piping the output to a file handle
with open(os.path.join(work_dir, uuid + '.sam'), 'w') as f_out:
subprocess.check_call(docker_cmd + bwa_command, stdout=f_out)
with open(os.path.join(work_dir, uuid + '.bam'), 'w') as f_out:
subprocess.check_call(docker_cmd + bamsort_command, stdout=f_out)
# Save in JobStore
# job.fileStore.updateGlobalFile(ids['bam'], os.path.join(work_dir, uuid + '.bam'))
ids['bam'] = job.fileStore.writeGlobalFile(os.path.join(work_dir, uuid + '.bam'))
# Copy file to S3
if input_args['s3_dir']:
job.addChildJobFn(upload_bam_to_s3, ids, input_args, sample, cores=32, memory='20 G', disk='30 G')
# Move file in output_dir
if input_args['output_dir']:
move_to_output_dir(work_dir, output_dir, uuid=None, files=[uuid + '.bam']) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def upload_bam_to_s3(job, ids, input_args, sample):
""" Uploads output BAM from sample to S3 Input1: Toil Job instance Input2: jobstore id dictionary Input3: Input arguments dictionary Input4: Sample tuple -- contains uuid and urls for the sample """ |
uuid, urls = sample
key_path = input_args['ssec']
work_dir = job.fileStore.getLocalTempDir()
# Parse s3_dir to get bucket and s3 path
s3_dir = input_args['s3_dir']
bucket_name = s3_dir.lstrip('/').split('/')[0]
bucket_dir = '/'.join(s3_dir.lstrip('/').split('/')[1:])
base_url = 'https://s3-us-west-2.amazonaws.com/'
url = os.path.join(base_url, bucket_name, bucket_dir, uuid + '.bam')
    # I/O
job.fileStore.readGlobalFile(ids['bam'], os.path.join(work_dir, uuid + '.bam'))
# Generate keyfile for upload
with open(os.path.join(work_dir, uuid + '.key'), 'wb') as f_out:
f_out.write(generate_unique_key(key_path, url))
# Commands to upload to S3 via S3AM
s3am_command = ['s3am',
'upload',
'--sse-key-file', os.path.join(work_dir, uuid + '.key'),
'file://{}'.format(os.path.join(work_dir, uuid + '.bam')),
bucket_name,
os.path.join(bucket_dir, uuid + '.bam')]
subprocess.check_call(s3am_command) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def tarball_files(work_dir, tar_name, uuid=None, files=None):
""" Tars a group of files together into a tarball work_dir: str Current Working Directory tar_name: str Name of tarball uuid: str UUID to stamp files with files: str(s) List of filenames to place in the tarball from working directory """ |
with tarfile.open(os.path.join(work_dir, tar_name), 'w:gz') as f_out:
for fname in files:
if uuid:
f_out.add(os.path.join(work_dir, fname), arcname=uuid + '.' + fname)
else:
f_out.add(os.path.join(work_dir, fname), arcname=fname) |
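A self-contained usage sketch with throwaway files; the file names and UUID are invented, and tarball_files is assumed to be importable from this module.

import os
import tempfile

work_dir = tempfile.mkdtemp()
for name in ['stats.txt', 'counts.txt']:
    with open(os.path.join(work_dir, name), 'w') as f:
        f.write('example\n')

tarball_files(work_dir, tar_name='sample.tar.gz', uuid='sample1', files=['stats.txt', 'counts.txt'])
print(os.path.exists(os.path.join(work_dir, 'sample.tar.gz')))  # True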
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def start_batch(job, input_args):
""" This function will administer 5 jobs at a time then recursively call itself until subset is empty """ |
samples = parse_sra(input_args['sra'])
# for analysis_id in samples:
    job.addChildJobFn(download_and_transfer_sample, input_args, samples, cores=1, disk='30G') |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def download_and_transfer_sample(job, input_args, samples):
""" Downloads a sample from dbGaP via SRAToolKit, then uses S3AM to transfer it to S3 input_args: dict Dictionary of input arguments analysis_id: str An analysis ID for a sample in CGHub """ |
if len(samples) > 1:
a = samples[len(samples)/2:]
b = samples[:len(samples)/2]
job.addChildJobFn(download_and_transfer_sample, input_args, a, disk='30G')
job.addChildJobFn(download_and_transfer_sample, input_args, b, disk='30G')
else:
analysis_id = samples[0]
work_dir = job.fileStore.getLocalTempDir()
sudo = input_args['sudo']
# Acquire dbgap_key
shutil.copy(input_args['dbgap_key'], os.path.join(work_dir, 'dbgap.ngc'))
# Call to fastq-dump to pull down SRA files and convert to fastq
if input_args['single_end']:
parameters = [analysis_id]
else:
parameters = ['--split-files', analysis_id]
docker_call(tool='quay.io/ucsc_cgl/fastq-dump:2.5.7--4577a6c1a3c94adaa0c25dd6c03518ee610433d1',
work_dir=work_dir, tool_parameters=parameters, sudo=sudo)
# Collect files and encapsulate into a tarball
shutil.rmtree(os.path.join(work_dir, 'sra'))
sample_name = analysis_id + '.tar.gz'
if input_args['single_end']:
r = [os.path.basename(x) for x in glob.glob(os.path.join(work_dir, '*.f*'))]
tarball_files(work_dir, tar_name=sample_name, files=r)
else:
r1 = [os.path.basename(x) for x in glob.glob(os.path.join(work_dir, '*_1*'))]
r2 = [os.path.basename(x) for x in glob.glob(os.path.join(work_dir, '*_2*'))]
tarball_files(work_dir, tar_name=sample_name, files=r1 + r2)
# Parse s3_dir to get bucket and s3 path
key_path = input_args['ssec']
s3_dir = input_args['s3_dir']
bucket_name = s3_dir.lstrip('/').split('/')[0]
base_url = 'https://s3-us-west-2.amazonaws.com/'
url = os.path.join(base_url, bucket_name, sample_name)
# Generate keyfile for upload
with open(os.path.join(work_dir, 'temp.key'), 'wb') as f_out:
f_out.write(generate_unique_key(key_path, url))
# Upload to S3 via S3AM
s3am_command = ['s3am',
'upload',
'--sse-key-file', os.path.join(work_dir, 'temp.key'),
'file://{}'.format(os.path.join(work_dir, sample_name)),
's3://' + bucket_name + '/']
subprocess.check_call(s3am_command) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def output_file_job(job, filename, file_id, output_dir, s3_key_path=None):
""" Uploads a file from the FileStore to an output directory on the local filesystem or S3. :param JobFunctionWrappingJob job: passed automatically by Toil :param str filename: basename for file :param str file_id: FileStoreID :param str output_dir: Amazon S3 URL or local path :param str s3_key_path: (OPTIONAL) Path to 32-byte key to be used for SSE-C encryption :return: """ |
job.fileStore.logToMaster('Writing {} to {}'.format(filename, output_dir))
work_dir = job.fileStore.getLocalTempDir()
filepath = job.fileStore.readGlobalFile(file_id, os.path.join(work_dir, filename))
if urlparse(output_dir).scheme == 's3':
s3am_upload(job=job, fpath=os.path.join(work_dir, filepath),
s3_dir=output_dir,
s3_key_path=s3_key_path)
elif os.path.exists(os.path.join(output_dir, filename)):
job.fileStore.logToMaster("File already exists: {}".format(filename))
else:
mkdir_p(output_dir)
copy_files([filepath], output_dir) |
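A hedged sketch of how a parent job typically wires this up, mirroring the earlier calls in these snippets; the filename, output directory, and disk estimate are placeholders.

import os


def upload_filtered_vcf(job, vcf_id, config, uuid='sample1'):
    # Hypothetical parent job: write an already-stored VCF to the sample's output directory
    job.addChildJobFn(output_file_job,
                      '{}.filtered.vcf'.format(uuid),
                      vcf_id,
                      os.path.join(config.output_dir, uuid),
                      s3_key_path=config.ssec,
                      disk='2G')  # or a PromisedRequirement on the file size, as done above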
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def download_encrypted_file(job, input_args, name):
""" Downloads encrypted files from S3 via header injection input_args: dict Input dictionary defined in main() name: str Symbolic name associated with file """ |
work_dir = job.fileStore.getLocalTempDir()
key_path = input_args['ssec']
file_path = os.path.join(work_dir, name)
url = input_args[name]
with open(key_path, 'r') as f:
key = f.read()
if len(key) != 32:
raise RuntimeError('Invalid Key! Must be 32 bytes: {}'.format(key))
key = generate_unique_key(key_path, url)
encoded_key = base64.b64encode(key)
encoded_key_md5 = base64.b64encode(hashlib.md5(key).digest())
h1 = 'x-amz-server-side-encryption-customer-algorithm:AES256'
h2 = 'x-amz-server-side-encryption-customer-key:{}'.format(encoded_key)
h3 = 'x-amz-server-side-encryption-customer-key-md5:{}'.format(encoded_key_md5)
try:
subprocess.check_call(['curl', '-fs', '--retry', '5', '-H', h1, '-H', h2, '-H', h3, url, '-o', file_path])
except OSError:
raise RuntimeError('Failed to find "curl". Install via "apt-get install curl"')
assert os.path.exists(file_path)
return job.fileStore.writeGlobalFile(file_path) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def download_from_url(job, url):
""" Simple curl request made for a given url url: str URL to download """ |
work_dir = job.fileStore.getLocalTempDir()
file_path = os.path.join(work_dir, os.path.basename(url))
if not os.path.exists(file_path):
if url.startswith('s3:'):
download_from_s3_url(file_path, url)
else:
try:
subprocess.check_call(['curl', '-fs', '--retry', '5', '--create-dir', url, '-o', file_path])
except OSError:
raise RuntimeError('Failed to find "curl". Install via "apt-get install curl"')
assert os.path.exists(file_path)
return job.fileStore.writeGlobalFile(file_path) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def docker_call(work_dir, tool_parameters, tool, java_opts=None, outfile=None, sudo=False):
""" Makes subprocess call of a command to a docker container. tool_parameters: list An array of the parameters to be passed to the tool tool: str Name of the Docker image to be used (e.g. quay.io/ucsc_cgl/samtools) java_opts: str Optional commands to pass to a java jar execution. (e.g. '-Xmx15G') outfile: file Filehandle that stderr will be passed to sudo: bool If the user wants the docker command executed as sudo """ |
base_docker_call = 'docker run --log-driver=none --rm -v {}:/data'.format(work_dir).split()
if sudo:
base_docker_call = ['sudo'] + base_docker_call
if java_opts:
base_docker_call = base_docker_call + ['-e', 'JAVA_OPTS={}'.format(java_opts)]
try:
if outfile:
subprocess.check_call(base_docker_call + [tool] + tool_parameters, stdout=outfile)
else:
subprocess.check_call(base_docker_call + [tool] + tool_parameters)
except subprocess.CalledProcessError:
raise RuntimeError('docker command returned a non-zero exit status. Check error logs.')
except OSError:
raise RuntimeError('docker not found on system. Install on all nodes.') |
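A usage sketch matching how this helper is invoked elsewhere in these snippets, here indexing a BAM with the samtools image used later on; the work directory is illustrative and the call only succeeds on a host with Docker installed and the input file present.

work_dir = '/tmp/example_work_dir'  # would normally come from job.fileStore.getLocalTempDir()
docker_call(work_dir=work_dir,
            tool_parameters=['index', '/data/sorted.bam'],
            tool='quay.io/ucsc_cgl/samtools:0.1.19--dd5ac549b95eb3e5d166a5e310417ef13651994e',
            sudo=False)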
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def copy_to_output_dir(work_dir, output_dir, uuid=None, files=list()):
""" A list of files to move from work_dir to output_dir. work_dir: str Current working directory output_dir: str Output directory for files to go uuid: str UUID to "stamp" onto output files files: list List of files to iterate through """ |
for fname in files:
if uuid is None:
shutil.copy(os.path.join(work_dir, fname), os.path.join(output_dir, fname))
else:
shutil.copy(os.path.join(work_dir, fname), os.path.join(output_dir, '{}.{}'.format(uuid, fname))) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def program_checks(job, input_args):
""" Checks that dependency programs are installed. input_args: dict Dictionary of input arguments (from main()) """ |
# Program checks
for program in ['curl', 'docker', 'unzip', 'samtools']:
assert which(program), 'Program "{}" must be installed on every node.'.format(program)
job.addChildJobFn(download_shared_files, input_args) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def download_shared_files(job, input_args):
""" Downloads and stores shared inputs files in the FileStore input_args: dict Dictionary of input arguments (from main()) """ |
shared_files = ['unc.bed', 'hg19.transcripts.fa', 'composite_exons.bed', 'normalize.pl', 'rsem_ref.zip',
'ebwt.zip', 'chromosomes.zip']
shared_ids = {}
for f in shared_files:
shared_ids[f] = job.addChildJobFn(download_from_url, input_args[f]).rv()
if input_args['config'] or input_args['config_fastq']:
job.addFollowOnJobFn(parse_config_file, shared_ids, input_args)
else:
sample_path = input_args['input']
uuid = os.path.splitext(os.path.basename(sample_path))[0]
sample = (uuid, sample_path)
job.addFollowOnJobFn(download_sample, shared_ids, input_args, sample) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def parse_config_file(job, ids, input_args):
""" Launches pipeline for each sample. shared_ids: dict Dictionary of fileStore IDs input_args: dict Dictionary of input arguments """ |
samples = []
config = input_args['config']
with open(config, 'r') as f:
for line in f.readlines():
if not line.isspace():
sample = line.strip().split(',')
samples.append(sample)
for sample in samples:
job.addChildJobFn(download_sample, ids, input_args, sample) |
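For reference, a hedged illustration of the CSV layout this parser expects: one sample per line, a UUID followed by either a single sample tarball URL or a pair of FASTQ paths (download_sample below treats a two-field line as a tarball sample and a three-field line as paired FASTQs). All values are invented.

example_config = '\n'.join([
    'UUID_1,https://example.org/samples/UUID_1.tar',
    'UUID_2,/data/fastq/UUID_2_R1.fastq.gz,/data/fastq/UUID_2_R2.fastq.gz',
])

with open('example_config.csv', 'w') as f:
    f.write(example_config + '\n')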
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def download_sample(job, ids, input_args, sample):
""" Defines variables unique to a sample that are used in the rest of the pipelines ids: dict Dictionary of fileStore IDS input_args: dict Dictionary of input arguments sample: tuple Contains uuid and sample_url """ |
if len(sample) == 2:
uuid, sample_location = sample
url1, url2 = None, None
else:
uuid, url1, url2 = sample
sample_location = None
# Update values unique to sample
sample_input = dict(input_args)
sample_input['uuid'] = uuid
sample_input['sample.tar'] = sample_location
if sample_input['output_dir']:
sample_input['output_dir'] = os.path.join(input_args['output_dir'], uuid)
sample_input['cpu_count'] = multiprocessing.cpu_count()
job_vars = (sample_input, ids)
# Download or locate local file and place in the jobStore
if sample_input['input']:
ids['sample.tar'] = job.fileStore.writeGlobalFile(os.path.abspath(sample_location))
elif sample_input['config_fastq']:
ids['R1.fastq'] = job.fileStore.writeGlobalFile(urlparse(url1).path)
ids['R2.fastq'] = job.fileStore.writeGlobalFile(urlparse(url2).path)
else:
if sample_input['ssec']:
ids['sample.tar'] = job.addChildJobFn(download_encrypted_file, sample_input, 'sample.tar', disk='25G').rv()
else:
ids['sample.tar'] = job.addChildJobFn(download_from_url, sample_input['sample.tar'], disk='25G').rv()
job.addFollowOnJobFn(static_dag_launchpoint, job_vars) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def static_dag_launchpoint(job, job_vars):
""" Statically define jobs in the pipeline job_vars: tuple Tuple of dictionaries: input_args and ids """ |
input_args, ids = job_vars
if input_args['config_fastq']:
cores = input_args['cpu_count']
a = job.wrapJobFn(mapsplice, job_vars, cores=cores, disk='130G').encapsulate()
else:
a = job.wrapJobFn(merge_fastqs, job_vars, disk='70 G').encapsulate()
b = job.wrapJobFn(consolidate_output, job_vars, a.rv())
# Take advantage of "encapsulate" to simplify pipeline wiring
job.addChild(a)
a.addChild(b) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def merge_fastqs(job, job_vars):
""" Unzips input sample and concats the Read1 and Read2 groups together. job_vars: tuple Tuple of dictionaries: input_args and ids """ |
input_args, ids = job_vars
work_dir = job.fileStore.getLocalTempDir()
cores = input_args['cpu_count']
single_end_reads = input_args['single_end_reads']
# I/O
sample = return_input_paths(job, work_dir, ids, 'sample.tar')
# Untar File
# subprocess.check_call(['unzip', sample, '-d', work_dir])
subprocess.check_call(['tar', '-xvf', sample, '-C', work_dir])
# Remove large files before creating concat versions.
os.remove(os.path.join(work_dir, 'sample.tar'))
# Zcat files in parallel
if single_end_reads:
files = sorted(glob.glob(os.path.join(work_dir, '*')))
with open(os.path.join(work_dir, 'R1.fastq'), 'w') as f1:
subprocess.check_call(['zcat'] + files, stdout=f1)
# FileStore
ids['R1.fastq'] = job.fileStore.writeGlobalFile(os.path.join(work_dir, 'R1.fastq'))
else:
r1_files = sorted(glob.glob(os.path.join(work_dir, '*R1*')))
r2_files = sorted(glob.glob(os.path.join(work_dir, '*R2*')))
with open(os.path.join(work_dir, 'R1.fastq'), 'w') as f1:
p1 = subprocess.Popen(['zcat'] + r1_files, stdout=f1)
with open(os.path.join(work_dir, 'R2.fastq'), 'w') as f2:
p2 = subprocess.Popen(['zcat'] + r2_files, stdout=f2)
p1.wait()
p2.wait()
# FileStore
ids['R1.fastq'] = job.fileStore.writeGlobalFile(os.path.join(work_dir, 'R1.fastq'))
ids['R2.fastq'] = job.fileStore.writeGlobalFile(os.path.join(work_dir, 'R2.fastq'))
job.fileStore.deleteGlobalFile(ids['sample.tar'])
# Spawn child job
return job.addChildJobFn(mapsplice, job_vars, cores=cores, disk='130 G').rv() |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def mapsplice(job, job_vars):
""" Maps RNA-Seq reads to a reference genome. job_vars: tuple Tuple of dictionaries: input_args and ids """ |
# Unpack variables
input_args, ids = job_vars
work_dir = job.fileStore.getLocalTempDir()
cores = input_args['cpu_count']
sudo = input_args['sudo']
single_end_reads = input_args['single_end_reads']
files_to_delete = ['R1.fastq']
# I/O
return_input_paths(job, work_dir, ids, 'ebwt.zip', 'chromosomes.zip')
if single_end_reads:
return_input_paths(job, work_dir, ids, 'R1.fastq')
else:
return_input_paths(job, work_dir, ids, 'R1.fastq', 'R2.fastq')
files_to_delete.extend(['R2.fastq'])
for fname in ['chromosomes.zip', 'ebwt.zip']:
subprocess.check_call(['unzip', '-o', os.path.join(work_dir, fname), '-d', work_dir])
# Command and call
parameters = ['-p', str(cores),
'-s', '25',
'--bam',
'--min-map-len', '50',
'-x', '/data/ebwt',
'-c', '/data/chromosomes',
'-1', '/data/R1.fastq',
'-o', '/data']
if not single_end_reads:
parameters.extend(['-2', '/data/R2.fastq'])
docker_call(tool='quay.io/ucsc_cgl/mapsplice:2.1.8--dd5ac549b95eb3e5d166a5e310417ef13651994e',
tool_parameters=parameters, work_dir=work_dir, sudo=sudo)
# Write to FileStore
for fname in ['alignments.bam', 'stats.txt']:
ids[fname] = job.fileStore.writeGlobalFile(os.path.join(work_dir, fname))
for fname in files_to_delete:
job.fileStore.deleteGlobalFile(ids[fname])
# Run child job
# map_id = job.addChildJobFn(mapping_stats, job_vars).rv()
if input_args['upload_bam_to_s3'] and input_args['s3_dir']:
job.addChildJobFn(upload_bam_to_s3, job_vars)
output_ids = job.addChildJobFn(add_read_groups, job_vars, disk='30 G').rv()
return output_ids |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def add_read_groups(job, job_vars):
""" This function adds read groups to the headers job_vars: tuple Tuple of dictionaries: input_args and ids """ |
input_args, ids = job_vars
work_dir = job.fileStore.getLocalTempDir()
sudo = input_args['sudo']
# I/O
alignments = return_input_paths(job, work_dir, ids, 'alignments.bam')
output = os.path.join(work_dir, 'rg_alignments.bam')
    # Command and call
parameter = ['AddOrReplaceReadGroups',
'INPUT={}'.format(docker_path(alignments)),
'OUTPUT={}'.format(docker_path(output)),
'RGSM={}'.format(input_args['uuid']),
'RGID={}'.format(input_args['uuid']),
'RGLB=TruSeq',
'RGPL=illumina',
'RGPU=barcode',
'VALIDATION_STRINGENCY=SILENT']
docker_call(tool='quay.io/ucsc_cgl/picardtools:1.95--dd5ac549b95eb3e5d166a5e310417ef13651994e',
tool_parameters=parameter, work_dir=work_dir, sudo=sudo)
# Write to FileStore
ids['rg_alignments.bam'] = job.fileStore.writeGlobalFile(output)
# Run child job
return job.addChildJobFn(bamsort_and_index, job_vars, disk='30 G').rv() |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def bamsort_and_index(job, job_vars):
""" Sorts bam file and produces index file job_vars: tuple Tuple of dictionaries: input_args and ids """ |
# Unpack variables
input_args, ids = job_vars
work_dir = job.fileStore.getLocalTempDir()
sudo = input_args['sudo']
# I/O
rg_alignments = return_input_paths(job, work_dir, ids, 'rg_alignments.bam')
output = os.path.join(work_dir, 'sorted.bam')
# Command -- second argument is "Output Prefix"
cmd1 = ['sort', docker_path(rg_alignments), docker_path('sorted')]
cmd2 = ['index', docker_path(output)]
docker_call(tool='quay.io/ucsc_cgl/samtools:0.1.19--dd5ac549b95eb3e5d166a5e310417ef13651994e',
tool_parameters=cmd1, work_dir=work_dir, sudo=sudo)
docker_call(tool='quay.io/ucsc_cgl/samtools:0.1.19--dd5ac549b95eb3e5d166a5e310417ef13651994e',
tool_parameters=cmd2, work_dir=work_dir, sudo=sudo)
# Write to FileStore
ids['sorted.bam'] = job.fileStore.writeGlobalFile(output)
ids['sorted.bam.bai'] = job.fileStore.writeGlobalFile(os.path.join(work_dir, 'sorted.bam.bai'))
# Run child job
output_ids = job.addChildJobFn(sort_bam_by_reference, job_vars, disk='50 G').rv()
rseq_id = job.addChildJobFn(rseq_qc, job_vars, disk='20 G').rv()
return rseq_id, output_ids |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def sort_bam_by_reference(job, job_vars):
""" Sorts the bam by reference job_vars: tuple Tuple of dictionaries: input_args and ids """ |
# Unpack variables
input_args, ids = job_vars
work_dir = job.fileStore.getLocalTempDir()
# I/O
sorted_bam, sorted_bai = return_input_paths(job, work_dir, ids, 'sorted.bam', 'sorted.bam.bai')
output = os.path.join(work_dir, 'sort_by_ref.bam')
# Call: Samtools
ref_seqs = []
handle = subprocess.Popen(["samtools", "view", "-H", sorted_bam], stdout=subprocess.PIPE).stdout
for line in handle:
if line.startswith("@SQ"):
tmp = line.split("\t")
chrom = tmp[1].split(":")[1]
ref_seqs.append(chrom)
handle.close()
# Iterate through chromosomes to create mini-bams
for chrom in ref_seqs:
# job.addChildJobFn(sbbr_child, chrom, os.path.join(work_dir, chrom), sorted_bam)
cmd_view = ["samtools", "view", "-b", sorted_bam, chrom]
cmd_sort = ["samtools", "sort", "-m", "3000000000", "-n", "-", os.path.join(work_dir, chrom)]
p1 = subprocess.Popen(cmd_view, stdout=subprocess.PIPE)
subprocess.check_call(cmd_sort, stdin=p1.stdout)
sorted_files = [os.path.join(work_dir, chrom) + '.bam' for chrom in ref_seqs]
cmd = ["samtools", "cat", "-o", output] + sorted_files
subprocess.check_call(cmd)
# Write to FileStore
ids['sort_by_ref.bam'] = job.fileStore.writeGlobalFile(output)
rsem_id = job.addChildJobFn(transcriptome, job_vars, disk='30 G', memory='30 G').rv()
exon_id = job.addChildJobFn(exon_count, job_vars, disk='30 G').rv()
return exon_id, rsem_id |