repo | path | func_name | code | language | docstring | sha | url | partition |
---|---|---|---|---|---|---|---|---|
inveniosoftware/invenio-access | invenio_access/permissions.py | Permission._load_permissions | def _load_permissions(self):
"""Load permissions associated to actions."""
result = _P(needs=set(), excludes=set())
if not self.allow_by_default:
result.needs.update(self.explicit_needs)
for explicit_need in self.explicit_needs:
if explicit_need.method == 'action':
action = current_access.get_action_cache(
self._cache_key(explicit_need)
)
if action is None:
action = _P(needs=set(), excludes=set())
actionsusers = ActionUsers.query_by_action(
explicit_need
).all()
actionsroles = ActionRoles.query_by_action(
explicit_need
).join(
ActionRoles.role
).all()
actionssystem = ActionSystemRoles.query_by_action(
explicit_need
).all()
for db_action in chain(
actionsusers, actionsroles, actionssystem):
if db_action.exclude:
action.excludes.add(db_action.need)
else:
action.needs.add(db_action.need)
current_access.set_action_cache(
self._cache_key(explicit_need),
action
)
# in-place update of results
result.update(action)
elif self.allow_by_default:
result.needs.add(explicit_need)
self._permissions = result | python | Load permissions associated to actions. | 3b033a4bdc110eb2f7e9f08f0744a780884bfc80 | https://github.com/inveniosoftware/invenio-access/blob/3b033a4bdc110eb2f7e9f08f0744a780884bfc80/invenio_access/permissions.py#L122-L165 | train |
inveniosoftware/invenio-access | invenio_access/cli.py | lazy_result | def lazy_result(f):
"""Decorate function to return LazyProxy."""
@wraps(f)
def decorated(ctx, param, value):
return LocalProxy(lambda: f(ctx, param, value))
return decorated | python | Decorate function to return LazyProxy. | 3b033a4bdc110eb2f7e9f08f0744a780884bfc80 | https://github.com/inveniosoftware/invenio-access/blob/3b033a4bdc110eb2f7e9f08f0744a780884bfc80/invenio_access/cli.py#L30-L35 | train |
inveniosoftware/invenio-access | invenio_access/cli.py | process_action | def process_action(ctx, param, value):
"""Return an action if exists."""
actions = current_app.extensions['invenio-access'].actions
if value not in actions:
raise click.BadParameter('Action "%s" is not registered.', value)
return actions[value] | python | Return an action if exists. | 3b033a4bdc110eb2f7e9f08f0744a780884bfc80 | https://github.com/inveniosoftware/invenio-access/blob/3b033a4bdc110eb2f7e9f08f0744a780884bfc80/invenio_access/cli.py#L39-L44 | train |
inveniosoftware/invenio-access | invenio_access/cli.py | process_email | def process_email(ctx, param, value):
"""Return an user if it exists."""
user = User.query.filter(User.email == value).first()
if not user:
raise click.BadParameter('User with email \'%s\' not found.', value)
return user | python | Return an user if it exists. | 3b033a4bdc110eb2f7e9f08f0744a780884bfc80 | https://github.com/inveniosoftware/invenio-access/blob/3b033a4bdc110eb2f7e9f08f0744a780884bfc80/invenio_access/cli.py#L48-L53 | train |
inveniosoftware/invenio-access | invenio_access/cli.py | process_role | def process_role(ctx, param, value):
"""Return a role if it exists."""
role = Role.query.filter(Role.name == value).first()
if not role:
raise click.BadParameter('Role with name \'%s\' not found.', value)
return role | python | Return a role if it exists. | 3b033a4bdc110eb2f7e9f08f0744a780884bfc80 | https://github.com/inveniosoftware/invenio-access/blob/3b033a4bdc110eb2f7e9f08f0744a780884bfc80/invenio_access/cli.py#L57-L62 | train |
inveniosoftware/invenio-access | invenio_access/cli.py | allow_user | def allow_user(user):
"""Allow a user identified by an email address."""
def processor(action, argument):
db.session.add(
ActionUsers.allow(action, argument=argument, user_id=user.id)
)
return processor | python | Allow a user identified by an email address. | 3b033a4bdc110eb2f7e9f08f0744a780884bfc80 | https://github.com/inveniosoftware/invenio-access/blob/3b033a4bdc110eb2f7e9f08f0744a780884bfc80/invenio_access/cli.py#L108-L114 | train |
inveniosoftware/invenio-access | invenio_access/cli.py | allow_role | def allow_role(role):
"""Allow a role identified by an email address."""
def processor(action, argument):
db.session.add(
ActionRoles.allow(action, argument=argument, role_id=role.id)
)
return processor | python | Allow a role identified by an email address. | 3b033a4bdc110eb2f7e9f08f0744a780884bfc80 | https://github.com/inveniosoftware/invenio-access/blob/3b033a4bdc110eb2f7e9f08f0744a780884bfc80/invenio_access/cli.py#L119-L125 | train |
inveniosoftware/invenio-access | invenio_access/cli.py | process_allow_action | def process_allow_action(processors, action, argument):
"""Process allow action."""
for processor in processors:
processor(action, argument)
db.session.commit() | python | Process allow action. | 3b033a4bdc110eb2f7e9f08f0744a780884bfc80 | https://github.com/inveniosoftware/invenio-access/blob/3b033a4bdc110eb2f7e9f08f0744a780884bfc80/invenio_access/cli.py#L130-L134 | train |
inveniosoftware/invenio-access | invenio_access/cli.py | deny_user | def deny_user(user):
"""Deny a user identified by an email address."""
def processor(action, argument):
db.session.add(
ActionUsers.deny(action, argument=argument, user_id=user.id)
)
return processor | python | Deny a user identified by an email address. | 3b033a4bdc110eb2f7e9f08f0744a780884bfc80 | https://github.com/inveniosoftware/invenio-access/blob/3b033a4bdc110eb2f7e9f08f0744a780884bfc80/invenio_access/cli.py#L149-L155 | train |
inveniosoftware/invenio-access | invenio_access/cli.py | deny_role | def deny_role(role):
"""Deny a role identified by an email address."""
def processor(action, argument):
db.session.add(
ActionRoles.deny(action, argument=argument, role_id=role.id)
)
return processor | python | Deny a role identified by an email address. | 3b033a4bdc110eb2f7e9f08f0744a780884bfc80 | https://github.com/inveniosoftware/invenio-access/blob/3b033a4bdc110eb2f7e9f08f0744a780884bfc80/invenio_access/cli.py#L160-L166 | train |
inveniosoftware/invenio-access | invenio_access/cli.py | process_deny_action | def process_deny_action(processors, action, argument):
"""Process deny action."""
for processor in processors:
processor(action, argument)
db.session.commit() | python | Process deny action. | 3b033a4bdc110eb2f7e9f08f0744a780884bfc80 | https://github.com/inveniosoftware/invenio-access/blob/3b033a4bdc110eb2f7e9f08f0744a780884bfc80/invenio_access/cli.py#L171-L175 | train |
inveniosoftware/invenio-access | invenio_access/cli.py | remove_global | def remove_global():
"""Remove global action rule."""
def processor(action, argument):
ActionUsers.query_by_action(action, argument=argument).filter(
ActionUsers.user_id.is_(None)
).delete(synchronize_session=False)
return processor | python | Remove global action rule. | 3b033a4bdc110eb2f7e9f08f0744a780884bfc80 | https://github.com/inveniosoftware/invenio-access/blob/3b033a4bdc110eb2f7e9f08f0744a780884bfc80/invenio_access/cli.py#L193-L199 | train |
inveniosoftware/invenio-access | invenio_access/cli.py | remove_user | def remove_user(user):
"""Remove a action for a user."""
def processor(action, argument):
ActionUsers.query_by_action(action, argument=argument).filter(
ActionUsers.user_id == user.id
).delete(synchronize_session=False)
return processor | python | Remove a action for a user. | 3b033a4bdc110eb2f7e9f08f0744a780884bfc80 | https://github.com/inveniosoftware/invenio-access/blob/3b033a4bdc110eb2f7e9f08f0744a780884bfc80/invenio_access/cli.py#L204-L210 | train |
inveniosoftware/invenio-access | invenio_access/cli.py | remove_role | def remove_role(role):
"""Remove a action for a role."""
def processor(action, argument):
ActionRoles.query_by_action(action, argument=argument).filter(
ActionRoles.role_id == role.id
).delete(synchronize_session=False)
return processor | python | Remove a action for a role. | 3b033a4bdc110eb2f7e9f08f0744a780884bfc80 | https://github.com/inveniosoftware/invenio-access/blob/3b033a4bdc110eb2f7e9f08f0744a780884bfc80/invenio_access/cli.py#L215-L221 | train |
inveniosoftware/invenio-access | invenio_access/cli.py | process_remove_action | def process_remove_action(processors, action, argument):
"""Process action removals."""
for processor in processors:
processor(action, argument)
db.session.commit() | python | Process action removals. | 3b033a4bdc110eb2f7e9f08f0744a780884bfc80 | https://github.com/inveniosoftware/invenio-access/blob/3b033a4bdc110eb2f7e9f08f0744a780884bfc80/invenio_access/cli.py#L226-L230 | train |
inveniosoftware/invenio-access | invenio_access/cli.py | list_actions | def list_actions():
"""List all registered actions."""
for name, action in _current_actions.items():
click.echo('{0}:{1}'.format(
name, '*' if hasattr(action, 'argument') else ''
)) | python | List all registered actions. | 3b033a4bdc110eb2f7e9f08f0744a780884bfc80 | https://github.com/inveniosoftware/invenio-access/blob/3b033a4bdc110eb2f7e9f08f0744a780884bfc80/invenio_access/cli.py#L235-L240 | train |
inveniosoftware/invenio-access | invenio_access/cli.py | show_actions | def show_actions(email, role):
"""Show all assigned actions."""
if email:
actions = ActionUsers.query.join(ActionUsers.user).filter(
User.email.in_(email)
).all()
for action in actions:
click.secho('user:{0}:{1}:{2}:{3}'.format(
action.user.email,
action.action,
'' if action.argument is None else action.argument,
'deny' if action.exclude else 'allow',
), fg='red' if action.exclude else 'green')
if role:
actions = ActionRoles.query.filter(
Role.name.in_(role)
).join(ActionRoles.role).all()
for action in actions:
click.secho('role:{0}:{1}:{2}:{3}'.format(
action.role.name,
action.action,
'' if action.argument is None else action.argument,
'deny' if action.exclude else 'allow',
), fg='red' if action.exclude else 'green') | python | Show all assigned actions. | 3b033a4bdc110eb2f7e9f08f0744a780884bfc80 | https://github.com/inveniosoftware/invenio-access/blob/3b033a4bdc110eb2f7e9f08f0744a780884bfc80/invenio_access/cli.py#L247-L271 | train |
BD2KGenomics/protect | src/protect/addons/assess_mhc_pathway.py | run_mhc_gene_assessment | def run_mhc_gene_assessment(job, rsem_files, rna_haplotype, univ_options, reports_options):
"""
A wrapper for assess_mhc_genes.
:param dict rsem_files: Results from running rsem
:param str rna_haplotype: The job store id for the rna haplotype file
:param dict univ_options: Dict of universal options used by almost all tools
:param dict reports_options: Options specific to reporting modules
:return: The results of running assess_mhc_genes
:rtype: toil.fileStore.FileID
"""
return job.addChildJobFn(assess_mhc_genes, rsem_files['rsem.genes.results'], rna_haplotype,
univ_options, reports_options).rv() | python | A wrapper for assess_mhc_genes.
:param dict rsem_files: Results from running rsem
:param str rna_haplotype: The job store id for the rna haplotype file
:param dict univ_options: Dict of universal options used by almost all tools
:param dict reports_options: Options specific to reporting modules
:return: The results of running assess_mhc_genes
:rtype: toil.fileStore.FileID | 06310682c50dcf8917b912c8e551299ff7ee41ce | https://github.com/BD2KGenomics/protect/blob/06310682c50dcf8917b912c8e551299ff7ee41ce/src/protect/addons/assess_mhc_pathway.py#L27-L39 | train |
BD2KGenomics/protect | attic/ProTECT.py | parse_config_file | def parse_config_file(job, config_file):
"""
This module will parse the config file withing params and set up the variables that will be
passed to the various tools in the pipeline.
ARGUMENTS
config_file: string containing path to a config file. An example config
file is available at
https://s3-us-west-2.amazonaws.com/pimmuno-references
/input_parameters.list
RETURN VALUES
None
"""
job.fileStore.logToMaster('Parsing config file')
config_file = os.path.abspath(config_file)
if not os.path.exists(config_file):
raise ParameterError('The config file was not found at specified location. Please verify ' +
'and retry.')
# Initialize variables to hold the sample sets, the universal options, and the per-tool options
sample_set = defaultdict()
univ_options = defaultdict()
tool_options = defaultdict()
# Read through the notes and the
with open(config_file, 'r') as conf:
for line in conf:
line = line.strip()
if line.startswith('##') or len(line) == 0:
continue
if line.startswith('BEGIN'):
break
# The generator function tool_specific_param_generator will yield one group name at a time
# along with it's parameters.
for groupname, group_params in tool_specific_param_generator(job, conf):
if groupname == 'patient':
if 'patient_id' not in group_params.keys():
raise ParameterError('A patient group is missing the patient_id flag.')
sample_set[group_params['patient_id']] = group_params
elif groupname == 'Universal_Options':
univ_options = group_params
required_options = {'java_Xmx', 'output_folder', 'storage_location'}
missing_opts = required_options.difference(set(univ_options.keys()))
if len(missing_opts) > 0:
raise ParameterError(' The following options have no arguments in the config '
'file :\n' + '\n'.join(missing_opts))
if univ_options['sse_key_is_master']:
assert univ_options['sse_key_is_master'] in ('True', 'true', 'False', 'false')
univ_options['sse_key_is_master'] = \
univ_options['sse_key_is_master'] in ('True', 'true')
# If it isn't any of the above, it's a tool group
else:
tool_options[groupname] = group_params
# Ensure that all tools have been provided options.
required_tools = {'cutadapt', 'bwa', 'star', 'phlat', 'transgene', 'mut_callers', 'rsem',
'mhci', 'mhcii', 'snpeff', 'rank_boost'}
# 'fusion', 'indels'}
missing_tools = required_tools.difference(set(tool_options.keys()))
if len(missing_tools) > 0:
raise ParameterError(' The following tools have no arguments in the config file : \n' +
'\n'.join(missing_tools))
# Start a job for each sample in the sample set
for patient_id in sample_set.keys():
job.addFollowOnJobFn(pipeline_launchpad, sample_set[patient_id], univ_options, tool_options)
return None | python | This module will parse the config file withing params and set up the variables that will be
passed to the various tools in the pipeline.
ARGUMENTS
config_file: string containing path to a config file. An example config
file is available at
https://s3-us-west-2.amazonaws.com/pimmuno-references
/input_parameters.list
RETURN VALUES
None | 06310682c50dcf8917b912c8e551299ff7ee41ce | https://github.com/BD2KGenomics/protect/blob/06310682c50dcf8917b912c8e551299ff7ee41ce/attic/ProTECT.py#L46-L109 | train |
BD2KGenomics/protect | attic/ProTECT.py | run_cutadapt | def run_cutadapt(job, fastqs, univ_options, cutadapt_options):
"""
This module runs cutadapt on the input RNA fastq files and then calls the RNA aligners.
ARGUMENTS
1. fastqs: Dict of list of input RNA-Seq fastqs
fastqs
+- 'tumor_rna': [<JSid for 1.fastq> , <JSid for 2.fastq>]
2. univ_options: Dict of universal arguments used by almost all tools
univ_options
+- 'dockerhub': <dockerhub to use>
3. cutadapt_options: Dict of parameters specific to cutadapt
cutadapt_options
|- 'a': <sequence of 3' adapter to trim from fwd read>
+- 'A': <sequence of 3' adapter to trim from rev read>
RETURN VALUES
1. output_files: Dict of cutadapted fastqs
output_files
|- 'rna_cutadapt_1.fastq': <JSid>
+- 'rna_cutadapt_2.fastq': <JSid>
This module corresponds to node 2 on the tree
"""
job.fileStore.logToMaster('Running cutadapt on %s' %univ_options['patient'])
work_dir = job.fileStore.getLocalTempDir()
fq_extn = '.gz' if fastqs['gzipped'] else ''
input_files = {
'rna_1.fastq' + fq_extn: fastqs['tumor_rna'][0],
'rna_2.fastq' + fq_extn: fastqs['tumor_rna'][1]}
input_files = get_files_from_filestore(job, input_files, work_dir, docker=True)
parameters = ['-a', cutadapt_options['a'], # Fwd read 3' adapter
'-A', cutadapt_options['A'], # Rev read 3' adapter
'-m', '35', # Minimum size of read
'-o', docker_path('rna_cutadapt_1.fastq'), # Output for R1
'-p', docker_path('rna_cutadapt_2.fastq'), # Output for R2
input_files['rna_1.fastq'],
input_files['rna_2.fastq']]
docker_call(tool='cutadapt', tool_parameters=parameters, work_dir=work_dir,
dockerhub=univ_options['dockerhub'])
output_files = defaultdict()
for fastq_file in ['rna_cutadapt_1.fastq', 'rna_cutadapt_2.fastq']:
output_files[fastq_file] = job.fileStore.writeGlobalFile('/'.join([work_dir, fastq_file]))
return output_files | python | This module runs cutadapt on the input RNA fastq files and then calls the RNA aligners.
ARGUMENTS
1. fastqs: Dict of list of input RNA-Seq fastqs
fastqs
+- 'tumor_rna': [<JSid for 1.fastq> , <JSid for 2.fastq>]
2. univ_options: Dict of universal arguments used by almost all tools
univ_options
+- 'dockerhub': <dockerhub to use>
3. cutadapt_options: Dict of parameters specific to cutadapt
cutadapt_options
|- 'a': <sequence of 3' adapter to trim from fwd read>
+- 'A': <sequence of 3' adapter to trim from rev read>
RETURN VALUES
1. output_files: Dict of cutadapted fastqs
output_files
|- 'rna_cutadapt_1.fastq': <JSid>
+- 'rna_cutadapt_2.fastq': <JSid>
This module corresponds to node 2 on the tree | 06310682c50dcf8917b912c8e551299ff7ee41ce | https://github.com/BD2KGenomics/protect/blob/06310682c50dcf8917b912c8e551299ff7ee41ce/attic/ProTECT.py#L256-L298 | train |
BD2KGenomics/protect | attic/ProTECT.py | run_star | def run_star(job, fastqs, univ_options, star_options):
"""
This module uses STAR to align the RNA fastqs to the reference
ARGUMENTS
1. fastqs: REFER RETURN VALUE of run_cutadapt()
2. univ_options: Dict of universal arguments used by almost all tools
univ_options
+- 'dockerhub': <dockerhub to use>
3. star_options: Dict of parameters specific to STAR
star_options
|- 'index_tar': <JSid for the STAR index tarball>
+- 'n': <number of threads to allocate>
RETURN VALUES
1. output_files: Dict of aligned bams
output_files
|- 'rnaAligned.toTranscriptome.out.bam': <JSid>
+- 'rnaAligned.sortedByCoord.out.bam': Dict of genome bam + bai
|- 'rna_fix_pg_sorted.bam': <JSid>
+- 'rna_fix_pg_sorted.bam.bai': <JSid>
This module corresponds to node 9 on the tree
"""
assert star_options['type'] in ('star', 'starlong')
job.fileStore.logToMaster('Running STAR on %s' %univ_options['patient'])
work_dir = job.fileStore.getLocalTempDir()
input_files = {
'rna_cutadapt_1.fastq': fastqs['rna_cutadapt_1.fastq'],
'rna_cutadapt_2.fastq': fastqs['rna_cutadapt_2.fastq'],
'star_index.tar.gz': star_options['index_tar']}
input_files = get_files_from_filestore(job, input_files, work_dir,
docker=True)
parameters = ['--runThreadN', str(star_options['n']),
'--genomeDir', input_files['star_index'],
'--outFileNamePrefix', 'rna',
'--readFilesIn',
input_files['rna_cutadapt_1.fastq'],
input_files['rna_cutadapt_2.fastq'],
'--outSAMattributes', 'NH', 'HI', 'AS', 'NM', 'MD',
'--outSAMtype', 'BAM', 'SortedByCoordinate',
'--quantMode', 'TranscriptomeSAM',
'--outSAMunmapped', 'Within']
if star_options['type'] == 'star':
docker_call(tool='star', tool_parameters=parameters, work_dir=work_dir,
dockerhub=univ_options['dockerhub'])
else:
docker_call(tool='starlong', tool_parameters=parameters, work_dir=work_dir,
dockerhub=univ_options['dockerhub'])
output_files = defaultdict()
for bam_file in ['rnaAligned.toTranscriptome.out.bam',
'rnaAligned.sortedByCoord.out.bam']:
output_files[bam_file] = job.fileStore.writeGlobalFile('/'.join([
work_dir, bam_file]))
job.fileStore.deleteGlobalFile(fastqs['rna_cutadapt_1.fastq'])
job.fileStore.deleteGlobalFile(fastqs['rna_cutadapt_2.fastq'])
index_star = job.wrapJobFn(index_bamfile,
output_files['rnaAligned.sortedByCoord.out.bam'],
'rna', univ_options, disk='120G')
job.addChild(index_star)
output_files['rnaAligned.sortedByCoord.out.bam'] = index_star.rv()
return output_files | python | This module uses STAR to align the RNA fastqs to the reference
ARGUMENTS
1. fastqs: REFER RETURN VALUE of run_cutadapt()
2. univ_options: Dict of universal arguments used by almost all tools
univ_options
+- 'dockerhub': <dockerhub to use>
3. star_options: Dict of parameters specific to STAR
star_options
|- 'index_tar': <JSid for the STAR index tarball>
+- 'n': <number of threads to allocate>
RETURN VALUES
1. output_files: Dict of aligned bams
output_files
|- 'rnaAligned.toTranscriptome.out.bam': <JSid>
+- 'rnaAligned.sortedByCoord.out.bam': Dict of genome bam + bai
|- 'rna_fix_pg_sorted.bam': <JSid>
+- 'rna_fix_pg_sorted.bam.bai': <JSid>
This module corresponds to node 9 on the tree | 06310682c50dcf8917b912c8e551299ff7ee41ce | https://github.com/BD2KGenomics/protect/blob/06310682c50dcf8917b912c8e551299ff7ee41ce/attic/ProTECT.py#L301-L361 | train |
BD2KGenomics/protect | attic/ProTECT.py | run_bwa | def run_bwa(job, fastqs, sample_type, univ_options, bwa_options):
"""
This module aligns the SAMPLE_TYPE dna fastqs to the reference
ARGUMENTS -- <ST> depicts the sample type. Substitute with 'tumor'/'normal'
1. fastqs: Dict of list of input WGS/WXS fastqs
fastqs
+- '<ST>_dna': [<JSid for 1.fastq> , <JSid for 2.fastq>]
2. sample_type: string of 'tumor_dna' or 'normal_dna'
3. univ_options: Dict of universal arguments used by almost all tools
univ_options
+- 'dockerhub': <dockerhub to use>
4. bwa_options: Dict of parameters specific to bwa
bwa_options
|- 'index_tar': <JSid for the bwa index tarball>
+- 'n': <number of threads to allocate>
RETURN VALUES
1. output_files: Dict of aligned bam + reference (nested return)
output_files
|- '<ST>_fix_pg_sorted.bam': <JSid>
+- '<ST>_fix_pg_sorted.bam.bai': <JSid>
This module corresponds to nodes 3 and 4 on the tree
"""
job.fileStore.logToMaster('Running bwa on %s:%s' % (univ_options['patient'], sample_type))
work_dir = job.fileStore.getLocalTempDir()
fq_extn = '.gz' if fastqs['gzipped'] else ''
input_files = {
'dna_1.fastq' + fq_extn: fastqs[sample_type][0],
'dna_2.fastq' + fq_extn: fastqs[sample_type][1],
'bwa_index.tar.gz': bwa_options['index_tar']}
input_files = get_files_from_filestore(job, input_files, work_dir, docker=True)
parameters = ['mem',
'-t', str(bwa_options['n']),
'-v', '1', # Don't print INFO messages to the stderr
'/'.join([input_files['bwa_index'], 'hg19.fa']),
input_files['dna_1.fastq'],
input_files['dna_2.fastq']]
with open(''.join([work_dir, '/', sample_type, '_aligned.sam']), 'w') as samfile:
docker_call(tool='bwa', tool_parameters=parameters, work_dir=work_dir,
dockerhub=univ_options['dockerhub'], outfile=samfile)
# samfile.name retains the path info
output_file = job.fileStore.writeGlobalFile(samfile.name)
samfile_processing = job.wrapJobFn(bam_conversion, output_file, sample_type, univ_options,
disk='60G')
job.addChild(samfile_processing)
# Return values get passed up the chain to here. The return value will be a dict with
# SAMPLE_TYPE_fix_pg_sorted.bam: jobStoreID
# SAMPLE_TYPE_fix_pg_sorted.bam.bai: jobStoreID
return samfile_processing.rv() | python | This module aligns the SAMPLE_TYPE dna fastqs to the reference
ARGUMENTS -- <ST> depicts the sample type. Substitute with 'tumor'/'normal'
1. fastqs: Dict of list of input WGS/WXS fastqs
fastqs
+- '<ST>_dna': [<JSid for 1.fastq> , <JSid for 2.fastq>]
2. sample_type: string of 'tumor_dna' or 'normal_dna'
3. univ_options: Dict of universal arguments used by almost all tools
univ_options
+- 'dockerhub': <dockerhub to use>
4. bwa_options: Dict of parameters specific to bwa
bwa_options
|- 'index_tar': <JSid for the bwa index tarball>
+- 'n': <number of threads to allocate>
RETURN VALUES
1. output_files: Dict of aligned bam + reference (nested return)
output_files
|- '<ST>_fix_pg_sorted.bam': <JSid>
+- '<ST>_fix_pg_sorted.bam.bai': <JSid>
This module corresponds to nodes 3 and 4 on the tree | [
"This",
"module",
"aligns",
"the",
"SAMPLE_TYPE",
"dna",
"fastqs",
"to",
"the",
"reference"
] | 06310682c50dcf8917b912c8e551299ff7ee41ce | https://github.com/BD2KGenomics/protect/blob/06310682c50dcf8917b912c8e551299ff7ee41ce/attic/ProTECT.py#L364-L414 | train |
BD2KGenomics/protect | attic/ProTECT.py | bam_conversion | def bam_conversion(job, samfile, sample_type, univ_options):
"""
This module converts SAMFILE from sam to bam
ARGUMENTS
1. samfile: <JSid for a sam file>
2. sample_type: string of 'tumor_dna' or 'normal_dna'
3. univ_options: Dict of universal arguments used by almost all tools
univ_options
+- 'dockerhub': <dockerhub to use>
RETURN VALUES
1. output_files: REFER output_files in run_bwa()
"""
job.fileStore.logToMaster('Running sam2bam on %s:%s' % (univ_options['patient'], sample_type))
work_dir = job.fileStore.getLocalTempDir()
input_files = {
'aligned.sam': samfile}
input_files = get_files_from_filestore(job, input_files, work_dir,
docker=True)
bamfile = '/'.join([work_dir, 'aligned.bam'])
parameters = ['view',
'-bS',
'-o', docker_path(bamfile),
input_files['aligned.sam']
]
docker_call(tool='samtools', tool_parameters=parameters, work_dir=work_dir,
dockerhub=univ_options['dockerhub'])
output_file = job.fileStore.writeGlobalFile(bamfile)
job.fileStore.deleteGlobalFile(samfile)
reheader_bam = job.wrapJobFn(fix_bam_header, output_file, sample_type, univ_options, disk='60G')
job.addChild(reheader_bam)
return reheader_bam.rv() | python | def bam_conversion(job, samfile, sample_type, univ_options):
"""
This module converts SAMFILE from sam to bam
ARGUMENTS
1. samfile: <JSid for a sam file>
2. sample_type: string of 'tumor_dna' or 'normal_dna'
3. univ_options: Dict of universal arguments used by almost all tools
univ_options
+- 'dockerhub': <dockerhub to use>
RETURN VALUES
1. output_files: REFER output_files in run_bwa()
"""
job.fileStore.logToMaster('Running sam2bam on %s:%s' % (univ_options['patient'], sample_type))
work_dir = job.fileStore.getLocalTempDir()
input_files = {
'aligned.sam': samfile}
input_files = get_files_from_filestore(job, input_files, work_dir,
docker=True)
bamfile = '/'.join([work_dir, 'aligned.bam'])
parameters = ['view',
'-bS',
'-o', docker_path(bamfile),
input_files['aligned.sam']
]
docker_call(tool='samtools', tool_parameters=parameters, work_dir=work_dir,
dockerhub=univ_options['dockerhub'])
output_file = job.fileStore.writeGlobalFile(bamfile)
job.fileStore.deleteGlobalFile(samfile)
reheader_bam = job.wrapJobFn(fix_bam_header, output_file, sample_type, univ_options, disk='60G')
job.addChild(reheader_bam)
return reheader_bam.rv() | [
"def",
"bam_conversion",
"(",
"job",
",",
"samfile",
",",
"sample_type",
",",
"univ_options",
")",
":",
"job",
".",
"fileStore",
".",
"logToMaster",
"(",
"'Running sam2bam on %s:%s'",
"%",
"(",
"univ_options",
"[",
"'patient'",
"]",
",",
"sample_type",
")",
")",
"work_dir",
"=",
"job",
".",
"fileStore",
".",
"getLocalTempDir",
"(",
")",
"input_files",
"=",
"{",
"'aligned.sam'",
":",
"samfile",
"}",
"input_files",
"=",
"get_files_from_filestore",
"(",
"job",
",",
"input_files",
",",
"work_dir",
",",
"docker",
"=",
"True",
")",
"bamfile",
"=",
"'/'",
".",
"join",
"(",
"[",
"work_dir",
",",
"'aligned.bam'",
"]",
")",
"parameters",
"=",
"[",
"'view'",
",",
"'-bS'",
",",
"'-o'",
",",
"docker_path",
"(",
"bamfile",
")",
",",
"input_files",
"[",
"'aligned.sam'",
"]",
"]",
"docker_call",
"(",
"tool",
"=",
"'samtools'",
",",
"tool_parameters",
"=",
"parameters",
",",
"work_dir",
"=",
"work_dir",
",",
"dockerhub",
"=",
"univ_options",
"[",
"'dockerhub'",
"]",
")",
"output_file",
"=",
"job",
".",
"fileStore",
".",
"writeGlobalFile",
"(",
"bamfile",
")",
"job",
".",
"fileStore",
".",
"deleteGlobalFile",
"(",
"samfile",
")",
"reheader_bam",
"=",
"job",
".",
"wrapJobFn",
"(",
"fix_bam_header",
",",
"output_file",
",",
"sample_type",
",",
"univ_options",
",",
"disk",
"=",
"'60G'",
")",
"job",
".",
"addChild",
"(",
"reheader_bam",
")",
"return",
"reheader_bam",
".",
"rv",
"(",
")"
] | This module converts SAMFILE from sam to bam
ARGUMENTS
1. samfile: <JSid for a sam file>
2. sample_type: string of 'tumor_dna' or 'normal_dna'
3. univ_options: Dict of universal arguments used by almost all tools
univ_options
+- 'dockerhub': <dockerhub to use>
RETURN VALUES
1. output_files: REFER output_files in run_bwa() | [
"This",
"module",
"converts",
"SAMFILE",
"from",
"sam",
"to",
"bam"
] | 06310682c50dcf8917b912c8e551299ff7ee41ce | https://github.com/BD2KGenomics/protect/blob/06310682c50dcf8917b912c8e551299ff7ee41ce/attic/ProTECT.py#L417-L448 | train |
BD2KGenomics/protect | attic/ProTECT.py | fix_bam_header | def fix_bam_header(job, bamfile, sample_type, univ_options):
"""
    This module modifies the header in BAMFILE
ARGUMENTS
1. bamfile: <JSid for a bam file>
2. sample_type: string of 'tumor_dna' or 'normal_dna'
3. univ_options: Dict of universal arguments used by almost all tools
univ_options
+- 'dockerhub': <dockerhub to use>
RETURN VALUES
1. output_files: REFER output_files in run_bwa()
"""
job.fileStore.logToMaster('Running reheader on %s:%s' % (univ_options['patient'], sample_type))
work_dir = job.fileStore.getLocalTempDir()
input_files = {
'aligned.bam': bamfile}
input_files = get_files_from_filestore(job, input_files, work_dir, docker=True)
parameters = ['view',
'-H',
input_files['aligned.bam']]
with open('/'.join([work_dir, 'aligned_bam.header']), 'w') as headerfile:
docker_call(tool='samtools', tool_parameters=parameters, work_dir=work_dir,
dockerhub=univ_options['dockerhub'], outfile=headerfile)
with open(headerfile.name, 'r') as headerfile, \
open('/'.join([work_dir, 'output_bam.header']), 'w') as outheaderfile:
for line in headerfile:
if line.startswith('@PG'):
line = '\t'.join([x for x in line.strip().split('\t') if not x.startswith('CL')])
print(line.strip(), file=outheaderfile)
parameters = ['reheader',
docker_path(outheaderfile.name),
input_files['aligned.bam']]
with open('/'.join([work_dir, 'aligned_fixPG.bam']), 'w') as fixpg_bamfile:
docker_call(tool='samtools', tool_parameters=parameters, work_dir=work_dir,
dockerhub=univ_options['dockerhub'], outfile=fixpg_bamfile)
output_file = job.fileStore.writeGlobalFile(fixpg_bamfile.name)
job.fileStore.deleteGlobalFile(bamfile)
add_rg = job.wrapJobFn(add_readgroups, output_file, sample_type, univ_options, disk='60G')
job.addChild(add_rg)
return add_rg.rv() | python | def fix_bam_header(job, bamfile, sample_type, univ_options):
"""
    This module modifies the header in BAMFILE
ARGUMENTS
1. bamfile: <JSid for a bam file>
2. sample_type: string of 'tumor_dna' or 'normal_dna'
3. univ_options: Dict of universal arguments used by almost all tools
univ_options
+- 'dockerhub': <dockerhub to use>
RETURN VALUES
1. output_files: REFER output_files in run_bwa()
"""
job.fileStore.logToMaster('Running reheader on %s:%s' % (univ_options['patient'], sample_type))
work_dir = job.fileStore.getLocalTempDir()
input_files = {
'aligned.bam': bamfile}
input_files = get_files_from_filestore(job, input_files, work_dir, docker=True)
parameters = ['view',
'-H',
input_files['aligned.bam']]
with open('/'.join([work_dir, 'aligned_bam.header']), 'w') as headerfile:
docker_call(tool='samtools', tool_parameters=parameters, work_dir=work_dir,
dockerhub=univ_options['dockerhub'], outfile=headerfile)
with open(headerfile.name, 'r') as headerfile, \
open('/'.join([work_dir, 'output_bam.header']), 'w') as outheaderfile:
for line in headerfile:
if line.startswith('@PG'):
line = '\t'.join([x for x in line.strip().split('\t') if not x.startswith('CL')])
print(line.strip(), file=outheaderfile)
parameters = ['reheader',
docker_path(outheaderfile.name),
input_files['aligned.bam']]
with open('/'.join([work_dir, 'aligned_fixPG.bam']), 'w') as fixpg_bamfile:
docker_call(tool='samtools', tool_parameters=parameters, work_dir=work_dir,
dockerhub=univ_options['dockerhub'], outfile=fixpg_bamfile)
output_file = job.fileStore.writeGlobalFile(fixpg_bamfile.name)
job.fileStore.deleteGlobalFile(bamfile)
add_rg = job.wrapJobFn(add_readgroups, output_file, sample_type, univ_options, disk='60G')
job.addChild(add_rg)
return add_rg.rv() | [
"def",
"fix_bam_header",
"(",
"job",
",",
"bamfile",
",",
"sample_type",
",",
"univ_options",
")",
":",
"job",
".",
"fileStore",
".",
"logToMaster",
"(",
"'Running reheader on %s:%s'",
"%",
"(",
"univ_options",
"[",
"'patient'",
"]",
",",
"sample_type",
")",
")",
"work_dir",
"=",
"job",
".",
"fileStore",
".",
"getLocalTempDir",
"(",
")",
"input_files",
"=",
"{",
"'aligned.bam'",
":",
"bamfile",
"}",
"input_files",
"=",
"get_files_from_filestore",
"(",
"job",
",",
"input_files",
",",
"work_dir",
",",
"docker",
"=",
"True",
")",
"parameters",
"=",
"[",
"'view'",
",",
"'-H'",
",",
"input_files",
"[",
"'aligned.bam'",
"]",
"]",
"with",
"open",
"(",
"'/'",
".",
"join",
"(",
"[",
"work_dir",
",",
"'aligned_bam.header'",
"]",
")",
",",
"'w'",
")",
"as",
"headerfile",
":",
"docker_call",
"(",
"tool",
"=",
"'samtools'",
",",
"tool_parameters",
"=",
"parameters",
",",
"work_dir",
"=",
"work_dir",
",",
"dockerhub",
"=",
"univ_options",
"[",
"'dockerhub'",
"]",
",",
"outfile",
"=",
"headerfile",
")",
"with",
"open",
"(",
"headerfile",
".",
"name",
",",
"'r'",
")",
"as",
"headerfile",
",",
"open",
"(",
"'/'",
".",
"join",
"(",
"[",
"work_dir",
",",
"'output_bam.header'",
"]",
")",
",",
"'w'",
")",
"as",
"outheaderfile",
":",
"for",
"line",
"in",
"headerfile",
":",
"if",
"line",
".",
"startswith",
"(",
"'@PG'",
")",
":",
"line",
"=",
"'\\t'",
".",
"join",
"(",
"[",
"x",
"for",
"x",
"in",
"line",
".",
"strip",
"(",
")",
".",
"split",
"(",
"'\\t'",
")",
"if",
"not",
"x",
".",
"startswith",
"(",
"'CL'",
")",
"]",
")",
"print",
"(",
"line",
".",
"strip",
"(",
")",
",",
"file",
"=",
"outheaderfile",
")",
"parameters",
"=",
"[",
"'reheader'",
",",
"docker_path",
"(",
"outheaderfile",
".",
"name",
")",
",",
"input_files",
"[",
"'aligned.bam'",
"]",
"]",
"with",
"open",
"(",
"'/'",
".",
"join",
"(",
"[",
"work_dir",
",",
"'aligned_fixPG.bam'",
"]",
")",
",",
"'w'",
")",
"as",
"fixpg_bamfile",
":",
"docker_call",
"(",
"tool",
"=",
"'samtools'",
",",
"tool_parameters",
"=",
"parameters",
",",
"work_dir",
"=",
"work_dir",
",",
"dockerhub",
"=",
"univ_options",
"[",
"'dockerhub'",
"]",
",",
"outfile",
"=",
"fixpg_bamfile",
")",
"output_file",
"=",
"job",
".",
"fileStore",
".",
"writeGlobalFile",
"(",
"fixpg_bamfile",
".",
"name",
")",
"job",
".",
"fileStore",
".",
"deleteGlobalFile",
"(",
"bamfile",
")",
"add_rg",
"=",
"job",
".",
"wrapJobFn",
"(",
"add_readgroups",
",",
"output_file",
",",
"sample_type",
",",
"univ_options",
",",
"disk",
"=",
"'60G'",
")",
"job",
".",
"addChild",
"(",
"add_rg",
")",
"return",
"add_rg",
".",
"rv",
"(",
")"
] | This module modifies the header in BAMFILE
ARGUMENTS
1. bamfile: <JSid for a bam file>
2. sample_type: string of 'tumor_dna' or 'normal_dna'
3. univ_options: Dict of universal arguments used by almost all tools
univ_options
+- 'dockerhub': <dockerhub to use>
RETURN VALUES
1. output_files: REFER output_files in run_bwa() | [
"This",
"module",
"modified",
"the",
"header",
"in",
"BAMFILE"
] | 06310682c50dcf8917b912c8e551299ff7ee41ce | https://github.com/BD2KGenomics/protect/blob/06310682c50dcf8917b912c8e551299ff7ee41ce/attic/ProTECT.py#L451-L491 | train |
BD2KGenomics/protect | attic/ProTECT.py | run_rsem | def run_rsem(job, star_bams, univ_options, rsem_options):
"""
This module will run rsem on the RNA Bam file.
ARGUMENTS
1. star_bams: Dict of input STAR bams
star_bams
+- 'rnaAligned.toTranscriptome.out.bam': <JSid>
2. univ_options: Dict of universal arguments used by almost all tools
univ_options
+- 'dockerhub': <dockerhub to use>
3. rsem_options: Dict of parameters specific to rsem
rsem_options
|- 'index_tar': <JSid for the rsem index tarball>
+- 'n': <number of threads to allocate>
RETURN VALUES
1. output_file: <Jsid of rsem.isoforms.results>
This module corresponds to node 9 on the tree
"""
job.fileStore.logToMaster('Running rsem index on %s' % univ_options['patient'])
work_dir = job.fileStore.getLocalTempDir()
input_files = {
'star_transcriptome.bam': star_bams['rnaAligned.toTranscriptome.out.bam'],
'rsem_index.tar.gz': rsem_options['index_tar']}
input_files = get_files_from_filestore(job, input_files, work_dir, docker=True)
parameters = ['--paired-end',
'-p', str(rsem_options['n']),
'--bam',
input_files['star_transcriptome.bam'],
'--no-bam-output',
'/'.join([input_files['rsem_index'], 'hg19']),
'rsem']
docker_call(tool='rsem', tool_parameters=parameters, work_dir=work_dir,
dockerhub=univ_options['dockerhub'])
output_file = \
job.fileStore.writeGlobalFile('/'.join([work_dir, 'rsem.isoforms.results']))
return output_file | python | def run_rsem(job, star_bams, univ_options, rsem_options):
"""
This module will run rsem on the RNA Bam file.
ARGUMENTS
1. star_bams: Dict of input STAR bams
star_bams
+- 'rnaAligned.toTranscriptome.out.bam': <JSid>
2. univ_options: Dict of universal arguments used by almost all tools
univ_options
+- 'dockerhub': <dockerhub to use>
3. rsem_options: Dict of parameters specific to rsem
rsem_options
|- 'index_tar': <JSid for the rsem index tarball>
+- 'n': <number of threads to allocate>
RETURN VALUES
1. output_file: <Jsid of rsem.isoforms.results>
This module corresponds to node 9 on the tree
"""
job.fileStore.logToMaster('Running rsem index on %s' % univ_options['patient'])
work_dir = job.fileStore.getLocalTempDir()
input_files = {
'star_transcriptome.bam': star_bams['rnaAligned.toTranscriptome.out.bam'],
'rsem_index.tar.gz': rsem_options['index_tar']}
input_files = get_files_from_filestore(job, input_files, work_dir, docker=True)
parameters = ['--paired-end',
'-p', str(rsem_options['n']),
'--bam',
input_files['star_transcriptome.bam'],
'--no-bam-output',
'/'.join([input_files['rsem_index'], 'hg19']),
'rsem']
docker_call(tool='rsem', tool_parameters=parameters, work_dir=work_dir,
dockerhub=univ_options['dockerhub'])
output_file = \
job.fileStore.writeGlobalFile('/'.join([work_dir, 'rsem.isoforms.results']))
return output_file | [
"def",
"run_rsem",
"(",
"job",
",",
"star_bams",
",",
"univ_options",
",",
"rsem_options",
")",
":",
"job",
".",
"fileStore",
".",
"logToMaster",
"(",
"'Running rsem index on %s'",
"%",
"univ_options",
"[",
"'patient'",
"]",
")",
"work_dir",
"=",
"job",
".",
"fileStore",
".",
"getLocalTempDir",
"(",
")",
"input_files",
"=",
"{",
"'star_transcriptome.bam'",
":",
"star_bams",
"[",
"'rnaAligned.toTranscriptome.out.bam'",
"]",
",",
"'rsem_index.tar.gz'",
":",
"rsem_options",
"[",
"'index_tar'",
"]",
"}",
"input_files",
"=",
"get_files_from_filestore",
"(",
"job",
",",
"input_files",
",",
"work_dir",
",",
"docker",
"=",
"True",
")",
"parameters",
"=",
"[",
"'--paired-end'",
",",
"'-p'",
",",
"str",
"(",
"rsem_options",
"[",
"'n'",
"]",
")",
",",
"'--bam'",
",",
"input_files",
"[",
"'star_transcriptome.bam'",
"]",
",",
"'--no-bam-output'",
",",
"'/'",
".",
"join",
"(",
"[",
"input_files",
"[",
"'rsem_index'",
"]",
",",
"'hg19'",
"]",
")",
",",
"'rsem'",
"]",
"docker_call",
"(",
"tool",
"=",
"'rsem'",
",",
"tool_parameters",
"=",
"parameters",
",",
"work_dir",
"=",
"work_dir",
",",
"dockerhub",
"=",
"univ_options",
"[",
"'dockerhub'",
"]",
")",
"output_file",
"=",
"job",
".",
"fileStore",
".",
"writeGlobalFile",
"(",
"'/'",
".",
"join",
"(",
"[",
"work_dir",
",",
"'rsem.isoforms.results'",
"]",
")",
")",
"return",
"output_file"
] | This module will run rsem on the RNA Bam file.
ARGUMENTS
1. star_bams: Dict of input STAR bams
star_bams
+- 'rnaAligned.toTranscriptome.out.bam': <JSid>
2. univ_options: Dict of universal arguments used by almost all tools
univ_options
+- 'dockerhub': <dockerhub to use>
3. rsem_options: Dict of parameters specific to rsem
rsem_options
|- 'index_tar': <JSid for the rsem index tarball>
+- 'n': <number of threads to allocate>
RETURN VALUES
1. output_file: <Jsid of rsem.isoforms.results>
This module corresponds to node 9 on the tree | [
"This",
"module",
"will",
"run",
"rsem",
"on",
"the",
"RNA",
"Bam",
"file",
"."
] | 06310682c50dcf8917b912c8e551299ff7ee41ce | https://github.com/BD2KGenomics/protect/blob/06310682c50dcf8917b912c8e551299ff7ee41ce/attic/ProTECT.py#L565-L603 | train |
BD2KGenomics/protect | attic/ProTECT.py | merge_radia | def merge_radia(job, perchrom_rvs):
"""
This module will merge the per-chromosome radia files created by spawn_radia into a genome vcf.
It will make 2 vcfs, one for PASSing non-germline calls, and one for all calls.
ARGUMENTS
1. perchrom_rvs: REFER RETURN VALUE of spawn_radia()
RETURN VALUES
1. output_files: Dict of outputs
output_files
|- radia_calls.vcf: <JSid>
+- radia_parsed_filter_passing_calls.vcf: <JSid>
This module corresponds to node 11 on the tree
"""
job.fileStore.logToMaster('Running merge_radia')
work_dir = job.fileStore.getLocalTempDir()
# We need to squash the input dict of dicts to a single dict such that it can be passed to
# get_files_from_filestore
input_files = {filename: jsid for perchrom_files in perchrom_rvs.values()
for filename, jsid in perchrom_files.items()}
input_files = get_files_from_filestore(job, input_files, work_dir,
docker=False)
chromosomes = [''.join(['chr', str(x)]) for x in range(1, 23) + ['X', 'Y']]
with open('/'.join([work_dir, 'radia_calls.vcf']), 'w') as radfile, \
open('/'.join([work_dir, 'radia_filter_passing_calls.vcf']), 'w') as radpassfile:
for chrom in chromosomes:
with open(input_files[''.join(['radia_filtered_', chrom, '.vcf'])], 'r') as filtradfile:
for line in filtradfile:
line = line.strip()
if line.startswith('#'):
if chrom == 'chr1':
print(line, file=radfile)
print(line, file=radpassfile)
continue
else:
print(line, file=radfile)
line = line.split('\t')
if line[6] == 'PASS' and 'MT=GERM' not in line[7]:
print('\t'.join(line), file=radpassfile)
# parse the PASS radia vcf for multiple alt alleles
with open(radpassfile.name, 'r') as radpassfile, \
open('/'.join([work_dir, 'radia_parsed_filter_passing_calls.vcf']),
'w') as parsedradfile:
parse_radia_multi_alt(radpassfile, parsedradfile)
output_files = defaultdict()
for radia_file in [radfile.name, parsedradfile.name]:
output_files[os.path.basename(radia_file)] = job.fileStore.writeGlobalFile(radia_file)
return output_files | python | def merge_radia(job, perchrom_rvs):
"""
This module will merge the per-chromosome radia files created by spawn_radia into a genome vcf.
It will make 2 vcfs, one for PASSing non-germline calls, and one for all calls.
ARGUMENTS
1. perchrom_rvs: REFER RETURN VALUE of spawn_radia()
RETURN VALUES
1. output_files: Dict of outputs
output_files
|- radia_calls.vcf: <JSid>
+- radia_parsed_filter_passing_calls.vcf: <JSid>
This module corresponds to node 11 on the tree
"""
job.fileStore.logToMaster('Running merge_radia')
work_dir = job.fileStore.getLocalTempDir()
# We need to squash the input dict of dicts to a single dict such that it can be passed to
# get_files_from_filestore
input_files = {filename: jsid for perchrom_files in perchrom_rvs.values()
for filename, jsid in perchrom_files.items()}
input_files = get_files_from_filestore(job, input_files, work_dir,
docker=False)
chromosomes = [''.join(['chr', str(x)]) for x in range(1, 23) + ['X', 'Y']]
with open('/'.join([work_dir, 'radia_calls.vcf']), 'w') as radfile, \
open('/'.join([work_dir, 'radia_filter_passing_calls.vcf']), 'w') as radpassfile:
for chrom in chromosomes:
with open(input_files[''.join(['radia_filtered_', chrom, '.vcf'])], 'r') as filtradfile:
for line in filtradfile:
line = line.strip()
if line.startswith('#'):
if chrom == 'chr1':
print(line, file=radfile)
print(line, file=radpassfile)
continue
else:
print(line, file=radfile)
line = line.split('\t')
if line[6] == 'PASS' and 'MT=GERM' not in line[7]:
print('\t'.join(line), file=radpassfile)
# parse the PASS radia vcf for multiple alt alleles
with open(radpassfile.name, 'r') as radpassfile, \
open('/'.join([work_dir, 'radia_parsed_filter_passing_calls.vcf']),
'w') as parsedradfile:
parse_radia_multi_alt(radpassfile, parsedradfile)
output_files = defaultdict()
for radia_file in [radfile.name, parsedradfile.name]:
output_files[os.path.basename(radia_file)] = job.fileStore.writeGlobalFile(radia_file)
return output_files | [
"def",
"merge_radia",
"(",
"job",
",",
"perchrom_rvs",
")",
":",
"job",
".",
"fileStore",
".",
"logToMaster",
"(",
"'Running merge_radia'",
")",
"work_dir",
"=",
"job",
".",
"fileStore",
".",
"getLocalTempDir",
"(",
")",
"input_files",
"=",
"{",
"filename",
":",
"jsid",
"for",
"perchrom_files",
"in",
"perchrom_rvs",
".",
"values",
"(",
")",
"for",
"filename",
",",
"jsid",
"in",
"perchrom_files",
".",
"items",
"(",
")",
"}",
"input_files",
"=",
"get_files_from_filestore",
"(",
"job",
",",
"input_files",
",",
"work_dir",
",",
"docker",
"=",
"False",
")",
"chromosomes",
"=",
"[",
"''",
".",
"join",
"(",
"[",
"'chr'",
",",
"str",
"(",
"x",
")",
"]",
")",
"for",
"x",
"in",
"range",
"(",
"1",
",",
"23",
")",
"+",
"[",
"'X'",
",",
"'Y'",
"]",
"]",
"with",
"open",
"(",
"'/'",
".",
"join",
"(",
"[",
"work_dir",
",",
"'radia_calls.vcf'",
"]",
")",
",",
"'w'",
")",
"as",
"radfile",
",",
"open",
"(",
"'/'",
".",
"join",
"(",
"[",
"work_dir",
",",
"'radia_filter_passing_calls.vcf'",
"]",
")",
",",
"'w'",
")",
"as",
"radpassfile",
":",
"for",
"chrom",
"in",
"chromosomes",
":",
"with",
"open",
"(",
"input_files",
"[",
"''",
".",
"join",
"(",
"[",
"'radia_filtered_'",
",",
"chrom",
",",
"'.vcf'",
"]",
")",
"]",
",",
"'r'",
")",
"as",
"filtradfile",
":",
"for",
"line",
"in",
"filtradfile",
":",
"line",
"=",
"line",
".",
"strip",
"(",
")",
"if",
"line",
".",
"startswith",
"(",
"'#'",
")",
":",
"if",
"chrom",
"==",
"'chr1'",
":",
"print",
"(",
"line",
",",
"file",
"=",
"radfile",
")",
"print",
"(",
"line",
",",
"file",
"=",
"radpassfile",
")",
"continue",
"else",
":",
"print",
"(",
"line",
",",
"file",
"=",
"radfile",
")",
"line",
"=",
"line",
".",
"split",
"(",
"'\\t'",
")",
"if",
"line",
"[",
"6",
"]",
"==",
"'PASS'",
"and",
"'MT=GERM'",
"not",
"in",
"line",
"[",
"7",
"]",
":",
"print",
"(",
"'\\t'",
".",
"join",
"(",
"line",
")",
",",
"file",
"=",
"radpassfile",
")",
"with",
"open",
"(",
"radpassfile",
".",
"name",
",",
"'r'",
")",
"as",
"radpassfile",
",",
"open",
"(",
"'/'",
".",
"join",
"(",
"[",
"work_dir",
",",
"'radia_parsed_filter_passing_calls.vcf'",
"]",
")",
",",
"'w'",
")",
"as",
"parsedradfile",
":",
"parse_radia_multi_alt",
"(",
"radpassfile",
",",
"parsedradfile",
")",
"output_files",
"=",
"defaultdict",
"(",
")",
"for",
"radia_file",
"in",
"[",
"radfile",
".",
"name",
",",
"parsedradfile",
".",
"name",
"]",
":",
"output_files",
"[",
"os",
".",
"path",
".",
"basename",
"(",
"radia_file",
")",
"]",
"=",
"job",
".",
"fileStore",
".",
"writeGlobalFile",
"(",
"radia_file",
")",
"return",
"output_files"
] | This module will merge the per-chromosome radia files created by spawn_radia into a genome vcf.
It will make 2 vcfs, one for PASSing non-germline calls, and one for all calls.
ARGUMENTS
1. perchrom_rvs: REFER RETURN VALUE of spawn_radia()
RETURN VALUES
1. output_files: Dict of outputs
output_files
|- radia_calls.vcf: <JSid>
+- radia_parsed_filter_passing_calls.vcf: <JSid>
This module corresponds to node 11 on the tree | [
"This",
"module",
"will",
"merge",
"the",
"per",
"-",
"chromosome",
"radia",
"files",
"created",
"by",
"spawn_radia",
"into",
"a",
"genome",
"vcf",
".",
"It",
"will",
"make",
"2",
"vcfs",
"one",
"for",
"PASSing",
"non",
"-",
"germline",
"calls",
"and",
"one",
"for",
"all",
"calls",
"."
] | 06310682c50dcf8917b912c8e551299ff7ee41ce | https://github.com/BD2KGenomics/protect/blob/06310682c50dcf8917b912c8e551299ff7ee41ce/attic/ProTECT.py#L738-L787 | train |
BD2KGenomics/protect | attic/ProTECT.py | run_radia | def run_radia(job, bams, univ_options, radia_options, chrom):
"""
This module will run radia on the RNA and DNA bams
ARGUMENTS
1. bams: Dict of bams and their indexes
bams
|- 'tumor_rna': <JSid>
|- 'tumor_rnai': <JSid>
|- 'tumor_dna': <JSid>
|- 'tumor_dnai': <JSid>
|- 'normal_dna': <JSid>
+- 'normal_dnai': <JSid>
2. univ_options: Dict of universal arguments used by almost all tools
univ_options
+- 'dockerhub': <dockerhub to use>
3. radia_options: Dict of parameters specific to radia
radia_options
|- 'dbsnp_vcf': <JSid for dnsnp vcf file>
+- 'genome': <JSid for genome fasta file>
4. chrom: String containing chromosome name with chr appended
RETURN VALUES
1. Dict of filtered radia output vcf and logfile (Nested return)
|- 'radia_filtered_CHROM.vcf': <JSid>
+- 'radia_filtered_CHROM_radia.log': <JSid>
"""
job.fileStore.logToMaster('Running radia on %s:%s' %(univ_options['patient'], chrom))
work_dir = job.fileStore.getLocalTempDir()
input_files = {
'rna.bam': bams['tumor_rna'],
'rna.bam.bai': bams['tumor_rnai'],
'tumor.bam': bams['tumor_dna'],
'tumor.bam.bai': bams['tumor_dnai'],
'normal.bam': bams['normal_dna'],
'normal.bam.bai': bams['normal_dnai'],
'genome.fasta': radia_options['genome_fasta'],
'genome.fasta.fai': radia_options['genome_fai']}
input_files = get_files_from_filestore(job, input_files, work_dir,
docker=True)
radia_output = ''.join([work_dir, '/radia_', chrom, '.vcf'])
radia_log = ''.join([work_dir, '/radia_', chrom, '_radia.log'])
parameters = [univ_options['patient'], # shortID
chrom,
'-n', input_files['normal.bam'],
'-t', input_files['tumor.bam'],
'-r', input_files['rna.bam'],
''.join(['--rnaTumorFasta=', input_files['genome.fasta']]),
'-f', input_files['genome.fasta'],
'-o', docker_path(radia_output),
'-i', 'hg19_M_rCRS',
'-m', input_files['genome.fasta'],
'-d', '[email protected]',
'-q', 'Illumina',
'--disease', 'CANCER',
'-l', 'INFO',
'-g', docker_path(radia_log)]
docker_call(tool='radia', tool_parameters=parameters, work_dir=work_dir,
dockerhub=univ_options['dockerhub'])
output_files = defaultdict()
for radia_file in [radia_output, radia_log]:
output_files[os.path.basename(radia_file)] = \
job.fileStore.writeGlobalFile(radia_file)
filterradia = job.wrapJobFn(run_filter_radia, bams,
output_files[os.path.basename(radia_output)],
univ_options, radia_options, chrom, disk='60G', memory='6G')
job.addChild(filterradia)
return filterradia.rv() | python | def run_radia(job, bams, univ_options, radia_options, chrom):
"""
This module will run radia on the RNA and DNA bams
ARGUMENTS
1. bams: Dict of bams and their indexes
bams
|- 'tumor_rna': <JSid>
|- 'tumor_rnai': <JSid>
|- 'tumor_dna': <JSid>
|- 'tumor_dnai': <JSid>
|- 'normal_dna': <JSid>
+- 'normal_dnai': <JSid>
2. univ_options: Dict of universal arguments used by almost all tools
univ_options
+- 'dockerhub': <dockerhub to use>
3. radia_options: Dict of parameters specific to radia
radia_options
|- 'dbsnp_vcf': <JSid for dnsnp vcf file>
+- 'genome': <JSid for genome fasta file>
4. chrom: String containing chromosome name with chr appended
RETURN VALUES
1. Dict of filtered radia output vcf and logfile (Nested return)
|- 'radia_filtered_CHROM.vcf': <JSid>
+- 'radia_filtered_CHROM_radia.log': <JSid>
"""
job.fileStore.logToMaster('Running radia on %s:%s' %(univ_options['patient'], chrom))
work_dir = job.fileStore.getLocalTempDir()
input_files = {
'rna.bam': bams['tumor_rna'],
'rna.bam.bai': bams['tumor_rnai'],
'tumor.bam': bams['tumor_dna'],
'tumor.bam.bai': bams['tumor_dnai'],
'normal.bam': bams['normal_dna'],
'normal.bam.bai': bams['normal_dnai'],
'genome.fasta': radia_options['genome_fasta'],
'genome.fasta.fai': radia_options['genome_fai']}
input_files = get_files_from_filestore(job, input_files, work_dir,
docker=True)
radia_output = ''.join([work_dir, '/radia_', chrom, '.vcf'])
radia_log = ''.join([work_dir, '/radia_', chrom, '_radia.log'])
parameters = [univ_options['patient'], # shortID
chrom,
'-n', input_files['normal.bam'],
'-t', input_files['tumor.bam'],
'-r', input_files['rna.bam'],
''.join(['--rnaTumorFasta=', input_files['genome.fasta']]),
'-f', input_files['genome.fasta'],
'-o', docker_path(radia_output),
'-i', 'hg19_M_rCRS',
'-m', input_files['genome.fasta'],
'-d', '[email protected]',
'-q', 'Illumina',
'--disease', 'CANCER',
'-l', 'INFO',
'-g', docker_path(radia_log)]
docker_call(tool='radia', tool_parameters=parameters, work_dir=work_dir,
dockerhub=univ_options['dockerhub'])
output_files = defaultdict()
for radia_file in [radia_output, radia_log]:
output_files[os.path.basename(radia_file)] = \
job.fileStore.writeGlobalFile(radia_file)
filterradia = job.wrapJobFn(run_filter_radia, bams,
output_files[os.path.basename(radia_output)],
univ_options, radia_options, chrom, disk='60G', memory='6G')
job.addChild(filterradia)
return filterradia.rv() | [
"def",
"run_radia",
"(",
"job",
",",
"bams",
",",
"univ_options",
",",
"radia_options",
",",
"chrom",
")",
":",
"job",
".",
"fileStore",
".",
"logToMaster",
"(",
"'Running radia on %s:%s'",
"%",
"(",
"univ_options",
"[",
"'patient'",
"]",
",",
"chrom",
")",
")",
"work_dir",
"=",
"job",
".",
"fileStore",
".",
"getLocalTempDir",
"(",
")",
"input_files",
"=",
"{",
"'rna.bam'",
":",
"bams",
"[",
"'tumor_rna'",
"]",
",",
"'rna.bam.bai'",
":",
"bams",
"[",
"'tumor_rnai'",
"]",
",",
"'tumor.bam'",
":",
"bams",
"[",
"'tumor_dna'",
"]",
",",
"'tumor.bam.bai'",
":",
"bams",
"[",
"'tumor_dnai'",
"]",
",",
"'normal.bam'",
":",
"bams",
"[",
"'normal_dna'",
"]",
",",
"'normal.bam.bai'",
":",
"bams",
"[",
"'normal_dnai'",
"]",
",",
"'genome.fasta'",
":",
"radia_options",
"[",
"'genome_fasta'",
"]",
",",
"'genome.fasta.fai'",
":",
"radia_options",
"[",
"'genome_fai'",
"]",
"}",
"input_files",
"=",
"get_files_from_filestore",
"(",
"job",
",",
"input_files",
",",
"work_dir",
",",
"docker",
"=",
"True",
")",
"radia_output",
"=",
"''",
".",
"join",
"(",
"[",
"work_dir",
",",
"'/radia_'",
",",
"chrom",
",",
"'.vcf'",
"]",
")",
"radia_log",
"=",
"''",
".",
"join",
"(",
"[",
"work_dir",
",",
"'/radia_'",
",",
"chrom",
",",
"'_radia.log'",
"]",
")",
"parameters",
"=",
"[",
"univ_options",
"[",
"'patient'",
"]",
",",
"chrom",
",",
"'-n'",
",",
"input_files",
"[",
"'normal.bam'",
"]",
",",
"'-t'",
",",
"input_files",
"[",
"'tumor.bam'",
"]",
",",
"'-r'",
",",
"input_files",
"[",
"'rna.bam'",
"]",
",",
"''",
".",
"join",
"(",
"[",
"'--rnaTumorFasta='",
",",
"input_files",
"[",
"'genome.fasta'",
"]",
"]",
")",
",",
"'-f'",
",",
"input_files",
"[",
"'genome.fasta'",
"]",
",",
"'-o'",
",",
"docker_path",
"(",
"radia_output",
")",
",",
"'-i'",
",",
"'hg19_M_rCRS'",
",",
"'-m'",
",",
"input_files",
"[",
"'genome.fasta'",
"]",
",",
"'-d'",
",",
"'[email protected]'",
",",
"'-q'",
",",
"'Illumina'",
",",
"'--disease'",
",",
"'CANCER'",
",",
"'-l'",
",",
"'INFO'",
",",
"'-g'",
",",
"docker_path",
"(",
"radia_log",
")",
"]",
"docker_call",
"(",
"tool",
"=",
"'radia'",
",",
"tool_parameters",
"=",
"parameters",
",",
"work_dir",
"=",
"work_dir",
",",
"dockerhub",
"=",
"univ_options",
"[",
"'dockerhub'",
"]",
")",
"output_files",
"=",
"defaultdict",
"(",
")",
"for",
"radia_file",
"in",
"[",
"radia_output",
",",
"radia_log",
"]",
":",
"output_files",
"[",
"os",
".",
"path",
".",
"basename",
"(",
"radia_file",
")",
"]",
"=",
"job",
".",
"fileStore",
".",
"writeGlobalFile",
"(",
"radia_file",
")",
"filterradia",
"=",
"job",
".",
"wrapJobFn",
"(",
"run_filter_radia",
",",
"bams",
",",
"output_files",
"[",
"os",
".",
"path",
".",
"basename",
"(",
"radia_output",
")",
"]",
",",
"univ_options",
",",
"radia_options",
",",
"chrom",
",",
"disk",
"=",
"'60G'",
",",
"memory",
"=",
"'6G'",
")",
"job",
".",
"addChild",
"(",
"filterradia",
")",
"return",
"filterradia",
".",
"rv",
"(",
")"
] | This module will run radia on the RNA and DNA bams
ARGUMENTS
1. bams: Dict of bams and their indexes
bams
|- 'tumor_rna': <JSid>
|- 'tumor_rnai': <JSid>
|- 'tumor_dna': <JSid>
|- 'tumor_dnai': <JSid>
|- 'normal_dna': <JSid>
+- 'normal_dnai': <JSid>
2. univ_options: Dict of universal arguments used by almost all tools
univ_options
+- 'dockerhub': <dockerhub to use>
3. radia_options: Dict of parameters specific to radia
radia_options
|- 'dbsnp_vcf': <JSid for dnsnp vcf file>
+- 'genome': <JSid for genome fasta file>
4. chrom: String containing chromosome name with chr appended
RETURN VALUES
1. Dict of filtered radia output vcf and logfile (Nested return)
|- 'radia_filtered_CHROM.vcf': <JSid>
+- 'radia_filtered_CHROM_radia.log': <JSid> | [
"This",
"module",
"will",
"run",
"radia",
"on",
"the",
"RNA",
"and",
"DNA",
"bams"
] | 06310682c50dcf8917b912c8e551299ff7ee41ce | https://github.com/BD2KGenomics/protect/blob/06310682c50dcf8917b912c8e551299ff7ee41ce/attic/ProTECT.py#L790-L857 | train |
BD2KGenomics/protect | attic/ProTECT.py | run_filter_radia | def run_filter_radia(job, bams, radia_file, univ_options, radia_options, chrom):
"""
This module will run filterradia on the RNA and DNA bams.
ARGUMENTS
1. bams: REFER ARGUMENTS of run_radia()
2. univ_options: REFER ARGUMENTS of run_radia()
3. radia_file: <JSid of vcf generated by run_radia()>
    4. radia_options: REFER ARGUMENTS of run_radia()
    5. chrom: REFER ARGUMENTS of run_radia()
RETURN VALUES
1. Dict of filtered radia output vcf and logfile
|- 'radia_filtered_CHROM.vcf': <JSid>
+- 'radia_filtered_CHROM_radia.log': <JSid>
"""
job.fileStore.logToMaster('Running filter-radia on %s:%s' % (univ_options['patient'], chrom))
work_dir = job.fileStore.getLocalTempDir()
input_files = {
'rna.bam': bams['tumor_rna'],
'rna.bam.bai': bams['tumor_rnai'],
'tumor.bam': bams['tumor_dna'],
'tumor.bam.bai': bams['tumor_dnai'],
'normal.bam': bams['normal_dna'],
'normal.bam.bai': bams['normal_dnai'],
'radia.vcf': radia_file,
'genome.fasta': radia_options['genome_fasta'],
'genome.fasta.fai': radia_options['genome_fai']
}
input_files = get_files_from_filestore(job, input_files, work_dir,
docker=True)
filterradia_output = ''.join(['radia_filtered_', chrom, '.vcf'])
filterradia_log = ''.join([work_dir, '/radia_filtered_', chrom, '_radia.log'
])
parameters = [univ_options['patient'], # shortID
chrom.lstrip('chr'),
input_files['radia.vcf'],
'/data',
'/home/radia/scripts',
'-b', '/home/radia/data/hg19/blacklists/1000Genomes/phase1/',
'-d', '/home/radia/data/hg19/snp135',
'-r', '/home/radia/data/hg19/retroGenes/',
'-p', '/home/radia/data/hg19/pseudoGenes/',
'-c', '/home/radia/data/hg19/cosmic/',
'-t', '/home/radia/data/hg19/gaf/2_1',
'--noSnpEff',
'--rnaGeneBlckFile', '/home/radia/data/rnaGeneBlacklist.tab',
'--rnaGeneFamilyBlckFile',
'/home/radia/data/rnaGeneFamilyBlacklist.tab',
'-f', input_files['genome.fasta'],
'--log=INFO',
'-g', docker_path(filterradia_log)]
docker_call(tool='filterradia', tool_parameters=parameters,
work_dir=work_dir, dockerhub=univ_options['dockerhub'])
output_files = defaultdict()
output_files[filterradia_output] = \
job.fileStore.writeGlobalFile(''.join([work_dir, '/',
univ_options['patient'], '_',
chrom, '.vcf']))
output_files[os.path.basename(filterradia_log)] = \
job.fileStore.writeGlobalFile(filterradia_log)
return output_files | python | def run_filter_radia(job, bams, radia_file, univ_options, radia_options, chrom):
"""
This module will run filterradia on the RNA and DNA bams.
ARGUMENTS
1. bams: REFER ARGUMENTS of run_radia()
2. univ_options: REFER ARGUMENTS of run_radia()
3. radia_file: <JSid of vcf generated by run_radia()>
    4. radia_options: REFER ARGUMENTS of run_radia()
    5. chrom: REFER ARGUMENTS of run_radia()
RETURN VALUES
1. Dict of filtered radia output vcf and logfile
|- 'radia_filtered_CHROM.vcf': <JSid>
+- 'radia_filtered_CHROM_radia.log': <JSid>
"""
job.fileStore.logToMaster('Running filter-radia on %s:%s' % (univ_options['patient'], chrom))
work_dir = job.fileStore.getLocalTempDir()
input_files = {
'rna.bam': bams['tumor_rna'],
'rna.bam.bai': bams['tumor_rnai'],
'tumor.bam': bams['tumor_dna'],
'tumor.bam.bai': bams['tumor_dnai'],
'normal.bam': bams['normal_dna'],
'normal.bam.bai': bams['normal_dnai'],
'radia.vcf': radia_file,
'genome.fasta': radia_options['genome_fasta'],
'genome.fasta.fai': radia_options['genome_fai']
}
input_files = get_files_from_filestore(job, input_files, work_dir,
docker=True)
filterradia_output = ''.join(['radia_filtered_', chrom, '.vcf'])
filterradia_log = ''.join([work_dir, '/radia_filtered_', chrom, '_radia.log'
])
parameters = [univ_options['patient'], # shortID
chrom.lstrip('chr'),
input_files['radia.vcf'],
'/data',
'/home/radia/scripts',
'-b', '/home/radia/data/hg19/blacklists/1000Genomes/phase1/',
'-d', '/home/radia/data/hg19/snp135',
'-r', '/home/radia/data/hg19/retroGenes/',
'-p', '/home/radia/data/hg19/pseudoGenes/',
'-c', '/home/radia/data/hg19/cosmic/',
'-t', '/home/radia/data/hg19/gaf/2_1',
'--noSnpEff',
'--rnaGeneBlckFile', '/home/radia/data/rnaGeneBlacklist.tab',
'--rnaGeneFamilyBlckFile',
'/home/radia/data/rnaGeneFamilyBlacklist.tab',
'-f', input_files['genome.fasta'],
'--log=INFO',
'-g', docker_path(filterradia_log)]
docker_call(tool='filterradia', tool_parameters=parameters,
work_dir=work_dir, dockerhub=univ_options['dockerhub'])
output_files = defaultdict()
output_files[filterradia_output] = \
job.fileStore.writeGlobalFile(''.join([work_dir, '/',
univ_options['patient'], '_',
chrom, '.vcf']))
output_files[os.path.basename(filterradia_log)] = \
job.fileStore.writeGlobalFile(filterradia_log)
return output_files | [
"def",
"run_filter_radia",
"(",
"job",
",",
"bams",
",",
"radia_file",
",",
"univ_options",
",",
"radia_options",
",",
"chrom",
")",
":",
"job",
".",
"fileStore",
".",
"logToMaster",
"(",
"'Running filter-radia on %s:%s'",
"%",
"(",
"univ_options",
"[",
"'patient'",
"]",
",",
"chrom",
")",
")",
"work_dir",
"=",
"job",
".",
"fileStore",
".",
"getLocalTempDir",
"(",
")",
"input_files",
"=",
"{",
"'rna.bam'",
":",
"bams",
"[",
"'tumor_rna'",
"]",
",",
"'rna.bam.bai'",
":",
"bams",
"[",
"'tumor_rnai'",
"]",
",",
"'tumor.bam'",
":",
"bams",
"[",
"'tumor_dna'",
"]",
",",
"'tumor.bam.bai'",
":",
"bams",
"[",
"'tumor_dnai'",
"]",
",",
"'normal.bam'",
":",
"bams",
"[",
"'normal_dna'",
"]",
",",
"'normal.bam.bai'",
":",
"bams",
"[",
"'normal_dnai'",
"]",
",",
"'radia.vcf'",
":",
"radia_file",
",",
"'genome.fasta'",
":",
"radia_options",
"[",
"'genome_fasta'",
"]",
",",
"'genome.fasta.fai'",
":",
"radia_options",
"[",
"'genome_fai'",
"]",
"}",
"input_files",
"=",
"get_files_from_filestore",
"(",
"job",
",",
"input_files",
",",
"work_dir",
",",
"docker",
"=",
"True",
")",
"filterradia_output",
"=",
"''",
".",
"join",
"(",
"[",
"'radia_filtered_'",
",",
"chrom",
",",
"'.vcf'",
"]",
")",
"filterradia_log",
"=",
"''",
".",
"join",
"(",
"[",
"work_dir",
",",
"'/radia_filtered_'",
",",
"chrom",
",",
"'_radia.log'",
"]",
")",
"parameters",
"=",
"[",
"univ_options",
"[",
"'patient'",
"]",
",",
"chrom",
".",
"lstrip",
"(",
"'chr'",
")",
",",
"input_files",
"[",
"'radia.vcf'",
"]",
",",
"'/data'",
",",
"'/home/radia/scripts'",
",",
"'-b'",
",",
"'/home/radia/data/hg19/blacklists/1000Genomes/phase1/'",
",",
"'-d'",
",",
"'/home/radia/data/hg19/snp135'",
",",
"'-r'",
",",
"'/home/radia/data/hg19/retroGenes/'",
",",
"'-p'",
",",
"'/home/radia/data/hg19/pseudoGenes/'",
",",
"'-c'",
",",
"'/home/radia/data/hg19/cosmic/'",
",",
"'-t'",
",",
"'/home/radia/data/hg19/gaf/2_1'",
",",
"'--noSnpEff'",
",",
"'--rnaGeneBlckFile'",
",",
"'/home/radia/data/rnaGeneBlacklist.tab'",
",",
"'--rnaGeneFamilyBlckFile'",
",",
"'/home/radia/data/rnaGeneFamilyBlacklist.tab'",
",",
"'-f'",
",",
"input_files",
"[",
"'genome.fasta'",
"]",
",",
"'--log=INFO'",
",",
"'-g'",
",",
"docker_path",
"(",
"filterradia_log",
")",
"]",
"docker_call",
"(",
"tool",
"=",
"'filterradia'",
",",
"tool_parameters",
"=",
"parameters",
",",
"work_dir",
"=",
"work_dir",
",",
"dockerhub",
"=",
"univ_options",
"[",
"'dockerhub'",
"]",
")",
"output_files",
"=",
"defaultdict",
"(",
")",
"output_files",
"[",
"filterradia_output",
"]",
"=",
"job",
".",
"fileStore",
".",
"writeGlobalFile",
"(",
"''",
".",
"join",
"(",
"[",
"work_dir",
",",
"'/'",
",",
"univ_options",
"[",
"'patient'",
"]",
",",
"'_'",
",",
"chrom",
",",
"'.vcf'",
"]",
")",
")",
"output_files",
"[",
"os",
".",
"path",
".",
"basename",
"(",
"filterradia_log",
")",
"]",
"=",
"job",
".",
"fileStore",
".",
"writeGlobalFile",
"(",
"filterradia_log",
")",
"return",
"output_files"
] | This module will run filterradia on the RNA and DNA bams.
ARGUMENTS
1. bams: REFER ARGUMENTS of run_radia()
2. univ_options: REFER ARGUMENTS of run_radia()
3. radia_file: <JSid of vcf generated by run_radia()>
    4. radia_options: REFER ARGUMENTS of run_radia()
    5. chrom: REFER ARGUMENTS of run_radia()
RETURN VALUES
1. Dict of filtered radia output vcf and logfile
|- 'radia_filtered_CHROM.vcf': <JSid>
+- 'radia_filtered_CHROM_radia.log': <JSid> | [
"This",
"module",
"will",
"run",
"filterradia",
"on",
"the",
"RNA",
"and",
"DNA",
"bams",
"."
] | 06310682c50dcf8917b912c8e551299ff7ee41ce | https://github.com/BD2KGenomics/protect/blob/06310682c50dcf8917b912c8e551299ff7ee41ce/attic/ProTECT.py#L860-L921 | train |
BD2KGenomics/protect | attic/ProTECT.py | merge_mutect | def merge_mutect(job, perchrom_rvs):
"""
This module will merge the per-chromosome mutect files created by spawn_mutect into a genome
vcf. It will make 2 vcfs, one for PASSing non-germline calls, and one for all calls.
ARGUMENTS
1. perchrom_rvs: REFER RETURN VALUE of spawn_mutect()
RETURN VALUES
1. output_files: <JSid for mutect_passing_calls.vcf>
This module corresponds to node 11 on the tree
"""
job.fileStore.logToMaster('Running merge_mutect')
work_dir = job.fileStore.getLocalTempDir()
# We need to squash the input dict of dicts to a single dict such that it can be passed to
# get_files_from_filestore
input_files = {filename: jsid for perchrom_files in perchrom_rvs.values()
for filename, jsid in perchrom_files.items()}
input_files = get_files_from_filestore(job, input_files, work_dir, docker=False)
chromosomes = [''.join(['chr', str(x)]) for x in range(1, 23) + ['X', 'Y']]
with open('/'.join([work_dir, 'mutect_calls.vcf']), 'w') as mutvcf, \
open('/'.join([work_dir, 'mutect_calls.out']), 'w') as mutout, \
open('/'.join([work_dir, 'mutect_passing_calls.vcf']), 'w') as mutpassvcf:
out_header_not_printed = True
for chrom in chromosomes:
with open(input_files[''.join(['mutect_', chrom, '.vcf'])], 'r') as mutfile:
for line in mutfile:
line = line.strip()
if line.startswith('#'):
if chrom == 'chr1':
print(line, file=mutvcf)
print(line, file=mutpassvcf)
continue
else:
print(line, file=mutvcf)
line = line.split('\t')
if line[6] != 'REJECT':
print('\t'.join(line), file=mutpassvcf)
with open(input_files[''.join(['mutect_', chrom,
'.out'])], 'r') as mutfile:
for line in mutfile:
line = line.strip()
if line.startswith('#'):
if chrom == 'chr1':
print(line, file=mutout)
continue
elif out_header_not_printed:
print(line, file=mutout)
out_header_not_printed = False
else:
print(line, file=mutout)
output_file = job.fileStore.writeGlobalFile(mutpassvcf.name)
return output_file | python | def merge_mutect(job, perchrom_rvs):
"""
This module will merge the per-chromosome mutect files created by spawn_mutect into a genome
vcf. It will make 2 vcfs, one for PASSing non-germline calls, and one for all calls.
ARGUMENTS
1. perchrom_rvs: REFER RETURN VALUE of spawn_mutect()
RETURN VALUES
1. output_files: <JSid for mutect_passing_calls.vcf>
This module corresponds to node 11 on the tree
"""
job.fileStore.logToMaster('Running merge_mutect')
work_dir = job.fileStore.getLocalTempDir()
# We need to squash the input dict of dicts to a single dict such that it can be passed to
# get_files_from_filestore
input_files = {filename: jsid for perchrom_files in perchrom_rvs.values()
for filename, jsid in perchrom_files.items()}
input_files = get_files_from_filestore(job, input_files, work_dir, docker=False)
chromosomes = [''.join(['chr', str(x)]) for x in range(1, 23) + ['X', 'Y']]
with open('/'.join([work_dir, 'mutect_calls.vcf']), 'w') as mutvcf, \
open('/'.join([work_dir, 'mutect_calls.out']), 'w') as mutout, \
open('/'.join([work_dir, 'mutect_passing_calls.vcf']), 'w') as mutpassvcf:
out_header_not_printed = True
for chrom in chromosomes:
with open(input_files[''.join(['mutect_', chrom, '.vcf'])], 'r') as mutfile:
for line in mutfile:
line = line.strip()
if line.startswith('#'):
if chrom == 'chr1':
print(line, file=mutvcf)
print(line, file=mutpassvcf)
continue
else:
print(line, file=mutvcf)
line = line.split('\t')
if line[6] != 'REJECT':
print('\t'.join(line), file=mutpassvcf)
with open(input_files[''.join(['mutect_', chrom,
'.out'])], 'r') as mutfile:
for line in mutfile:
line = line.strip()
if line.startswith('#'):
if chrom == 'chr1':
print(line, file=mutout)
continue
elif out_header_not_printed:
print(line, file=mutout)
out_header_not_printed = False
else:
print(line, file=mutout)
output_file = job.fileStore.writeGlobalFile(mutpassvcf.name)
return output_file | [
"def",
"merge_mutect",
"(",
"job",
",",
"perchrom_rvs",
")",
":",
"job",
".",
"fileStore",
".",
"logToMaster",
"(",
"'Running merge_mutect'",
")",
"work_dir",
"=",
"job",
".",
"fileStore",
".",
"getLocalTempDir",
"(",
")",
"input_files",
"=",
"{",
"filename",
":",
"jsid",
"for",
"perchrom_files",
"in",
"perchrom_rvs",
".",
"values",
"(",
")",
"for",
"filename",
",",
"jsid",
"in",
"perchrom_files",
".",
"items",
"(",
")",
"}",
"input_files",
"=",
"get_files_from_filestore",
"(",
"job",
",",
"input_files",
",",
"work_dir",
",",
"docker",
"=",
"False",
")",
"chromosomes",
"=",
"[",
"''",
".",
"join",
"(",
"[",
"'chr'",
",",
"str",
"(",
"x",
")",
"]",
")",
"for",
"x",
"in",
"range",
"(",
"1",
",",
"23",
")",
"+",
"[",
"'X'",
",",
"'Y'",
"]",
"]",
"with",
"open",
"(",
"'/'",
".",
"join",
"(",
"[",
"work_dir",
",",
"'mutect_calls.vcf'",
"]",
")",
",",
"'w'",
")",
"as",
"mutvcf",
",",
"open",
"(",
"'/'",
".",
"join",
"(",
"[",
"work_dir",
",",
"'mutect_calls.out'",
"]",
")",
",",
"'w'",
")",
"as",
"mutout",
",",
"open",
"(",
"'/'",
".",
"join",
"(",
"[",
"work_dir",
",",
"'mutect_passing_calls.vcf'",
"]",
")",
",",
"'w'",
")",
"as",
"mutpassvcf",
":",
"out_header_not_printed",
"=",
"True",
"for",
"chrom",
"in",
"chromosomes",
":",
"with",
"open",
"(",
"input_files",
"[",
"''",
".",
"join",
"(",
"[",
"'mutect_'",
",",
"chrom",
",",
"'.vcf'",
"]",
")",
"]",
",",
"'r'",
")",
"as",
"mutfile",
":",
"for",
"line",
"in",
"mutfile",
":",
"line",
"=",
"line",
".",
"strip",
"(",
")",
"if",
"line",
".",
"startswith",
"(",
"'#'",
")",
":",
"if",
"chrom",
"==",
"'chr1'",
":",
"print",
"(",
"line",
",",
"file",
"=",
"mutvcf",
")",
"print",
"(",
"line",
",",
"file",
"=",
"mutpassvcf",
")",
"continue",
"else",
":",
"print",
"(",
"line",
",",
"file",
"=",
"mutvcf",
")",
"line",
"=",
"line",
".",
"split",
"(",
"'\\t'",
")",
"if",
"line",
"[",
"6",
"]",
"!=",
"'REJECT'",
":",
"print",
"(",
"'\\t'",
".",
"join",
"(",
"line",
")",
",",
"file",
"=",
"mutpassvcf",
")",
"with",
"open",
"(",
"input_files",
"[",
"''",
".",
"join",
"(",
"[",
"'mutect_'",
",",
"chrom",
",",
"'.out'",
"]",
")",
"]",
",",
"'r'",
")",
"as",
"mutfile",
":",
"for",
"line",
"in",
"mutfile",
":",
"line",
"=",
"line",
".",
"strip",
"(",
")",
"if",
"line",
".",
"startswith",
"(",
"'#'",
")",
":",
"if",
"chrom",
"==",
"'chr1'",
":",
"print",
"(",
"line",
",",
"file",
"=",
"mutout",
")",
"continue",
"elif",
"out_header_not_printed",
":",
"print",
"(",
"line",
",",
"file",
"=",
"mutout",
")",
"out_header_not_printed",
"=",
"False",
"else",
":",
"print",
"(",
"line",
",",
"file",
"=",
"mutout",
")",
"output_file",
"=",
"job",
".",
"fileStore",
".",
"writeGlobalFile",
"(",
"mutpassvcf",
".",
"name",
")",
"return",
"output_file"
] | This module will merge the per-chromosome mutect files created by spawn_mutect into a genome
vcf. It will make 2 vcfs, one for PASSing non-germline calls, and one for all calls.
ARGUMENTS
1. perchrom_rvs: REFER RETURN VALUE of spawn_mutect()
RETURN VALUES
1. output_files: <JSid for mutect_passing_calls.vcf>
This module corresponds to node 11 on the tree | [
"This",
"module",
"will",
"merge",
"the",
"per",
"-",
"chromosome",
"mutect",
"files",
"created",
"by",
"spawn_mutect",
"into",
"a",
"genome",
"vcf",
".",
"It",
"will",
"make",
"2",
"vcfs",
"one",
"for",
"PASSing",
"non",
"-",
"germline",
"calls",
"and",
"one",
"for",
"all",
"calls",
"."
] | 06310682c50dcf8917b912c8e551299ff7ee41ce | https://github.com/BD2KGenomics/protect/blob/06310682c50dcf8917b912c8e551299ff7ee41ce/attic/ProTECT.py#L973-L1026 | train |
BD2KGenomics/protect | attic/ProTECT.py | run_mutect | def run_mutect(job, tumor_bam, normal_bam, univ_options, mutect_options, chrom):
"""
This module will run mutect on the DNA bams
ARGUMENTS
1. tumor_bam: REFER ARGUMENTS of spawn_mutect()
2. normal_bam: REFER ARGUMENTS of spawn_mutect()
3. univ_options: REFER ARGUMENTS of spawn_mutect()
4. mutect_options: REFER ARGUMENTS of spawn_mutect()
5. chrom: String containing chromosome name with chr appended
RETURN VALUES
1. output_files: Dict of results of mutect for chromosome
output_files
|- 'mutect_CHROM.vcf': <JSid>
+- 'mutect_CHROM.out': <JSid>
This module corresponds to node 12 on the tree
"""
job.fileStore.logToMaster('Running mutect on %s:%s' % (univ_options['patient'], chrom))
work_dir = job.fileStore.getLocalTempDir()
input_files = {
'tumor.bam': tumor_bam['tumor_dna_fix_pg_sorted.bam'],
'tumor.bam.bai': tumor_bam['tumor_dna_fix_pg_sorted.bam.bai'],
'normal.bam': normal_bam['normal_dna_fix_pg_sorted.bam'],
'normal.bam.bai': normal_bam['normal_dna_fix_pg_sorted.bam.bai'],
'genome.fa': mutect_options['genome_fasta'],
'genome.fa.fai': mutect_options['genome_fai'],
'genome.dict': mutect_options['genome_dict'],
'cosmic.vcf': mutect_options['cosmic_vcf'],
'cosmic.vcf.idx': mutect_options['cosmic_idx'],
'dbsnp.vcf': mutect_options['dbsnp_vcf'],
'dbsnp.vcf.idx': mutect_options['dbsnp_idx']}
input_files = get_files_from_filestore(job, input_files, work_dir,
docker=True)
mutout = ''.join([work_dir, '/mutect_', chrom, '.out'])
mutvcf = ''.join([work_dir, '/mutect_', chrom, '.vcf'])
parameters = ['-R', input_files['genome.fa'],
'--cosmic', input_files['cosmic.vcf'],
'--dbsnp', input_files['dbsnp.vcf'],
'--input_file:normal', input_files['normal.bam'],
'--input_file:tumor', input_files['tumor.bam'],
#'--tumor_lod', str(10),
#'--initial_tumor_lod', str(4.0),
'-L', chrom,
'--out', docker_path(mutout),
'--vcf', docker_path(mutvcf)
]
Xmx = mutect_options['java_Xmx'] if mutect_options['java_Xmx'] else univ_options['java_Xmx']
docker_call(tool='mutect:1.1.7', tool_parameters=parameters, work_dir=work_dir,
dockerhub=univ_options['dockerhub'], java_opts=Xmx)
output_files = defaultdict()
for mutect_file in [mutout, mutvcf]:
output_files[os.path.basename(mutect_file)] = job.fileStore.writeGlobalFile(mutect_file)
return output_files | python | def run_mutect(job, tumor_bam, normal_bam, univ_options, mutect_options, chrom):
"""
This module will run mutect on the DNA bams
ARGUMENTS
1. tumor_bam: REFER ARGUMENTS of spawn_mutect()
2. normal_bam: REFER ARGUMENTS of spawn_mutect()
3. univ_options: REFER ARGUMENTS of spawn_mutect()
4. mutect_options: REFER ARGUMENTS of spawn_mutect()
5. chrom: String containing chromosome name with chr appended
RETURN VALUES
1. output_files: Dict of results of mutect for chromosome
output_files
|- 'mutect_CHROM.vcf': <JSid>
+- 'mutect_CHROM.out': <JSid>
This module corresponds to node 12 on the tree
"""
job.fileStore.logToMaster('Running mutect on %s:%s' % (univ_options['patient'], chrom))
work_dir = job.fileStore.getLocalTempDir()
input_files = {
'tumor.bam': tumor_bam['tumor_dna_fix_pg_sorted.bam'],
'tumor.bam.bai': tumor_bam['tumor_dna_fix_pg_sorted.bam.bai'],
'normal.bam': normal_bam['normal_dna_fix_pg_sorted.bam'],
'normal.bam.bai': normal_bam['normal_dna_fix_pg_sorted.bam.bai'],
'genome.fa': mutect_options['genome_fasta'],
'genome.fa.fai': mutect_options['genome_fai'],
'genome.dict': mutect_options['genome_dict'],
'cosmic.vcf': mutect_options['cosmic_vcf'],
'cosmic.vcf.idx': mutect_options['cosmic_idx'],
'dbsnp.vcf': mutect_options['dbsnp_vcf'],
'dbsnp.vcf.idx': mutect_options['dbsnp_idx']}
input_files = get_files_from_filestore(job, input_files, work_dir,
docker=True)
mutout = ''.join([work_dir, '/mutect_', chrom, '.out'])
mutvcf = ''.join([work_dir, '/mutect_', chrom, '.vcf'])
parameters = ['-R', input_files['genome.fa'],
'--cosmic', input_files['cosmic.vcf'],
'--dbsnp', input_files['dbsnp.vcf'],
'--input_file:normal', input_files['normal.bam'],
'--input_file:tumor', input_files['tumor.bam'],
#'--tumor_lod', str(10),
#'--initial_tumor_lod', str(4.0),
'-L', chrom,
'--out', docker_path(mutout),
'--vcf', docker_path(mutvcf)
]
Xmx = mutect_options['java_Xmx'] if mutect_options['java_Xmx'] else univ_options['java_Xmx']
docker_call(tool='mutect:1.1.7', tool_parameters=parameters, work_dir=work_dir,
dockerhub=univ_options['dockerhub'], java_opts=Xmx)
output_files = defaultdict()
for mutect_file in [mutout, mutvcf]:
output_files[os.path.basename(mutect_file)] = job.fileStore.writeGlobalFile(mutect_file)
return output_files | [
"def",
"run_mutect",
"(",
"job",
",",
"tumor_bam",
",",
"normal_bam",
",",
"univ_options",
",",
"mutect_options",
",",
"chrom",
")",
":",
"job",
".",
"fileStore",
".",
"logToMaster",
"(",
"'Running mutect on %s:%s'",
"%",
"(",
"univ_options",
"[",
"'patient'",
"]",
",",
"chrom",
")",
")",
"work_dir",
"=",
"job",
".",
"fileStore",
".",
"getLocalTempDir",
"(",
")",
"input_files",
"=",
"{",
"'tumor.bam'",
":",
"tumor_bam",
"[",
"'tumor_dna_fix_pg_sorted.bam'",
"]",
",",
"'tumor.bam.bai'",
":",
"tumor_bam",
"[",
"'tumor_dna_fix_pg_sorted.bam.bai'",
"]",
",",
"'normal.bam'",
":",
"normal_bam",
"[",
"'normal_dna_fix_pg_sorted.bam'",
"]",
",",
"'normal.bam.bai'",
":",
"normal_bam",
"[",
"'normal_dna_fix_pg_sorted.bam.bai'",
"]",
",",
"'genome.fa'",
":",
"mutect_options",
"[",
"'genome_fasta'",
"]",
",",
"'genome.fa.fai'",
":",
"mutect_options",
"[",
"'genome_fai'",
"]",
",",
"'genome.dict'",
":",
"mutect_options",
"[",
"'genome_dict'",
"]",
",",
"'cosmic.vcf'",
":",
"mutect_options",
"[",
"'cosmic_vcf'",
"]",
",",
"'cosmic.vcf.idx'",
":",
"mutect_options",
"[",
"'cosmic_idx'",
"]",
",",
"'dbsnp.vcf'",
":",
"mutect_options",
"[",
"'dbsnp_vcf'",
"]",
",",
"'dbsnp.vcf.idx'",
":",
"mutect_options",
"[",
"'dbsnp_idx'",
"]",
"}",
"input_files",
"=",
"get_files_from_filestore",
"(",
"job",
",",
"input_files",
",",
"work_dir",
",",
"docker",
"=",
"True",
")",
"mutout",
"=",
"''",
".",
"join",
"(",
"[",
"work_dir",
",",
"'/mutect_'",
",",
"chrom",
",",
"'.out'",
"]",
")",
"mutvcf",
"=",
"''",
".",
"join",
"(",
"[",
"work_dir",
",",
"'/mutect_'",
",",
"chrom",
",",
"'.vcf'",
"]",
")",
"parameters",
"=",
"[",
"'-R'",
",",
"input_files",
"[",
"'genome.fa'",
"]",
",",
"'--cosmic'",
",",
"input_files",
"[",
"'cosmic.vcf'",
"]",
",",
"'--dbsnp'",
",",
"input_files",
"[",
"'dbsnp.vcf'",
"]",
",",
"'--input_file:normal'",
",",
"input_files",
"[",
"'normal.bam'",
"]",
",",
"'--input_file:tumor'",
",",
"input_files",
"[",
"'tumor.bam'",
"]",
",",
"'-L'",
",",
"chrom",
",",
"'--out'",
",",
"docker_path",
"(",
"mutout",
")",
",",
"'--vcf'",
",",
"docker_path",
"(",
"mutvcf",
")",
"]",
"Xmx",
"=",
"mutect_options",
"[",
"'java_Xmx'",
"]",
"if",
"mutect_options",
"[",
"'java_Xmx'",
"]",
"else",
"univ_options",
"[",
"'java_Xmx'",
"]",
"docker_call",
"(",
"tool",
"=",
"'mutect:1.1.7'",
",",
"tool_parameters",
"=",
"parameters",
",",
"work_dir",
"=",
"work_dir",
",",
"dockerhub",
"=",
"univ_options",
"[",
"'dockerhub'",
"]",
",",
"java_opts",
"=",
"Xmx",
")",
"output_files",
"=",
"defaultdict",
"(",
")",
"for",
"mutect_file",
"in",
"[",
"mutout",
",",
"mutvcf",
"]",
":",
"output_files",
"[",
"os",
".",
"path",
".",
"basename",
"(",
"mutect_file",
")",
"]",
"=",
"job",
".",
"fileStore",
".",
"writeGlobalFile",
"(",
"mutect_file",
")",
"return",
"output_files"
] | This module will run mutect on the DNA bams
ARGUMENTS
1. tumor_bam: REFER ARGUMENTS of spawn_mutect()
2. normal_bam: REFER ARGUMENTS of spawn_mutect()
3. univ_options: REFER ARGUMENTS of spawn_mutect()
4. mutect_options: REFER ARGUMENTS of spawn_mutect()
5. chrom: String containing chromosome name with chr appended
RETURN VALUES
1. output_files: Dict of results of mutect for chromosome
output_files
|- 'mutect_CHROM.vcf': <JSid>
+- 'mutect_CHROM.out': <JSid>
This module corresponds to node 12 on the tree | [
"This",
"module",
"will",
"run",
"mutect",
"on",
"the",
"DNA",
"bams"
] | 06310682c50dcf8917b912c8e551299ff7ee41ce | https://github.com/BD2KGenomics/protect/blob/06310682c50dcf8917b912c8e551299ff7ee41ce/attic/ProTECT.py#L1029-L1083 | train |
BD2KGenomics/protect | attic/ProTECT.py | run_indel_caller | def run_indel_caller(job, tumor_bam, normal_bam, univ_options, indel_options):
"""
This module will run an indel caller on the DNA bams. This module will be
implemented in the future.
This module corresponds to node 13 on the tree
"""
job.fileStore.logToMaster('Running INDEL on %s' % univ_options['patient'])
indel_file = job.fileStore.getLocalTempFile()
output_file = job.fileStore.writeGlobalFile(indel_file)
return output_file | python | def run_indel_caller(job, tumor_bam, normal_bam, univ_options, indel_options):
"""
This module will run an indel caller on the DNA bams. This module will be
implemented in the future.
This module corresponds to node 13 on the tree
"""
job.fileStore.logToMaster('Running INDEL on %s' % univ_options['patient'])
indel_file = job.fileStore.getLocalTempFile()
output_file = job.fileStore.writeGlobalFile(indel_file)
return output_file | [
"def",
"run_indel_caller",
"(",
"job",
",",
"tumor_bam",
",",
"normal_bam",
",",
"univ_options",
",",
"indel_options",
")",
":",
"job",
".",
"fileStore",
".",
"logToMaster",
"(",
"'Running INDEL on %s'",
"%",
"univ_options",
"[",
"'patient'",
"]",
")",
"indel_file",
"=",
"job",
".",
"fileStore",
".",
"getLocalTempFile",
"(",
")",
"output_file",
"=",
"job",
".",
"fileStore",
".",
"writeGlobalFile",
"(",
"indel_file",
")",
"return",
"output_file"
] | This module will run an indel caller on the DNA bams. This module will be
implemented in the future.
This module corresponds to node 13 on the tree | [
"This",
"module",
"will",
"run",
"an",
"indel",
"caller",
"on",
"the",
"DNA",
"bams",
".",
"This",
"module",
"will",
"be",
"implemented",
"in",
"the",
"future",
"."
] | 06310682c50dcf8917b912c8e551299ff7ee41ce | https://github.com/BD2KGenomics/protect/blob/06310682c50dcf8917b912c8e551299ff7ee41ce/attic/ProTECT.py#L1086-L1096 | train |
BD2KGenomics/protect | attic/ProTECT.py | run_fusion_caller | def run_fusion_caller(job, star_bam, univ_options, fusion_options):
"""
This module will run a fusion caller on RNA bams. This module will be
implemented in the future.
This module corresponds to node 10 on the tree
"""
job.fileStore.logToMaster('Running FUSION on %s' % univ_options['patient'])
fusion_file = job.fileStore.getLocalTempFile()
output_file = job.fileStore.writeGlobalFile(fusion_file)
return output_file | python | def run_fusion_caller(job, star_bam, univ_options, fusion_options):
"""
This module will run a fusion caller on RNA bams. This module will be
implemented in the future.
This module corresponds to node 10 on the tree
"""
job.fileStore.logToMaster('Running FUSION on %s' % univ_options['patient'])
fusion_file = job.fileStore.getLocalTempFile()
output_file = job.fileStore.writeGlobalFile(fusion_file)
return output_file | [
"def",
"run_fusion_caller",
"(",
"job",
",",
"star_bam",
",",
"univ_options",
",",
"fusion_options",
")",
":",
"job",
".",
"fileStore",
".",
"logToMaster",
"(",
"'Running FUSION on %s'",
"%",
"univ_options",
"[",
"'patient'",
"]",
")",
"fusion_file",
"=",
"job",
".",
"fileStore",
".",
"getLocalTempFile",
"(",
")",
"output_file",
"=",
"job",
".",
"fileStore",
".",
"writeGlobalFile",
"(",
"fusion_file",
")",
"return",
"output_file"
] | This module will run a fusion caller on RNA bams. This module will be
implemented in the future.
This module corresponds to node 10 on the tree | [
"This",
"module",
"will",
"run",
"a",
"fusion",
"caller",
"on",
"DNA",
"bams",
".",
"This",
"module",
"will",
"be",
"implemented",
"in",
"the",
"future",
"."
] | 06310682c50dcf8917b912c8e551299ff7ee41ce | https://github.com/BD2KGenomics/protect/blob/06310682c50dcf8917b912c8e551299ff7ee41ce/attic/ProTECT.py#L1099-L1109 | train |
BD2KGenomics/protect | attic/ProTECT.py | run_mutation_aggregator | def run_mutation_aggregator(job, fusion_output, radia_output, mutect_output, indel_output,
univ_options):
"""
This module will aggregate all the mutations called in the previous steps and will then call
snpeff on the results.
ARGUMENTS
1. fusion_output: <JSid for vcf generated by the fusion caller>
2. radia_output: <JSid for vcf generated by radia>
3. mutect_output: <JSid for vcf generated by mutect>
4. indel_output: <JSid for vcf generated by the indel caller>
RETURN VALUES
1. output_file: <JSid for merged vcf>
This module corresponds to node 15 on the tree
"""
job.fileStore.logToMaster('Aggregating mutations for %s' % univ_options['patient'])
work_dir = job.fileStore.getLocalTempDir()
input_files = {
'mutect.vcf': mutect_output,
'radia.vcf': radia_output['radia_parsed_filter_passing_calls.vcf'],
'indel.vcf': indel_output,
'fusion.vcf': fusion_output}
input_files = get_files_from_filestore(job, input_files, work_dir, docker=False)
# Modify these once INDELs and Fusions are implemented
input_files.pop('indel.vcf')
input_files.pop('fusion.vcf')
# read files into memory
vcf_file = defaultdict()
mutcallers = input_files.keys()
with open(''.join([work_dir, '/', univ_options['patient'], '_merged_mutations.vcf']),
'w') as merged_mut_file:
for mut_caller in mutcallers:
caller = mut_caller.rstrip('.vcf')
vcf_file[caller] = defaultdict()
with open(input_files[mut_caller], 'r') as mutfile:
for line in mutfile:
if line.startswith('#'):
if caller == 'radia':
print(line.strip(), file=merged_mut_file)
continue
line = line.strip().split()
vcf_file[caller][(line[0], line[1], line[3], line[4])] = line
# This method can be changed in the future to incorporate more callers and
# fancier integration methods
merge_vcfs(vcf_file, merged_mut_file.name)
export_results(merged_mut_file.name, univ_options)
output_file = job.fileStore.writeGlobalFile(merged_mut_file.name)
return output_file | python | def run_mutation_aggregator(job, fusion_output, radia_output, mutect_output, indel_output,
univ_options):
"""
This module will aggregate all the mutations called in the previous steps and will then call
snpeff on the results.
ARGUMENTS
1. fusion_output: <JSid for vcf generated by the fusion caller>
2. radia_output: <JSid for vcf generated by radia>
3. mutect_output: <JSid for vcf generated by mutect>
4. indel_output: <JSid for vcf generated by the indel caller>
RETURN VALUES
1. output_file: <JSid for merged vcf>
This module corresponds to node 15 on the tree
"""
job.fileStore.logToMaster('Aggregating mutations for %s' % univ_options['patient'])
work_dir = job.fileStore.getLocalTempDir()
input_files = {
'mutect.vcf': mutect_output,
'radia.vcf': radia_output['radia_parsed_filter_passing_calls.vcf'],
'indel.vcf': indel_output,
'fusion.vcf': fusion_output}
input_files = get_files_from_filestore(job, input_files, work_dir, docker=False)
# Modify these once INDELs and Fusions are implemented
input_files.pop('indel.vcf')
input_files.pop('fusion.vcf')
# read files into memory
vcf_file = defaultdict()
mutcallers = input_files.keys()
with open(''.join([work_dir, '/', univ_options['patient'], '_merged_mutations.vcf']),
'w') as merged_mut_file:
for mut_caller in mutcallers:
caller = mut_caller.rstrip('.vcf')
vcf_file[caller] = defaultdict()
with open(input_files[mut_caller], 'r') as mutfile:
for line in mutfile:
if line.startswith('#'):
if caller == 'radia':
print(line.strip(), file=merged_mut_file)
continue
line = line.strip().split()
vcf_file[caller][(line[0], line[1], line[3], line[4])] = line
# This method can be changed in the future to incorporate more callers and
# fancier integration methods
merge_vcfs(vcf_file, merged_mut_file.name)
export_results(merged_mut_file.name, univ_options)
output_file = job.fileStore.writeGlobalFile(merged_mut_file.name)
return output_file | [
"def",
"run_mutation_aggregator",
"(",
"job",
",",
"fusion_output",
",",
"radia_output",
",",
"mutect_output",
",",
"indel_output",
",",
"univ_options",
")",
":",
"job",
".",
"fileStore",
".",
"logToMaster",
"(",
"'Aggregating mutations for %s'",
"%",
"univ_options",
"[",
"'patient'",
"]",
")",
"work_dir",
"=",
"job",
".",
"fileStore",
".",
"getLocalTempDir",
"(",
")",
"input_files",
"=",
"{",
"'mutect.vcf'",
":",
"mutect_output",
",",
"'radia.vcf'",
":",
"radia_output",
"[",
"'radia_parsed_filter_passing_calls.vcf'",
"]",
",",
"'indel.vcf'",
":",
"indel_output",
",",
"'fusion.vcf'",
":",
"fusion_output",
"}",
"input_files",
"=",
"get_files_from_filestore",
"(",
"job",
",",
"input_files",
",",
"work_dir",
",",
"docker",
"=",
"False",
")",
"input_files",
".",
"pop",
"(",
"'indel.vcf'",
")",
"input_files",
".",
"pop",
"(",
"'fusion.vcf'",
")",
"vcf_file",
"=",
"defaultdict",
"(",
")",
"mutcallers",
"=",
"input_files",
".",
"keys",
"(",
")",
"with",
"open",
"(",
"''",
".",
"join",
"(",
"[",
"work_dir",
",",
"'/'",
",",
"univ_options",
"[",
"'patient'",
"]",
",",
"'_merged_mutations.vcf'",
"]",
")",
",",
"'w'",
")",
"as",
"merged_mut_file",
":",
"for",
"mut_caller",
"in",
"mutcallers",
":",
"caller",
"=",
"mut_caller",
".",
"rstrip",
"(",
"'.vcf'",
")",
"vcf_file",
"[",
"caller",
"]",
"=",
"defaultdict",
"(",
")",
"with",
"open",
"(",
"input_files",
"[",
"mut_caller",
"]",
",",
"'r'",
")",
"as",
"mutfile",
":",
"for",
"line",
"in",
"mutfile",
":",
"if",
"line",
".",
"startswith",
"(",
"'#'",
")",
":",
"if",
"caller",
"==",
"'radia'",
":",
"print",
"(",
"line",
".",
"strip",
"(",
")",
",",
"file",
"=",
"merged_mut_file",
")",
"continue",
"line",
"=",
"line",
".",
"strip",
"(",
")",
".",
"split",
"(",
")",
"vcf_file",
"[",
"caller",
"]",
"[",
"(",
"line",
"[",
"0",
"]",
",",
"line",
"[",
"1",
"]",
",",
"line",
"[",
"3",
"]",
",",
"line",
"[",
"4",
"]",
")",
"]",
"=",
"line",
"merge_vcfs",
"(",
"vcf_file",
",",
"merged_mut_file",
".",
"name",
")",
"export_results",
"(",
"merged_mut_file",
".",
"name",
",",
"univ_options",
")",
"output_file",
"=",
"job",
".",
"fileStore",
".",
"writeGlobalFile",
"(",
"merged_mut_file",
".",
"name",
")",
"return",
"output_file"
] | This module will aggregate all the mutations called in the previous steps and will then call
snpeff on the results.
ARGUMENTS
1. fusion_output: <JSid for vcf generated by the fusion caller>
2. radia_output: <JSid for vcf generated by radia>
3. mutect_output: <JSid for vcf generated by mutect>
4. indel_output: <JSid for vcf generated by the indel caller>
RETURN VALUES
1. output_file: <JSid for merged vcf>
This module corresponds to node 15 on the tree | [
"This",
"module",
"will",
"aggregate",
"all",
"the",
"mutations",
"called",
"in",
"the",
"previous",
"steps",
"and",
"will",
"then",
"call",
"snpeff",
"on",
"the",
"results",
"."
] | 06310682c50dcf8917b912c8e551299ff7ee41ce | https://github.com/BD2KGenomics/protect/blob/06310682c50dcf8917b912c8e551299ff7ee41ce/attic/ProTECT.py#L1112-L1161 | train |
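run_mutation_aggregator above keeps only the calls shared by MuTect and RADIA, and its comment notes that "fancier integration methods" could be swapped in later. One illustrative alternative (purely hypothetical, not part of ProTECT) is a majority vote over any number of callers, each supplying a dict keyed by (chrom, pos, ref, alt) exactly as in the record above:

from collections import Counter

def majority_vote_merge(calls_by_caller, min_callers=2):
    # calls_by_caller: {'mutect': {(chrom, pos, ref, alt): vcf_fields}, 'radia': {...}, ...}
    counts = Counter()
    for calls in calls_by_caller.values():
        counts.update(calls.keys())
    kept = {variant for variant, n in counts.items() if n >= min_callers}
    merged = {}
    for calls in calls_by_caller.values():
        for variant, fields in calls.items():
            if variant in kept and variant not in merged:
                merged[variant] = fields  # keep the first caller's record for each variant
    return merged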
BD2KGenomics/protect | attic/ProTECT.py | run_snpeff | def run_snpeff(job, merged_mutation_file, univ_options, snpeff_options):
"""
This module will run snpeff on the aggregated mutation calls. Currently the only mutations
called are SNPs hence SnpEff suffices. This node will be replaced in the future with another
translator.
ARGUMENTS
1. merged_mutation_file: <JSid for merged vcf>
2. univ_options: Dict of universal arguments used by almost all tools
univ_options
+- 'dockerhub': <dockerhub to use>
3. snpeff_options: Dict of parameters specific to snpeff
snpeff_options
+- 'index_tar': <JSid for the snpEff index tarball>
RETURN VALUES
1. output_file: <JSid for the snpeffed vcf>
This node corresponds to node 16 on the tree
"""
job.fileStore.logToMaster('Running snpeff on %s' % univ_options['patient'])
work_dir = job.fileStore.getLocalTempDir()
input_files = {
'merged_mutations.vcf': merged_mutation_file,
'snpeff_index.tar.gz': snpeff_options['index_tar']}
input_files = get_files_from_filestore(job, input_files, work_dir, docker=True)
parameters = ['eff',
'-dataDir', input_files['snpeff_index'],
'-c', '/'.join([input_files['snpeff_index'], 'snpEff_hg19_gencode.config']),
'-no-intergenic',
'-no-downstream',
'-no-upstream',
#'-canon',
'-noStats',
'hg19_gencode',
input_files['merged_mutations.vcf']]
Xmx = snpeff_options['java_Xmx'] if snpeff_options['java_Xmx'] else univ_options['java_Xmx']
with open('/'.join([work_dir, 'snpeffed_mutations.vcf']), 'w') as snpeff_file:
docker_call(tool='snpeff', tool_parameters=parameters, work_dir=work_dir,
dockerhub=univ_options['dockerhub'], java_opts=Xmx, outfile=snpeff_file)
output_file = job.fileStore.writeGlobalFile(snpeff_file.name)
return output_file | python | def run_snpeff(job, merged_mutation_file, univ_options, snpeff_options):
"""
This module will run snpeff on the aggregated mutation calls. Currently the only mutations
called are SNPs hence SnpEff suffices. This node will be replaced in the future with another
translator.
ARGUMENTS
1. merged_mutation_file: <JSid for merged vcf>
2. univ_options: Dict of universal arguments used by almost all tools
univ_options
+- 'dockerhub': <dockerhub to use>
3. snpeff_options: Dict of parameters specific to snpeff
snpeff_options
+- 'index_tar': <JSid for the snpEff index tarball>
RETURN VALUES
1. output_file: <JSid for the snpeffed vcf>
This node corresponds to node 16 on the tree
"""
job.fileStore.logToMaster('Running snpeff on %s' % univ_options['patient'])
work_dir = job.fileStore.getLocalTempDir()
input_files = {
'merged_mutations.vcf': merged_mutation_file,
'snpeff_index.tar.gz': snpeff_options['index_tar']}
input_files = get_files_from_filestore(job, input_files, work_dir, docker=True)
parameters = ['eff',
'-dataDir', input_files['snpeff_index'],
'-c', '/'.join([input_files['snpeff_index'], 'snpEff_hg19_gencode.config']),
'-no-intergenic',
'-no-downstream',
'-no-upstream',
#'-canon',
'-noStats',
'hg19_gencode',
input_files['merged_mutations.vcf']]
Xmx = snpeff_options['java_Xmx'] if snpeff_options['java_Xmx'] else univ_options['java_Xmx']
with open('/'.join([work_dir, 'snpeffed_mutations.vcf']), 'w') as snpeff_file:
docker_call(tool='snpeff', tool_parameters=parameters, work_dir=work_dir,
dockerhub=univ_options['dockerhub'], java_opts=Xmx, outfile=snpeff_file)
output_file = job.fileStore.writeGlobalFile(snpeff_file.name)
return output_file | [
"def",
"run_snpeff",
"(",
"job",
",",
"merged_mutation_file",
",",
"univ_options",
",",
"snpeff_options",
")",
":",
"job",
".",
"fileStore",
".",
"logToMaster",
"(",
"'Running snpeff on %s'",
"%",
"univ_options",
"[",
"'patient'",
"]",
")",
"work_dir",
"=",
"job",
".",
"fileStore",
".",
"getLocalTempDir",
"(",
")",
"input_files",
"=",
"{",
"'merged_mutations.vcf'",
":",
"merged_mutation_file",
",",
"'snpeff_index.tar.gz'",
":",
"snpeff_options",
"[",
"'index_tar'",
"]",
"}",
"input_files",
"=",
"get_files_from_filestore",
"(",
"job",
",",
"input_files",
",",
"work_dir",
",",
"docker",
"=",
"True",
")",
"parameters",
"=",
"[",
"'eff'",
",",
"'-dataDir'",
",",
"input_files",
"[",
"'snpeff_index'",
"]",
",",
"'-c'",
",",
"'/'",
".",
"join",
"(",
"[",
"input_files",
"[",
"'snpeff_index'",
"]",
",",
"'snpEff_hg19_gencode.config'",
"]",
")",
",",
"'-no-intergenic'",
",",
"'-no-downstream'",
",",
"'-no-upstream'",
",",
"'-noStats'",
",",
"'hg19_gencode'",
",",
"input_files",
"[",
"'merged_mutations.vcf'",
"]",
"]",
"Xmx",
"=",
"snpeff_options",
"[",
"'java_Xmx'",
"]",
"if",
"snpeff_options",
"[",
"'java_Xmx'",
"]",
"else",
"univ_options",
"[",
"'java_Xmx'",
"]",
"with",
"open",
"(",
"'/'",
".",
"join",
"(",
"[",
"work_dir",
",",
"'snpeffed_mutations.vcf'",
"]",
")",
",",
"'w'",
")",
"as",
"snpeff_file",
":",
"docker_call",
"(",
"tool",
"=",
"'snpeff'",
",",
"tool_parameters",
"=",
"parameters",
",",
"work_dir",
"=",
"work_dir",
",",
"dockerhub",
"=",
"univ_options",
"[",
"'dockerhub'",
"]",
",",
"java_opts",
"=",
"Xmx",
",",
"outfile",
"=",
"snpeff_file",
")",
"output_file",
"=",
"job",
".",
"fileStore",
".",
"writeGlobalFile",
"(",
"snpeff_file",
".",
"name",
")",
"return",
"output_file"
] | This module will run snpeff on the aggregated mutation calls. Currently the only mutations
called are SNPs hence SnpEff suffices. This node will be replaced in the future with another
translator.
ARGUMENTS
1. merged_mutation_file: <JSid for merged vcf>
2. univ_options: Dict of universal arguments used by almost all tools
univ_options
+- 'dockerhub': <dockerhub to use>
3. snpeff_options: Dict of parameters specific to snpeff
snpeff_options
+- 'index_tar': <JSid for the snpEff index tarball>
RETURN VALUES
1. output_file: <JSid for the snpeffed vcf>
This node corresponds to node 16 on the tree | [
"This",
"module",
"will",
"run",
"snpeff",
"on",
"the",
"aggregated",
"mutation",
"calls",
".",
"Currently",
"the",
"only",
"mutations",
"called",
"are",
"SNPs",
"hence",
"SnpEff",
"suffices",
".",
"This",
"node",
"will",
"be",
"replaced",
"in",
"the",
"future",
"with",
"another",
"translator",
"."
] | 06310682c50dcf8917b912c8e551299ff7ee41ce | https://github.com/BD2KGenomics/protect/blob/06310682c50dcf8917b912c8e551299ff7ee41ce/attic/ProTECT.py#L1164-L1205 | train |
BD2KGenomics/protect | attic/ProTECT.py | run_transgene | def run_transgene(job, snpeffed_file, univ_options, transgene_options):
"""
This module will run transgene on the input vcf file from the aggregator and produce the
peptides for MHC prediction
ARGUMENTS
1. snpeffed_file: <JSid for snpeffed vcf>
2. univ_options: Dict of universal arguments used by almost all tools
univ_options
+- 'dockerhub': <dockerhub to use>
3. transgene_options: Dict of parameters specific to transgene
transgene_options
+- 'gencode_peptide_fasta': <JSid for the gencode protein fasta>
RETURN VALUES
1. output_files: Dict of transgened n-mer peptide fastas
output_files
|- 'transgened_tumor_9_mer_snpeffed.faa': <JSid>
|- 'transgened_tumor_10_mer_snpeffed.faa': <JSid>
+- 'transgened_tumor_15_mer_snpeffed.faa': <JSid>
This module corresponds to node 17 on the tree
"""
job.fileStore.logToMaster('Running transgene on %s' % univ_options['patient'])
work_dir = job.fileStore.getLocalTempDir()
input_files = {
'snpeffed_muts.vcf': snpeffed_file,
'pepts.fa': transgene_options['gencode_peptide_fasta']}
input_files = get_files_from_filestore(job, input_files, work_dir, docker=True)
parameters = ['--peptides', input_files['pepts.fa'],
'--snpeff', input_files['snpeffed_muts.vcf'],
'--prefix', 'transgened',
'--pep_lens', '9,10,15']
docker_call(tool='transgene', tool_parameters=parameters, work_dir=work_dir,
dockerhub=univ_options['dockerhub'])
output_files = defaultdict()
for peplen in ['9', '10', '15']:
peptfile = '_'.join(['transgened_tumor', peplen, 'mer_snpeffed.faa'])
mapfile = '_'.join(['transgened_tumor', peplen, 'mer_snpeffed.faa.map'])
output_files[peptfile] = job.fileStore.writeGlobalFile(os.path.join(work_dir, peptfile))
output_files[mapfile] = job.fileStore.writeGlobalFile(os.path.join(work_dir, mapfile))
return output_files | python | def run_transgene(job, snpeffed_file, univ_options, transgene_options):
"""
This module will run transgene on the input vcf file from the aggregator and produce the
peptides for MHC prediction
ARGUMENTS
1. snpeffed_file: <JSid for snpeffed vcf>
2. univ_options: Dict of universal arguments used by almost all tools
univ_options
+- 'dockerhub': <dockerhub to use>
3. transgene_options: Dict of parameters specific to transgene
transgene_options
+- 'gencode_peptide_fasta': <JSid for the gencode protein fasta>
RETURN VALUES
1. output_files: Dict of transgened n-mer peptide fastas
output_files
|- 'transgened_tumor_9_mer_snpeffed.faa': <JSid>
|- 'transgened_tumor_10_mer_snpeffed.faa': <JSid>
+- 'transgened_tumor_15_mer_snpeffed.faa': <JSid>
This module corresponds to node 17 on the tree
"""
job.fileStore.logToMaster('Running transgene on %s' % univ_options['patient'])
work_dir = job.fileStore.getLocalTempDir()
input_files = {
'snpeffed_muts.vcf': snpeffed_file,
'pepts.fa': transgene_options['gencode_peptide_fasta']}
input_files = get_files_from_filestore(job, input_files, work_dir, docker=True)
parameters = ['--peptides', input_files['pepts.fa'],
'--snpeff', input_files['snpeffed_muts.vcf'],
'--prefix', 'transgened',
'--pep_lens', '9,10,15']
docker_call(tool='transgene', tool_parameters=parameters, work_dir=work_dir,
dockerhub=univ_options['dockerhub'])
output_files = defaultdict()
for peplen in ['9', '10', '15']:
peptfile = '_'.join(['transgened_tumor', peplen, 'mer_snpeffed.faa'])
mapfile = '_'.join(['transgened_tumor', peplen, 'mer_snpeffed.faa.map'])
output_files[peptfile] = job.fileStore.writeGlobalFile(os.path.join(work_dir, peptfile))
output_files[mapfile] = job.fileStore.writeGlobalFile(os.path.join(work_dir, mapfile))
return output_files | [
"def",
"run_transgene",
"(",
"job",
",",
"snpeffed_file",
",",
"univ_options",
",",
"transgene_options",
")",
":",
"job",
".",
"fileStore",
".",
"logToMaster",
"(",
"'Running transgene on %s'",
"%",
"univ_options",
"[",
"'patient'",
"]",
")",
"work_dir",
"=",
"job",
".",
"fileStore",
".",
"getLocalTempDir",
"(",
")",
"input_files",
"=",
"{",
"'snpeffed_muts.vcf'",
":",
"snpeffed_file",
",",
"'pepts.fa'",
":",
"transgene_options",
"[",
"'gencode_peptide_fasta'",
"]",
"}",
"input_files",
"=",
"get_files_from_filestore",
"(",
"job",
",",
"input_files",
",",
"work_dir",
",",
"docker",
"=",
"True",
")",
"parameters",
"=",
"[",
"'--peptides'",
",",
"input_files",
"[",
"'pepts.fa'",
"]",
",",
"'--snpeff'",
",",
"input_files",
"[",
"'snpeffed_muts.vcf'",
"]",
",",
"'--prefix'",
",",
"'transgened'",
",",
"'--pep_lens'",
",",
"'9,10,15'",
"]",
"docker_call",
"(",
"tool",
"=",
"'transgene'",
",",
"tool_parameters",
"=",
"parameters",
",",
"work_dir",
"=",
"work_dir",
",",
"dockerhub",
"=",
"univ_options",
"[",
"'dockerhub'",
"]",
")",
"output_files",
"=",
"defaultdict",
"(",
")",
"for",
"peplen",
"in",
"[",
"'9'",
",",
"'10'",
",",
"'15'",
"]",
":",
"peptfile",
"=",
"'_'",
".",
"join",
"(",
"[",
"'transgened_tumor'",
",",
"peplen",
",",
"'mer_snpeffed.faa'",
"]",
")",
"mapfile",
"=",
"'_'",
".",
"join",
"(",
"[",
"'transgened_tumor'",
",",
"peplen",
",",
"'mer_snpeffed.faa.map'",
"]",
")",
"output_files",
"[",
"peptfile",
"]",
"=",
"job",
".",
"fileStore",
".",
"writeGlobalFile",
"(",
"os",
".",
"path",
".",
"join",
"(",
"work_dir",
",",
"peptfile",
")",
")",
"output_files",
"[",
"mapfile",
"]",
"=",
"job",
".",
"fileStore",
".",
"writeGlobalFile",
"(",
"os",
".",
"path",
".",
"join",
"(",
"work_dir",
",",
"mapfile",
")",
")",
"return",
"output_files"
] | This module will run transgene on the input vcf file from the aggregator and produce the
peptides for MHC prediction
ARGUMENTS
1. snpeffed_file: <JSid for snpeffed vcf>
2. univ_options: Dict of universal arguments used by almost all tools
univ_options
+- 'dockerhub': <dockerhub to use>
3. transgene_options: Dict of parameters specific to transgene
transgene_options
+- 'gencode_peptide_fasta': <JSid for the gencode protein fasta>
RETURN VALUES
1. output_files: Dict of transgened n-mer peptide fastas
output_files
|- 'transgened_tumor_9_mer_snpeffed.faa': <JSid>
|- 'transgened_tumor_10_mer_snpeffed.faa': <JSid>
+- 'transgened_tumor_15_mer_snpeffed.faa': <JSid>
This module corresponds to node 17 on the tree | [
"This",
"module",
"will",
"run",
"transgene",
"on",
"the",
"input",
"vcf",
"file",
"from",
"the",
"aggregator",
"and",
"produce",
"the",
"peptides",
"for",
"MHC",
"prediction"
] | 06310682c50dcf8917b912c8e551299ff7ee41ce | https://github.com/BD2KGenomics/protect/blob/06310682c50dcf8917b912c8e551299ff7ee41ce/attic/ProTECT.py#L1208-L1249 | train |
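Transgene is invoked above as a Docker image, so its internals are outside this record; conceptually, the 9-, 10- and 15-mer FASTAs it writes contain every n-mer window of the mutant protein that overlaps the altered residue. A toy sketch of that windowing idea (illustrative only, not the Transgene implementation):

def mutant_peptides(protein, mut_pos, peplen):
    # All peplen-mers of the protein string that overlap the 0-based mutated position.
    start = max(0, mut_pos - peplen + 1)
    stop = min(len(protein) - peplen, mut_pos)
    return [protein[i:i + peplen] for i in range(start, stop + 1)]

# mutant_peptides('MTEYKLVVVGAGGVGKSALTIQ', 12, 9) yields the nine 9-mers covering index 12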
BD2KGenomics/protect | attic/ProTECT.py | run_phlat | def run_phlat(job, fastqs, sample_type, univ_options, phlat_options):
"""
This module will run PHLAT on SAMPLE_TYPE fastqs.
ARGUMENTS -- <ST> depicts the sample type. Substitute with 'tumor_dna',
'normal_dna', or 'tumor_rna'
1. fastqs: Dict of list of input WGS/WXS fastqs
fastqs
+- '<ST>': [<JSid for 1.fastq> , <JSid for 2.fastq>]
2. sample_type: string of 'tumor' or 'normal'
3. univ_options: Dict of universal arguments used by almost all tools
univ_options
+- 'dockerhub': <dockerhub to use>
4. phlat_options: Dict of parameters specific to phlat
phlat_options
|- 'index_tar': <JSid for the PHLAT index tarball>
+- 'n': <number of threads to allocate>
RETURN VALUES
1. output_file: <JSid for the allele predictions for ST>
This module corresponds to nodes 5, 6 and 7 on the tree
"""
job.fileStore.logToMaster('Running phlat on %s:%s' % (univ_options['patient'], sample_type))
work_dir = job.fileStore.getLocalTempDir()
fq_extn = '.gz' if fastqs['gzipped'] else ''
input_files = {
'input_1.fastq' + fq_extn: fastqs[sample_type][0],
'input_2.fastq' + fq_extn: fastqs[sample_type][1],
'phlat_index.tar.gz': phlat_options['index_tar']}
input_files = get_files_from_filestore(job, input_files, work_dir, docker=True)
parameters = ['-1', input_files['input_1.fastq'],
'-2', input_files['input_2.fastq'],
'-index', input_files['phlat_index'],
'-b2url', '/usr/local/bin/bowtie2',
'-tag', sample_type,
'-e', '/home/phlat-1.0', # Phlat directory home
'-o', '/data', # Output directory
'-p', str(phlat_options['n'])] # Number of threads
docker_call(tool='phlat', tool_parameters=parameters, work_dir=work_dir,
dockerhub=univ_options['dockerhub'])
output_file = job.fileStore.writeGlobalFile(''.join([work_dir, '/', sample_type, '_HLA.sum']))
return output_file | python | def run_phlat(job, fastqs, sample_type, univ_options, phlat_options):
"""
This module will run PHLAT on SAMPLE_TYPE fastqs.
ARGUMENTS -- <ST> depicts the sample type. Substitute with 'tumor_dna',
'normal_dna', or 'tumor_rna'
1. fastqs: Dict of list of input WGS/WXS fastqs
fastqs
+- '<ST>': [<JSid for 1.fastq> , <JSid for 2.fastq>]
2. sample_type: string of 'tumor' or 'normal'
3. univ_options: Dict of universal arguments used by almost all tools
univ_options
+- 'dockerhub': <dockerhub to use>
4. phlat_options: Dict of parameters specific to phlat
phlat_options
|- 'index_tar': <JSid for the PHLAT index tarball>
+- 'n': <number of threads to allocate>
RETURN VALUES
1. output_file: <JSid for the allele predictions for ST>
This module corresponds to nodes 5, 6 and 7 on the tree
"""
job.fileStore.logToMaster('Running phlat on %s:%s' % (univ_options['patient'], sample_type))
work_dir = job.fileStore.getLocalTempDir()
fq_extn = '.gz' if fastqs['gzipped'] else ''
input_files = {
'input_1.fastq' + fq_extn: fastqs[sample_type][0],
'input_2.fastq' + fq_extn: fastqs[sample_type][1],
'phlat_index.tar.gz': phlat_options['index_tar']}
input_files = get_files_from_filestore(job, input_files, work_dir, docker=True)
parameters = ['-1', input_files['input_1.fastq'],
'-2', input_files['input_2.fastq'],
'-index', input_files['phlat_index'],
'-b2url', '/usr/local/bin/bowtie2',
'-tag', sample_type,
'-e', '/home/phlat-1.0', # Phlat directory home
'-o', '/data', # Output directory
'-p', str(phlat_options['n'])] # Number of threads
docker_call(tool='phlat', tool_parameters=parameters, work_dir=work_dir,
dockerhub=univ_options['dockerhub'])
output_file = job.fileStore.writeGlobalFile(''.join([work_dir, '/', sample_type, '_HLA.sum']))
return output_file | [
"def",
"run_phlat",
"(",
"job",
",",
"fastqs",
",",
"sample_type",
",",
"univ_options",
",",
"phlat_options",
")",
":",
"job",
".",
"fileStore",
".",
"logToMaster",
"(",
"'Running phlat on %s:%s'",
"%",
"(",
"univ_options",
"[",
"'patient'",
"]",
",",
"sample_type",
")",
")",
"work_dir",
"=",
"job",
".",
"fileStore",
".",
"getLocalTempDir",
"(",
")",
"fq_extn",
"=",
"'.gz'",
"if",
"fastqs",
"[",
"'gzipped'",
"]",
"else",
"''",
"input_files",
"=",
"{",
"'input_1.fastq'",
"+",
"fq_extn",
":",
"fastqs",
"[",
"sample_type",
"]",
"[",
"0",
"]",
",",
"'input_2.fastq'",
"+",
"fq_extn",
":",
"fastqs",
"[",
"sample_type",
"]",
"[",
"1",
"]",
",",
"'phlat_index.tar.gz'",
":",
"phlat_options",
"[",
"'index_tar'",
"]",
"}",
"input_files",
"=",
"get_files_from_filestore",
"(",
"job",
",",
"input_files",
",",
"work_dir",
",",
"docker",
"=",
"True",
")",
"parameters",
"=",
"[",
"'-1'",
",",
"input_files",
"[",
"'input_1.fastq'",
"]",
",",
"'-2'",
",",
"input_files",
"[",
"'input_2.fastq'",
"]",
",",
"'-index'",
",",
"input_files",
"[",
"'phlat_index'",
"]",
",",
"'-b2url'",
",",
"'/usr/local/bin/bowtie2'",
",",
"'-tag'",
",",
"sample_type",
",",
"'-e'",
",",
"'/home/phlat-1.0'",
",",
"'-o'",
",",
"'/data'",
",",
"'-p'",
",",
"str",
"(",
"phlat_options",
"[",
"'n'",
"]",
")",
"]",
"docker_call",
"(",
"tool",
"=",
"'phlat'",
",",
"tool_parameters",
"=",
"parameters",
",",
"work_dir",
"=",
"work_dir",
",",
"dockerhub",
"=",
"univ_options",
"[",
"'dockerhub'",
"]",
")",
"output_file",
"=",
"job",
".",
"fileStore",
".",
"writeGlobalFile",
"(",
"''",
".",
"join",
"(",
"[",
"work_dir",
",",
"'/'",
",",
"sample_type",
",",
"'_HLA.sum'",
"]",
")",
")",
"return",
"output_file"
] | This module will run PHLAT on SAMPLE_TYPE fastqs.
ARGUMENTS -- <ST> depicts the sample type. Substitute with 'tumor_dna',
'normal_dna', or 'tumor_rna'
1. fastqs: Dict of list of input WGS/WXS fastqs
fastqs
+- '<ST>': [<JSid for 1.fastq> , <JSid for 2.fastq>]
2. sample_type: string of 'tumor' or 'normal'
3. univ_options: Dict of universal arguments used by almost all tools
univ_options
+- 'dockerhub': <dockerhub to use>
4. phlat_options: Dict of parameters specific to phlat
phlat_options
|- 'index_tar': <JSid for the PHLAT index tarball>
+- 'n': <number of threads to allocate>
RETURN VALUES
1. output_file: <JSid for the allele predictions for ST>
This module corresponds to nodes 5, 6 and 7 on the tree | [
"This",
"module",
"will",
"run",
"PHLAT",
"on",
"SAMPLE_TYPE",
"fastqs",
"."
] | 06310682c50dcf8917b912c8e551299ff7ee41ce | https://github.com/BD2KGenomics/protect/blob/06310682c50dcf8917b912c8e551299ff7ee41ce/attic/ProTECT.py#L1252-L1294 | train |
BD2KGenomics/protect | attic/ProTECT.py | merge_phlat_calls | def merge_phlat_calls(job, tumor_phlat, normal_phlat, rna_phlat):
"""
This module will merge the results from running PHLAT on the 3 input fastq
pairs.
ARGUMENTS
1. tumor_phlat: <JSid for tumor DNA called alleles>
2. normal_phlat: <JSid for normal DNA called alleles>
3. rna_phlat: <JSid for tumor RNA called alleles>
RETURN VALUES
1. output_files: Dict of JSids for consensus MHCI and MHCII alleles
output_files
|- 'mhci_alleles.list': <JSid>
+- 'mhcii_alleles.list': <JSid>
This module corresponds to node 14 on the tree
"""
job.fileStore.logToMaster('Merging Phlat calls')
work_dir = job.fileStore.getLocalTempDir()
input_files = {
'tumor_dna': tumor_phlat,
'normal_dna': normal_phlat,
'tumor_rna': rna_phlat}
input_files = get_files_from_filestore(job, input_files, work_dir)
with open(input_files['tumor_dna'], 'r') as td_file, \
open(input_files['normal_dna'], 'r') as nd_file, \
open(input_files['tumor_rna'], 'r') as tr_file:
# TODO: Could this be a defaultdict?
mhc_alleles = {'HLA_A': [], 'HLA_B': [], 'HLA_C': [], 'HLA_DPA': [], 'HLA_DQA': [],
'HLA_DPB': [], 'HLA_DQB': [], 'HLA_DRB': []}
for phlatfile in td_file, nd_file, tr_file:
mhc_alleles = parse_phlat_file(phlatfile, mhc_alleles)
# Get most probable alleles for each allele group and print to output
with open(os.path.join(work_dir, 'mhci_alleles.list'), 'w') as mhci_file, \
open(os.path.join(work_dir, 'mhcii_alleles.list'), 'w') as mhcii_file:
for mhci_group in ['HLA_A', 'HLA_B', 'HLA_C']:
mpa = most_probable_alleles(mhc_alleles[mhci_group])
print('\n'.join([''.join(['HLA-', x]) for x in mpa]), file=mhci_file)
drb_mpa = most_probable_alleles(mhc_alleles['HLA_DRB'])
print('\n'.join([''.join(['HLA-', x]) for x in drb_mpa]), file=mhcii_file)
dqa_mpa = most_probable_alleles(mhc_alleles['HLA_DQA'])
dqb_mpa = most_probable_alleles(mhc_alleles['HLA_DQB'])
for dqa_allele in dqa_mpa:
for dqb_allele in dqb_mpa:
print(''.join(['HLA-', dqa_allele, '/', dqb_allele]), file=mhcii_file)
output_files = defaultdict()
for allele_file in ['mhci_alleles.list', 'mhcii_alleles.list']:
output_files[allele_file] = job.fileStore.writeGlobalFile(os.path.join(work_dir,
allele_file))
return output_files | python | def merge_phlat_calls(job, tumor_phlat, normal_phlat, rna_phlat):
"""
This module will merge the results from running PHLAT on the 3 input fastq
pairs.
ARGUMENTS
1. tumor_phlat: <JSid for tumor DNA called alleles>
2. normal_phlat: <JSid for normal DNA called alleles>
3. rna_phlat: <JSid for tumor RNA called alleles>
RETURN VALUES
1. output_files: Dict of JSids for consensus MHCI and MHCII alleles
output_files
|- 'mhci_alleles.list': <JSid>
+- 'mhcii_alleles.list': <JSid>
This module corresponds to node 14 on the tree
"""
job.fileStore.logToMaster('Merging Phlat calls')
work_dir = job.fileStore.getLocalTempDir()
input_files = {
'tumor_dna': tumor_phlat,
'normal_dna': normal_phlat,
'tumor_rna': rna_phlat}
input_files = get_files_from_filestore(job, input_files, work_dir)
with open(input_files['tumor_dna'], 'r') as td_file, \
open(input_files['normal_dna'], 'r') as nd_file, \
open(input_files['tumor_rna'], 'r') as tr_file:
# TODO: Could this be a defaultdict?
mhc_alleles = {'HLA_A': [], 'HLA_B': [], 'HLA_C': [], 'HLA_DPA': [], 'HLA_DQA': [],
'HLA_DPB': [], 'HLA_DQB': [], 'HLA_DRB': []}
for phlatfile in td_file, nd_file, tr_file:
mhc_alleles = parse_phlat_file(phlatfile, mhc_alleles)
# Get most probable alleles for each allele group and print to output
with open(os.path.join(work_dir, 'mhci_alleles.list'), 'w') as mhci_file, \
open(os.path.join(work_dir, 'mhcii_alleles.list'), 'w') as mhcii_file:
for mhci_group in ['HLA_A', 'HLA_B', 'HLA_C']:
mpa = most_probable_alleles(mhc_alleles[mhci_group])
print('\n'.join([''.join(['HLA-', x]) for x in mpa]), file=mhci_file)
drb_mpa = most_probable_alleles(mhc_alleles['HLA_DRB'])
print('\n'.join([''.join(['HLA-', x]) for x in drb_mpa]), file=mhcii_file)
dqa_mpa = most_probable_alleles(mhc_alleles['HLA_DQA'])
dqb_mpa = most_probable_alleles(mhc_alleles['HLA_DQB'])
for dqa_allele in dqa_mpa:
for dqb_allele in dqb_mpa:
print(''.join(['HLA-', dqa_allele, '/', dqb_allele]), file=mhcii_file)
output_files = defaultdict()
for allele_file in ['mhci_alleles.list', 'mhcii_alleles.list']:
output_files[allele_file] = job.fileStore.writeGlobalFile(os.path.join(work_dir,
allele_file))
return output_files | [
"def",
"merge_phlat_calls",
"(",
"job",
",",
"tumor_phlat",
",",
"normal_phlat",
",",
"rna_phlat",
")",
":",
"job",
".",
"fileStore",
".",
"logToMaster",
"(",
"'Merging Phlat calls'",
")",
"work_dir",
"=",
"job",
".",
"fileStore",
".",
"getLocalTempDir",
"(",
")",
"input_files",
"=",
"{",
"'tumor_dna'",
":",
"tumor_phlat",
",",
"'normal_dna'",
":",
"normal_phlat",
",",
"'tumor_rna'",
":",
"rna_phlat",
"}",
"input_files",
"=",
"get_files_from_filestore",
"(",
"job",
",",
"input_files",
",",
"work_dir",
")",
"with",
"open",
"(",
"input_files",
"[",
"'tumor_dna'",
"]",
",",
"'r'",
")",
"as",
"td_file",
",",
"open",
"(",
"input_files",
"[",
"'normal_dna'",
"]",
",",
"'r'",
")",
"as",
"nd_file",
",",
"open",
"(",
"input_files",
"[",
"'tumor_rna'",
"]",
",",
"'r'",
")",
"as",
"tr_file",
":",
"mhc_alleles",
"=",
"{",
"'HLA_A'",
":",
"[",
"]",
",",
"'HLA_B'",
":",
"[",
"]",
",",
"'HLA_C'",
":",
"[",
"]",
",",
"'HLA_DPA'",
":",
"[",
"]",
",",
"'HLA_DQA'",
":",
"[",
"]",
",",
"'HLA_DPB'",
":",
"[",
"]",
",",
"'HLA_DQB'",
":",
"[",
"]",
",",
"'HLA_DRB'",
":",
"[",
"]",
"}",
"for",
"phlatfile",
"in",
"td_file",
",",
"nd_file",
",",
"tr_file",
":",
"mhc_alleles",
"=",
"parse_phlat_file",
"(",
"phlatfile",
",",
"mhc_alleles",
")",
"with",
"open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"work_dir",
",",
"'mhci_alleles.list'",
")",
",",
"'w'",
")",
"as",
"mhci_file",
",",
"open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"work_dir",
",",
"'mhcii_alleles.list'",
")",
",",
"'w'",
")",
"as",
"mhcii_file",
":",
"for",
"mhci_group",
"in",
"[",
"'HLA_A'",
",",
"'HLA_B'",
",",
"'HLA_C'",
"]",
":",
"mpa",
"=",
"most_probable_alleles",
"(",
"mhc_alleles",
"[",
"mhci_group",
"]",
")",
"print",
"(",
"'\\n'",
".",
"join",
"(",
"[",
"''",
".",
"join",
"(",
"[",
"'HLA-'",
",",
"x",
"]",
")",
"for",
"x",
"in",
"mpa",
"]",
")",
",",
"file",
"=",
"mhci_file",
")",
"drb_mpa",
"=",
"most_probable_alleles",
"(",
"mhc_alleles",
"[",
"'HLA_DRB'",
"]",
")",
"print",
"(",
"'\\n'",
".",
"join",
"(",
"[",
"''",
".",
"join",
"(",
"[",
"'HLA-'",
",",
"x",
"]",
")",
"for",
"x",
"in",
"drb_mpa",
"]",
")",
",",
"file",
"=",
"mhcii_file",
")",
"dqa_mpa",
"=",
"most_probable_alleles",
"(",
"mhc_alleles",
"[",
"'HLA_DQA'",
"]",
")",
"dqb_mpa",
"=",
"most_probable_alleles",
"(",
"mhc_alleles",
"[",
"'HLA_DQB'",
"]",
")",
"for",
"dqa_allele",
"in",
"dqa_mpa",
":",
"for",
"dqb_allele",
"in",
"dqb_mpa",
":",
"print",
"(",
"''",
".",
"join",
"(",
"[",
"'HLA-'",
",",
"dqa_allele",
",",
"'/'",
",",
"dqb_allele",
"]",
")",
",",
"file",
"=",
"mhcii_file",
")",
"output_files",
"=",
"defaultdict",
"(",
")",
"for",
"allele_file",
"in",
"[",
"'mhci_alleles.list'",
",",
"'mhcii_alleles.list'",
"]",
":",
"output_files",
"[",
"allele_file",
"]",
"=",
"job",
".",
"fileStore",
".",
"writeGlobalFile",
"(",
"os",
".",
"path",
".",
"join",
"(",
"work_dir",
",",
"allele_file",
")",
")",
"return",
"output_files"
] | This module will merge the results from running PHLAT on the 3 input fastq
pairs.
ARGUMENTS
1. tumor_phlat: <JSid for tumor DNA called alleles>
2. normal_phlat: <JSid for normal DNA called alleles>
3. rna_phlat: <JSid for tumor RNA called alleles>
RETURN VALUES
1. output_files: Dict of JSids for consensus MHCI and MHCII alleles
output_files
|- 'mhci_alleles.list': <JSid>
+- 'mhcii_alleles.list': <JSid>
This module corresponds to node 14 on the tree | [
"This",
"module",
"will",
"merge",
"the",
"results",
"form",
"running",
"PHLAT",
"on",
"the",
"3",
"input",
"fastq",
"pairs",
"."
] | 06310682c50dcf8917b912c8e551299ff7ee41ce | https://github.com/BD2KGenomics/protect/blob/06310682c50dcf8917b912c8e551299ff7ee41ce/attic/ProTECT.py#L1297-L1347 | train |
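most_probable_alleles(), used above to reduce the pooled PHLAT calls to a consensus, is defined elsewhere in ProTECT.py and is not shown in this record. As a rough illustration of the idea (an assumption about its behaviour, not the actual implementation), a consensus can be taken by counting how often each allele is reported across the tumor DNA, normal DNA and tumor RNA calls:

from collections import Counter

def consensus_alleles(pooled_calls, top_n=2):
    # pooled_calls: flat list of allele strings from the three PHLAT outputs,
    # e.g. ['A*02:01', 'A*02:01', 'A*01:01']; returns the top_n most frequent alleles.
    return [allele for allele, _ in Counter(pooled_calls).most_common(top_n)]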
BD2KGenomics/protect | attic/ProTECT.py | boost_ranks | def boost_ranks(job, isoform_expression, merged_mhc_calls, transgene_out, univ_options,
rank_boost_options):
"""
This is the final module in the pipeline. It will call the rank boosting R
script.
This module corresponds to node 21 in the tree
"""
job.fileStore.logToMaster('Running boost_ranks on %s' % univ_options['patient'])
work_dir = os.path.join(job.fileStore.getLocalTempDir(), univ_options['patient'])
os.mkdir(work_dir)
input_files = {
'rsem_quant.tsv': isoform_expression,
'mhci_merged_files.tsv': merged_mhc_calls['mhci_merged_files.list'],
'mhcii_merged_files.tsv': merged_mhc_calls['mhcii_merged_files.list'],
'mhci_peptides.faa': transgene_out['transgened_tumor_10_mer_snpeffed.faa'],
'mhcii_peptides.faa': transgene_out['transgened_tumor_15_mer_snpeffed.faa']}
input_files = get_files_from_filestore(job, input_files, work_dir, docker=True)
output_files = {}
for mhc in ('mhci', 'mhcii'):
parameters = [mhc,
input_files[''.join([mhc, '_merged_files.tsv'])],
input_files['rsem_quant.tsv'],
input_files[''.join([mhc, '_peptides.faa'])],
rank_boost_options[''.join([mhc, '_combo'])]
]
docker_call(tool='rankboost', tool_parameters=parameters, work_dir=work_dir,
dockerhub=univ_options['dockerhub'])
output_files[mhc] = {
''.join([mhc, '_concise_results.tsv']):
job.fileStore.writeGlobalFile(''.join([work_dir, '/', mhc,
'_merged_files_concise_results.tsv'])),
''.join([mhc, '_detailed_results.tsv']):
job.fileStore.writeGlobalFile(''.join([work_dir, '/', mhc,
'_merged_files_detailed_results.tsv']))}
export_results(work_dir, univ_options)
return output_files | python | def boost_ranks(job, isoform_expression, merged_mhc_calls, transgene_out, univ_options,
rank_boost_options):
"""
This is the final module in the pipeline. It will call the rank boosting R
script.
This module corresponds to node 21 in the tree
"""
job.fileStore.logToMaster('Running boost_ranks on %s' % univ_options['patient'])
work_dir = os.path.join(job.fileStore.getLocalTempDir(), univ_options['patient'])
os.mkdir(work_dir)
input_files = {
'rsem_quant.tsv': isoform_expression,
'mhci_merged_files.tsv': merged_mhc_calls['mhci_merged_files.list'],
'mhcii_merged_files.tsv': merged_mhc_calls['mhcii_merged_files.list'],
'mhci_peptides.faa': transgene_out['transgened_tumor_10_mer_snpeffed.faa'],
'mhcii_peptides.faa': transgene_out['transgened_tumor_15_mer_snpeffed.faa']}
input_files = get_files_from_filestore(job, input_files, work_dir, docker=True)
output_files = {}
for mhc in ('mhci', 'mhcii'):
parameters = [mhc,
input_files[''.join([mhc, '_merged_files.tsv'])],
input_files['rsem_quant.tsv'],
input_files[''.join([mhc, '_peptides.faa'])],
rank_boost_options[''.join([mhc, '_combo'])]
]
docker_call(tool='rankboost', tool_parameters=parameters, work_dir=work_dir,
dockerhub=univ_options['dockerhub'])
output_files[mhc] = {
''.join([mhc, '_concise_results.tsv']):
job.fileStore.writeGlobalFile(''.join([work_dir, '/', mhc,
'_merged_files_concise_results.tsv'])),
''.join([mhc, '_detailed_results.tsv']):
job.fileStore.writeGlobalFile(''.join([work_dir, '/', mhc,
'_merged_files_detailed_results.tsv']))}
export_results(work_dir, univ_options)
return output_files | [
"def",
"boost_ranks",
"(",
"job",
",",
"isoform_expression",
",",
"merged_mhc_calls",
",",
"transgene_out",
",",
"univ_options",
",",
"rank_boost_options",
")",
":",
"job",
".",
"fileStore",
".",
"logToMaster",
"(",
"'Running boost_ranks on %s'",
"%",
"univ_options",
"[",
"'patient'",
"]",
")",
"work_dir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"job",
".",
"fileStore",
".",
"getLocalTempDir",
"(",
")",
",",
"univ_options",
"[",
"'patient'",
"]",
")",
"os",
".",
"mkdir",
"(",
"work_dir",
")",
"input_files",
"=",
"{",
"'rsem_quant.tsv'",
":",
"isoform_expression",
",",
"'mhci_merged_files.tsv'",
":",
"merged_mhc_calls",
"[",
"'mhci_merged_files.list'",
"]",
",",
"'mhcii_merged_files.tsv'",
":",
"merged_mhc_calls",
"[",
"'mhcii_merged_files.list'",
"]",
",",
"'mhci_peptides.faa'",
":",
"transgene_out",
"[",
"'transgened_tumor_10_mer_snpeffed.faa'",
"]",
",",
"'mhcii_peptides.faa'",
":",
"transgene_out",
"[",
"'transgened_tumor_15_mer_snpeffed.faa'",
"]",
"}",
"input_files",
"=",
"get_files_from_filestore",
"(",
"job",
",",
"input_files",
",",
"work_dir",
",",
"docker",
"=",
"True",
")",
"output_files",
"=",
"{",
"}",
"for",
"mhc",
"in",
"(",
"'mhci'",
",",
"'mhcii'",
")",
":",
"parameters",
"=",
"[",
"mhc",
",",
"input_files",
"[",
"''",
".",
"join",
"(",
"[",
"mhc",
",",
"'_merged_files.tsv'",
"]",
")",
"]",
",",
"input_files",
"[",
"'rsem_quant.tsv'",
"]",
",",
"input_files",
"[",
"''",
".",
"join",
"(",
"[",
"mhc",
",",
"'_peptides.faa'",
"]",
")",
"]",
",",
"rank_boost_options",
"[",
"''",
".",
"join",
"(",
"[",
"mhc",
",",
"'_combo'",
"]",
")",
"]",
"]",
"docker_call",
"(",
"tool",
"=",
"'rankboost'",
",",
"tool_parameters",
"=",
"parameters",
",",
"work_dir",
"=",
"work_dir",
",",
"dockerhub",
"=",
"univ_options",
"[",
"'dockerhub'",
"]",
")",
"output_files",
"[",
"mhc",
"]",
"=",
"{",
"''",
".",
"join",
"(",
"[",
"mhc",
",",
"'_concise_results.tsv'",
"]",
")",
":",
"job",
".",
"fileStore",
".",
"writeGlobalFile",
"(",
"''",
".",
"join",
"(",
"[",
"work_dir",
",",
"'/'",
",",
"mhc",
",",
"'_merged_files_concise_results.tsv'",
"]",
")",
")",
",",
"''",
".",
"join",
"(",
"[",
"mhc",
",",
"'_detailed_results.tsv'",
"]",
")",
":",
"job",
".",
"fileStore",
".",
"writeGlobalFile",
"(",
"''",
".",
"join",
"(",
"[",
"work_dir",
",",
"'/'",
",",
"mhc",
",",
"'_merged_files_detailed_results.tsv'",
"]",
")",
")",
"}",
"export_results",
"(",
"work_dir",
",",
"univ_options",
")",
"return",
"output_files"
] | This is the final module in the pipeline. It will call the rank boosting R
script.
This module corresponds to node 21 in the tree | [
"This",
"is",
"the",
"final",
"module",
"in",
"the",
"pipeline",
".",
"It",
"will",
"call",
"the",
"rank",
"boosting",
"R",
"script",
"."
] | 06310682c50dcf8917b912c8e551299ff7ee41ce | https://github.com/BD2KGenomics/protect/blob/06310682c50dcf8917b912c8e551299ff7ee41ce/attic/ProTECT.py#L1676-L1712 | train |
BD2KGenomics/protect | attic/ProTECT.py | get_files_from_filestore | def get_files_from_filestore(job, files, work_dir, cache=True, docker=False):
"""
This is adapted from John Vivian's return_input_paths from the RNA-Seq pipeline.
Reads the files out of the FileStore into work_dir (if not already there) and returns their local paths.
If docker=True, return the docker path for the file.
If the file extension is tar.gz, then tar -zxvf it.
files is a dict with:
keys = the name of the file to be returned in toil space
value = the input value for the file (can be toil temp file)
work_dir is the location where the file should be stored
cache indicates whether caching should be used
"""
for name in files.keys():
outfile = job.fileStore.readGlobalFile(files[name], '/'.join([work_dir, name]), cache=cache)
# If the file pointed to a tarball, extract it to WORK_DIR
if tarfile.is_tarfile(outfile) and file_xext(outfile).startswith('.tar'):
untar_name = os.path.basename(strip_xext(outfile))
files[untar_name] = untargz(outfile, work_dir)
files.pop(name)
name = os.path.basename(untar_name)
# If the file is gzipped but NOT a tarfile, gunzip it to work_dir, writing the
# decompressed copy to the same path minus its .gz extension.
elif is_gzipfile(outfile) and file_xext(outfile) == '.gz':
ungz_name = strip_xext(outfile)
with gzip.open(outfile, 'rb') as gz_in, open(ungz_name, 'w') as ungz_out:
shutil.copyfileobj(gz_in, ungz_out)
files[os.path.basename(ungz_name)] = outfile
files.pop(name)
name = os.path.basename(ungz_name)
else:
files[name] = outfile
# If the files will be sent to docker, we will mount work_dir to the container as /data and
# we want the /data prefixed path to the file
if docker:
files[name] = docker_path(files[name])
return files | python | def get_files_from_filestore(job, files, work_dir, cache=True, docker=False):
"""
This is adapted from John Vivian's return_input_paths from the RNA-Seq pipeline.
Reads the files out of the FileStore into work_dir (if not already there) and returns their local paths.
If docker=True, return the docker path for the file.
If the file extension is tar.gz, then tar -zxvf it.
files is a dict with:
keys = the name of the file to be returned in toil space
value = the input value for the file (can be toil temp file)
work_dir is the location where the file should be stored
cache indicates whether caching should be used
"""
for name in files.keys():
outfile = job.fileStore.readGlobalFile(files[name], '/'.join([work_dir, name]), cache=cache)
# If the file pointed to a tarball, extract it to WORK_DIR
if tarfile.is_tarfile(outfile) and file_xext(outfile).startswith('.tar'):
untar_name = os.path.basename(strip_xext(outfile))
files[untar_name] = untargz(outfile, work_dir)
files.pop(name)
name = os.path.basename(untar_name)
# If the file is gzipped but NOT a tarfile, gunzip it to work_dir, writing the
# decompressed copy to the same path minus its .gz extension.
elif is_gzipfile(outfile) and file_xext(outfile) == '.gz':
ungz_name = strip_xext(outfile)
with gzip.open(outfile, 'rb') as gz_in, open(ungz_name, 'w') as ungz_out:
shutil.copyfileobj(gz_in, ungz_out)
files[os.path.basename(ungz_name)] = outfile
files.pop(name)
name = os.path.basename(ungz_name)
else:
files[name] = outfile
# If the files will be sent to docker, we will mount work_dir to the container as /data and
# we want the /data prefixed path to the file
if docker:
files[name] = docker_path(files[name])
return files | [
"def",
"get_files_from_filestore",
"(",
"job",
",",
"files",
",",
"work_dir",
",",
"cache",
"=",
"True",
",",
"docker",
"=",
"False",
")",
":",
"for",
"name",
"in",
"files",
".",
"keys",
"(",
")",
":",
"outfile",
"=",
"job",
".",
"fileStore",
".",
"readGlobalFile",
"(",
"files",
"[",
"name",
"]",
",",
"'/'",
".",
"join",
"(",
"[",
"work_dir",
",",
"name",
"]",
")",
",",
"cache",
"=",
"cache",
")",
"if",
"tarfile",
".",
"is_tarfile",
"(",
"outfile",
")",
"and",
"file_xext",
"(",
"outfile",
")",
".",
"startswith",
"(",
"'.tar'",
")",
":",
"untar_name",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"strip_xext",
"(",
"outfile",
")",
")",
"files",
"[",
"untar_name",
"]",
"=",
"untargz",
"(",
"outfile",
",",
"work_dir",
")",
"files",
".",
"pop",
"(",
"name",
")",
"name",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"untar_name",
")",
"elif",
"is_gzipfile",
"(",
"outfile",
")",
"and",
"file_xext",
"(",
"outfile",
")",
"==",
"'.gz'",
":",
"ungz_name",
"=",
"strip_xext",
"(",
"outfile",
")",
"with",
"gzip",
".",
"open",
"(",
"outfile",
",",
"'rb'",
")",
"as",
"gz_in",
",",
"open",
"(",
"ungz_name",
",",
"'w'",
")",
"as",
"ungz_out",
":",
"shutil",
".",
"copyfileobj",
"(",
"gz_in",
",",
"ungz_out",
")",
"files",
"[",
"os",
".",
"path",
".",
"basename",
"(",
"ungz_name",
")",
"]",
"=",
"outfile",
"files",
".",
"pop",
"(",
"name",
")",
"name",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"ungz_name",
")",
"else",
":",
"files",
"[",
"name",
"]",
"=",
"outfile",
"if",
"docker",
":",
"files",
"[",
"name",
"]",
"=",
"docker_path",
"(",
"files",
"[",
"name",
"]",
")",
"return",
"files"
] | This is adapted from John Vivian's return_input_paths from the RNA-Seq pipeline.
Reads the files out of the FileStore into work_dir (if not already there) and returns their local paths.
If docker=True, return the docker path for the file.
If the file extension is tar.gz, then tar -zxvf it.
files is a dict with:
keys = the name of the file to be returned in toil space
value = the input value for the file (can be toil temp file)
work_dir is the location where the file should be stored
cache indicates whether caching should be used | [
"This",
"is",
"adapted",
"from",
"John",
"Vivian",
"s",
"return_input_paths",
"from",
"the",
"RNA",
"-",
"Seq",
"pipeline",
"."
] | 06310682c50dcf8917b912c8e551299ff7ee41ce | https://github.com/BD2KGenomics/protect/blob/06310682c50dcf8917b912c8e551299ff7ee41ce/attic/ProTECT.py#L1866-L1904 | train |
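The extension-based handling in get_files_from_filestore (extract tarballs, gunzip plain .gz files, otherwise use the file as-is) can be seen in isolation in the following sketch, which assumes nothing about Toil's file store or the Docker path translation:

import gzip
import os
import shutil
import tarfile

def expand_if_compressed(path, work_dir):
    # Extract a tarball, gunzip a plain .gz file, or return the path unchanged.
    if tarfile.is_tarfile(path):
        with tarfile.open(path) as tar:
            first_member = tar.getmembers()[0].name
            tar.extractall(path=work_dir)
        return os.path.join(work_dir, first_member)
    if path.endswith('.gz'):
        out_path = os.path.join(work_dir, os.path.basename(path)[:-3])
        with gzip.open(path, 'rb') as gz_in, open(out_path, 'wb') as out:
            shutil.copyfileobj(gz_in, out)
        return out_path
    return path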
BD2KGenomics/protect | attic/ProTECT.py | merge_vcfs | def merge_vcfs(vcf_file, merged_mut_file):
"""
This module will accept the vcf files for mutect and radia read into memory in a dict object
VCF_FILE and will merge the calls. Merged calls are printed to MERGED_MUT_FILE.
VCF_FILE is a dict with
key : mutation caller (mutect or radia)
value : dict with
key: (chrom, pos, ref, alt)
value: vcf line in list form (split by tab)
"""
mutect_keys = set(vcf_file['mutect'].keys())
radia_keys = set(vcf_file['radia'].keys())
common_keys = radia_keys.intersection(mutect_keys)
# Open as append since the header is already written
with open(merged_mut_file, 'a') as outfile:
for mutation in common_keys:
print('\t'.join(vcf_file['radia'][mutation]), file=outfile)
return None | python | def merge_vcfs(vcf_file, merged_mut_file):
"""
This module will accept the vcf files for mutect and radia read into memory in a dict object
VCF_FILE and will merge the calls. Merged calls are printed to MERGED_MUT_FILE.
VCF_FILE is a dict with
key : mutation caller (mutect or radia)
value : dict with
key: (chrom, pos, ref, alt)
value: vcf line in list form (split by tab)
"""
mutect_keys = set(vcf_file['mutect'].keys())
radia_keys = set(vcf_file['radia'].keys())
common_keys = radia_keys.intersection(mutect_keys)
# Open as append since the header is already written
with open(merged_mut_file, 'a') as outfile:
for mutation in common_keys:
print('\t'.join(vcf_file['radia'][mutation]), file=outfile)
return None | [
"def",
"merge_vcfs",
"(",
"vcf_file",
",",
"merged_mut_file",
")",
":",
"mutect_keys",
"=",
"set",
"(",
"vcf_file",
"[",
"'mutect'",
"]",
".",
"keys",
"(",
")",
")",
"radia_keys",
"=",
"set",
"(",
"vcf_file",
"[",
"'radia'",
"]",
".",
"keys",
"(",
")",
")",
"common_keys",
"=",
"radia_keys",
".",
"intersection",
"(",
"mutect_keys",
")",
"with",
"open",
"(",
"merged_mut_file",
",",
"'a'",
")",
"as",
"outfile",
":",
"for",
"mutation",
"in",
"common_keys",
":",
"print",
"(",
"'\\t'",
".",
"join",
"(",
"vcf_file",
"[",
"'radia'",
"]",
"[",
"mutation",
"]",
")",
",",
"file",
"=",
"outfile",
")",
"return",
"None"
] | This module will accept the vcf files for mutect and radia read into memory in a dict object
VCF_FILE and will merge the calls. Merged calls are printed to MERGED_MUT_FILE.
VCF_FILE is a dict with
key : mutation caller (mutect or radia)
value : dict with
key: (chrom, pos, ref, alt)
value: vcf line in list form (split by tab) | [
"This",
"module",
"will",
"accept",
"the",
"vcf",
"files",
"for",
"mutect",
"and",
"radia",
"read",
"into",
"memory",
"in",
"a",
"dict",
"object",
"VCF_FILE",
"and",
"will",
"merge",
"the",
"calls",
".",
"Merged",
"calls",
"are",
"printed",
"to",
"MERGED_MUT_FILE",
"."
] | 06310682c50dcf8917b912c8e551299ff7ee41ce | https://github.com/BD2KGenomics/protect/blob/06310682c50dcf8917b912c8e551299ff7ee41ce/attic/ProTECT.py#L1956-L1974 | train |
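A toy run of the intersection strategy used by merge_vcfs above (the variant keys and field lists are invented for illustration; this is not pipeline code):

mutect_calls = {('chr1', '100', 'A', 'T'): ['chr1', '100', '.', 'A', 'T'],
                ('chr1', '200', 'G', 'C'): ['chr1', '200', '.', 'G', 'C']}
radia_calls = {('chr1', '100', 'A', 'T'): ['chr1', '100', '.', 'A', 'T']}

shared = set(mutect_calls) & set(radia_calls)  # only calls made by both callers survive
for variant in sorted(shared):
    print('\t'.join(radia_calls[variant]))     # radia's line is the one written out, as above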
BD2KGenomics/protect | attic/ProTECT.py | docker_call | def docker_call(tool, tool_parameters, work_dir, java_opts=None, outfile=None,
dockerhub='aarjunrao', interactive=False):
"""
Makes subprocess call of a command to a docker container. work_dir MUST BE AN ABSOLUTE PATH or
the call will fail. outfile is an open file descriptor to a writeable file.
"""
# If an outfile has been provided, then ensure that it is of type file, it is writeable, and
# that it is open.
if outfile:
assert isinstance(outfile, file), 'outfile was not passed a file'
assert outfile.mode in ['w', 'a', 'wb', 'ab'], 'outfile not writeable'
assert not outfile.closed, 'outfile is closed'
# If the call is interactive, set interactive to '-i'
if interactive:
interactive = '-i'
else:
interactive = ''
# If a tag is passed along with the image, use it.
if ':' in tool:
docker_tool = '/'.join([dockerhub, tool])
# Else use 'latest'
else:
docker_tool = ''.join([dockerhub, '/', tool, ':latest'])
# Get the docker image on the worker if needed
call = ['docker', 'images']
dimg_rv = subprocess.check_output(call)
existing_images = [':'.join(x.split()[0:2]) for x in dimg_rv.splitlines()
if x.startswith(dockerhub)]
if docker_tool not in existing_images:
try:
call = ' '.join(['docker', 'pull', docker_tool]).split()
subprocess.check_call(call)
except subprocess.CalledProcessError as err:
raise RuntimeError('docker command returned a non-zero exit status ' +
'(%s)' % err.returncode + 'for command \"%s\"' % ' '.join(call),)
except OSError:
raise RuntimeError('docker not found on system. Install on all' +
' nodes.')
# If java options have been provided, it needs to be in the docker call
if java_opts:
base_docker_call = ' docker run -e JAVA_OPTS=-Xmx{} '.format(java_opts) + '--rm=true ' + \
'-v {}:/data --log-driver=none '.format(work_dir) + interactive
else:
base_docker_call = ' docker run --rm=true -v {}:/data '.format(work_dir) + \
'--log-driver=none ' + interactive
call = base_docker_call.split() + [docker_tool] + tool_parameters
try:
subprocess.check_call(call, stdout=outfile)
except subprocess.CalledProcessError as err:
raise RuntimeError('docker command returned a non-zero exit status (%s)' % err.returncode +
'for command \"%s\"' % ' '.join(call),)
except OSError:
raise RuntimeError('docker not found on system. Install on all nodes.') | python | def docker_call(tool, tool_parameters, work_dir, java_opts=None, outfile=None,
dockerhub='aarjunrao', interactive=False):
"""
Makes subprocess call of a command to a docker container. work_dir MUST BE AN ABSOLUTE PATH or
the call will fail. outfile is an open file descriptor to a writeable file.
"""
# If an outfile has been provided, then ensure that it is of type file, it is writeable, and
# that it is open.
if outfile:
assert isinstance(outfile, file), 'outfile was not passed a file'
assert outfile.mode in ['w', 'a', 'wb', 'ab'], 'outfile not writeable'
assert not outfile.closed, 'outfile is closed'
# If the call is interactive, set interactive to '-i'
if interactive:
interactive = '-i'
else:
interactive = ''
# If a tag is passed along with the image, use it.
if ':' in tool:
docker_tool = '/'.join([dockerhub, tool])
# Else use 'latest'
else:
docker_tool = ''.join([dockerhub, '/', tool, ':latest'])
# Get the docker image on the worker if needed
call = ['docker', 'images']
dimg_rv = subprocess.check_output(call)
existing_images = [':'.join(x.split()[0:2]) for x in dimg_rv.splitlines()
if x.startswith(dockerhub)]
if docker_tool not in existing_images:
try:
call = ' '.join(['docker', 'pull', docker_tool]).split()
subprocess.check_call(call)
except subprocess.CalledProcessError as err:
raise RuntimeError('docker command returned a non-zero exit status ' +
'(%s)' % err.returncode + 'for command \"%s\"' % ' '.join(call),)
except OSError:
raise RuntimeError('docker not found on system. Install on all' +
' nodes.')
# If java options have been provided, it needs to be in the docker call
if java_opts:
base_docker_call = ' docker run -e JAVA_OPTS=-Xmx{} '.format(java_opts) + '--rm=true ' + \
'-v {}:/data --log-driver=none '.format(work_dir) + interactive
else:
base_docker_call = ' docker run --rm=true -v {}:/data '.format(work_dir) + \
'--log-driver=none ' + interactive
call = base_docker_call.split() + [docker_tool] + tool_parameters
try:
subprocess.check_call(call, stdout=outfile)
except subprocess.CalledProcessError as err:
raise RuntimeError('docker command returned a non-zero exit status (%s)' % err.returncode +
'for command \"%s\"' % ' '.join(call),)
except OSError:
raise RuntimeError('docker not found on system. Install on all nodes.') | [
"def",
"docker_call",
"(",
"tool",
",",
"tool_parameters",
",",
"work_dir",
",",
"java_opts",
"=",
"None",
",",
"outfile",
"=",
"None",
",",
"dockerhub",
"=",
"'aarjunrao'",
",",
"interactive",
"=",
"False",
")",
":",
"if",
"outfile",
":",
"assert",
"isinstance",
"(",
"outfile",
",",
"file",
")",
",",
"'outfile was not passsed a file'",
"assert",
"outfile",
".",
"mode",
"in",
"[",
"'w'",
",",
"'a'",
",",
"'wb'",
",",
"'ab'",
"]",
",",
"'outfile not writeable'",
"assert",
"not",
"outfile",
".",
"closed",
",",
"'outfile is closed'",
"if",
"interactive",
":",
"interactive",
"=",
"'-i'",
"else",
":",
"interactive",
"=",
"''",
"if",
"':'",
"in",
"tool",
":",
"docker_tool",
"=",
"'/'",
".",
"join",
"(",
"[",
"dockerhub",
",",
"tool",
"]",
")",
"else",
":",
"docker_tool",
"=",
"''",
".",
"join",
"(",
"[",
"dockerhub",
",",
"'/'",
",",
"tool",
",",
"':latest'",
"]",
")",
"call",
"=",
"[",
"'docker'",
",",
"'images'",
"]",
"dimg_rv",
"=",
"subprocess",
".",
"check_output",
"(",
"call",
")",
"existing_images",
"=",
"[",
"':'",
".",
"join",
"(",
"x",
".",
"split",
"(",
")",
"[",
"0",
":",
"2",
"]",
")",
"for",
"x",
"in",
"dimg_rv",
".",
"splitlines",
"(",
")",
"if",
"x",
".",
"startswith",
"(",
"dockerhub",
")",
"]",
"if",
"docker_tool",
"not",
"in",
"existing_images",
":",
"try",
":",
"call",
"=",
"' '",
".",
"join",
"(",
"[",
"'docker'",
",",
"'pull'",
",",
"docker_tool",
"]",
")",
".",
"split",
"(",
")",
"subprocess",
".",
"check_call",
"(",
"call",
")",
"except",
"subprocess",
".",
"CalledProcessError",
"as",
"err",
":",
"raise",
"RuntimeError",
"(",
"'docker command returned a non-zero exit status '",
"+",
"'(%s)'",
"%",
"err",
".",
"returncode",
"+",
"'for command \\\"%s\\\"'",
"%",
"' '",
".",
"join",
"(",
"call",
")",
",",
")",
"except",
"OSError",
":",
"raise",
"RuntimeError",
"(",
"'docker not found on system. Install on all'",
"+",
"' nodes.'",
")",
"if",
"java_opts",
":",
"base_docker_call",
"=",
"' docker run -e JAVA_OPTS=-Xmx{} '",
".",
"format",
"(",
"java_opts",
")",
"+",
"'--rm=true '",
"+",
"'-v {}:/data --log-driver=none '",
".",
"format",
"(",
"work_dir",
")",
"+",
"interactive",
"else",
":",
"base_docker_call",
"=",
"' docker run --rm=true -v {}:/data '",
".",
"format",
"(",
"work_dir",
")",
"+",
"'--log-driver=none '",
"+",
"interactive",
"call",
"=",
"base_docker_call",
".",
"split",
"(",
")",
"+",
"[",
"docker_tool",
"]",
"+",
"tool_parameters",
"try",
":",
"subprocess",
".",
"check_call",
"(",
"call",
",",
"stdout",
"=",
"outfile",
")",
"except",
"subprocess",
".",
"CalledProcessError",
"as",
"err",
":",
"raise",
"RuntimeError",
"(",
"'docker command returned a non-zero exit status (%s)'",
"%",
"err",
".",
"returncode",
"+",
"'for command \\\"%s\\\"'",
"%",
"' '",
".",
"join",
"(",
"call",
")",
",",
")",
"except",
"OSError",
":",
"raise",
"RuntimeError",
"(",
"'docker not found on system. Install on all nodes.'",
")"
] | Makes subprocess call of a command to a docker container. work_dir MUST BE AN ABSOLUTE PATH or
the call will fail. outfile is an open file descriptor to a writeable file. | [
"Makes",
"subprocess",
"call",
"of",
"a",
"command",
"to",
"a",
"docker",
"container",
".",
"work_dir",
"MUST",
"BE",
"AN",
"ABSOLUTE",
"PATH",
"or",
"the",
"call",
"will",
"fail",
".",
"outfile",
"is",
"an",
"open",
"file",
"descriptor",
"to",
"a",
"writeable",
"file",
"."
] | 06310682c50dcf8917b912c8e551299ff7ee41ce | https://github.com/BD2KGenomics/protect/blob/06310682c50dcf8917b912c8e551299ff7ee41ce/attic/ProTECT.py#L2130-L2182 | train |
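A minimal usage sketch for the docker_call helper above. The samtools image name, the scratch directory and the input bam are illustrative assumptions, and the import assumes attic/ProTECT.py is on the PYTHONPATH; like the module itself (which checks isinstance(outfile, file)), it targets Python 2:

import os
from ProTECT import docker_call  # assumed import path for attic/ProTECT.py

work_dir = os.path.abspath('scratch')  # must be absolute, per the docstring
if not os.path.exists(work_dir):
    os.makedirs(work_dir)
with open(os.path.join(work_dir, 'out.sam'), 'w') as out_sam:
    # work_dir is mounted at /data inside the container, so tool paths use /data/...
    docker_call(tool='samtools', tool_parameters=['view', '/data/input.bam'],
                work_dir=work_dir, outfile=out_sam)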
BD2KGenomics/protect | attic/ProTECT.py | untargz | def untargz(input_targz_file, untar_to_dir):
"""
This module accepts a tar.gz archive and untars it.
RETURN VALUE: path to the untar-ed directory/file
NOTE: this module expects the multiple files to be in a directory before
being tar-ed.
"""
assert tarfile.is_tarfile(input_targz_file), 'Not a tar file.'
tarball = tarfile.open(input_targz_file)
return_value = os.path.join(untar_to_dir, tarball.getmembers()[0].name)
tarball.extractall(path=untar_to_dir)
tarball.close()
return return_value | python | def untargz(input_targz_file, untar_to_dir):
"""
This module accepts a tar.gz archive and untars it.
RETURN VALUE: path to the untar-ed directory/file
NOTE: this module expects the multiple files to be in a directory before
being tar-ed.
"""
assert tarfile.is_tarfile(input_targz_file), 'Not a tar file.'
tarball = tarfile.open(input_targz_file)
return_value = os.path.join(untar_to_dir, tarball.getmembers()[0].name)
tarball.extractall(path=untar_to_dir)
tarball.close()
return return_value | [
"def",
"untargz",
"(",
"input_targz_file",
",",
"untar_to_dir",
")",
":",
"assert",
"tarfile",
".",
"is_tarfile",
"(",
"input_targz_file",
")",
",",
"'Not a tar file.'",
"tarball",
"=",
"tarfile",
".",
"open",
"(",
"input_targz_file",
")",
"return_value",
"=",
"os",
".",
"path",
".",
"join",
"(",
"untar_to_dir",
",",
"tarball",
".",
"getmembers",
"(",
")",
"[",
"0",
"]",
".",
"name",
")",
"tarball",
".",
"extractall",
"(",
"path",
"=",
"untar_to_dir",
")",
"tarball",
".",
"close",
"(",
")",
"return",
"return_value"
] | This module accepts a tar.gz archive and untars it.
RETURN VALUE: path to the untar-ed directory/file
NOTE: this module expects the multiple files to be in a directory before
being tar-ed. | [
"This",
"module",
"accepts",
"a",
"tar",
".",
"gz",
"archive",
"and",
"untars",
"it",
"."
] | 06310682c50dcf8917b912c8e551299ff7ee41ce | https://github.com/BD2KGenomics/protect/blob/06310682c50dcf8917b912c8e551299ff7ee41ce/attic/ProTECT.py#L2185-L2199 | train |
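A short sketch of untargz; the archive name is hypothetical and the import assumes the same module path as above:

import os
from ProTECT import untargz

extracted = untargz('genome.fa.tar.gz', os.getcwd())  # hypothetical tar.gz archive
print(extracted)  # path to the archive's first member, now extracted into the cwd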
BD2KGenomics/protect | attic/ProTECT.py | bam2fastq | def bam2fastq(job, bamfile, univ_options):
"""
split an input bam to paired fastqs.
ARGUMENTS
1. bamfile: Path to a bam file
2. univ_options: Dict of universal arguments used by almost all tools
univ_options
|- 'dockerhub': <dockerhub to use>
+- 'java_Xmx': value for max heap passed to java
"""
work_dir = os.path.split(bamfile)[0]
base_name = os.path.split(os.path.splitext(bamfile)[0])[1]
parameters = ['SamToFastq',
''.join(['I=', docker_path(bamfile)]),
''.join(['F=/data/', base_name, '_1.fastq']),
''.join(['F2=/data/', base_name, '_2.fastq']),
''.join(['FU=/data/', base_name, '_UP.fastq'])]
docker_call(tool='picard', tool_parameters=parameters, work_dir=work_dir,
dockerhub=univ_options['dockerhub'], java_opts=univ_options['java_Xmx'])
first_fastq = ''.join([work_dir, '/', base_name, '_1.fastq'])
assert os.path.exists(first_fastq)
return first_fastq | python | def bam2fastq(job, bamfile, univ_options):
"""
split an input bam to paired fastqs.
ARGUMENTS
1. bamfile: Path to a bam file
2. univ_options: Dict of universal arguments used by almost all tools
univ_options
|- 'dockerhub': <dockerhub to use>
+- 'java_Xmx': value for max heap passed to java
"""
work_dir = os.path.split(bamfile)[0]
base_name = os.path.split(os.path.splitext(bamfile)[0])[1]
parameters = ['SamToFastq',
''.join(['I=', docker_path(bamfile)]),
''.join(['F=/data/', base_name, '_1.fastq']),
''.join(['F2=/data/', base_name, '_2.fastq']),
''.join(['FU=/data/', base_name, '_UP.fastq'])]
docker_call(tool='picard', tool_parameters=parameters, work_dir=work_dir,
dockerhub=univ_options['dockerhub'], java_opts=univ_options['java_Xmx'])
first_fastq = ''.join([work_dir, '/', base_name, '_1.fastq'])
assert os.path.exists(first_fastq)
return first_fastq | [
"def",
"bam2fastq",
"(",
"job",
",",
"bamfile",
",",
"univ_options",
")",
":",
"work_dir",
"=",
"os",
".",
"path",
".",
"split",
"(",
"bamfile",
")",
"[",
"0",
"]",
"base_name",
"=",
"os",
".",
"path",
".",
"split",
"(",
"os",
".",
"path",
".",
"splitext",
"(",
"bamfile",
")",
"[",
"0",
"]",
")",
"[",
"1",
"]",
"parameters",
"=",
"[",
"'SamToFastq'",
",",
"''",
".",
"join",
"(",
"[",
"'I='",
",",
"docker_path",
"(",
"bamfile",
")",
"]",
")",
",",
"''",
".",
"join",
"(",
"[",
"'F=/data/'",
",",
"base_name",
",",
"'_1.fastq'",
"]",
")",
",",
"''",
".",
"join",
"(",
"[",
"'F2=/data/'",
",",
"base_name",
",",
"'_2.fastq'",
"]",
")",
",",
"''",
".",
"join",
"(",
"[",
"'FU=/data/'",
",",
"base_name",
",",
"'_UP.fastq'",
"]",
")",
"]",
"docker_call",
"(",
"tool",
"=",
"'picard'",
",",
"tool_parameters",
"=",
"parameters",
",",
"work_dir",
"=",
"work_dir",
",",
"dockerhub",
"=",
"univ_options",
"[",
"'dockerhub'",
"]",
",",
"java_opts",
"=",
"univ_options",
"[",
"'java_Xmx'",
"]",
")",
"first_fastq",
"=",
"''",
".",
"join",
"(",
"[",
"work_dir",
",",
"'/'",
",",
"base_name",
",",
"'_1.fastq'",
"]",
")",
"assert",
"os",
".",
"path",
".",
"exists",
"(",
"first_fastq",
")",
"return",
"first_fastq"
] | split an input bam to paired fastqs.
ARGUMENTS
1. bamfile: Path to a bam file
2. univ_options: Dict of universal arguments used by almost all tools
univ_options
|- 'dockerhub': <dockerhub to use>
+- 'java_Xmx': value for max heap passed to java | [
"split",
"an",
"input",
"bam",
"to",
"paired",
"fastqs",
"."
] | 06310682c50dcf8917b912c8e551299ff7ee41ce | https://github.com/BD2KGenomics/protect/blob/06310682c50dcf8917b912c8e551299ff7ee41ce/attic/ProTECT.py#L2386-L2408 | train |
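bam2fastq is a Toil job function, so it is normally scheduled from a parent job rather than called directly; a hedged sketch of that wiring (option values are illustrative):

from ProTECT import bam2fastq  # assumed import path

def spawn_bam2fastq(job, bam_path, univ_options):
    # bam_path must be a path visible to the worker; univ_options needs at least
    # 'dockerhub' and 'java_Xmx', e.g. {'dockerhub': 'aarjunrao', 'java_Xmx': '20G'}.
    return job.addChildJobFn(bam2fastq, bam_path, univ_options).rv()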
BD2KGenomics/protect | attic/ProTECT.py | main | def main():
"""
This is the main function for the UCSC Precision Immuno pipeline.
"""
parser = argparse.ArgumentParser()
    parser.add_argument('--config_file', dest='config_file', help='Config file to be used in the ' +
'run.', type=str, required=True, default=None)
Job.Runner.addToilOptions(parser)
params = parser.parse_args()
START = Job.wrapJobFn(parse_config_file, params.config_file).encapsulate()
Job.Runner.startToil(START, params)
return None | python | def main():
"""
This is the main function for the UCSC Precision Immuno pipeline.
"""
parser = argparse.ArgumentParser()
    parser.add_argument('--config_file', dest='config_file', help='Config file to be used in the ' +
'run.', type=str, required=True, default=None)
Job.Runner.addToilOptions(parser)
params = parser.parse_args()
START = Job.wrapJobFn(parse_config_file, params.config_file).encapsulate()
Job.Runner.startToil(START, params)
return None | [
"def",
"main",
"(",
")",
":",
"parser",
"=",
"argparse",
".",
"ArgumentParser",
"(",
")",
"parser",
".",
"add_argument",
"(",
"'--config_file'",
",",
"dest",
"=",
"'config_file'",
",",
"help",
"=",
"'Config file to be used in the'",
"+",
"'run.'",
",",
"type",
"=",
"str",
",",
"required",
"=",
"True",
",",
"default",
"=",
"None",
")",
"Job",
".",
"Runner",
".",
"addToilOptions",
"(",
"parser",
")",
"params",
"=",
"parser",
".",
"parse_args",
"(",
")",
"START",
"=",
"Job",
".",
"wrapJobFn",
"(",
"parse_config_file",
",",
"params",
".",
"config_file",
")",
".",
"encapsulate",
"(",
")",
"Job",
".",
"Runner",
".",
"startToil",
"(",
"START",
",",
"params",
")",
"return",
"None"
] | This is the main function for the UCSC Precision Immuno pipeline. | [
"This",
"is",
"the",
"main",
"function",
"for",
"the",
"UCSC",
"Precision",
"Immuno",
"pipeline",
"."
] | 06310682c50dcf8917b912c8e551299ff7ee41ce | https://github.com/BD2KGenomics/protect/blob/06310682c50dcf8917b912c8e551299ff7ee41ce/attic/ProTECT.py#L2530-L2541 | train |
BD2KGenomics/protect | src/protect/mutation_calling/strelka.py | run_strelka_with_merge | def run_strelka_with_merge(job, tumor_bam, normal_bam, univ_options, strelka_options):
"""
    A wrapper for the entire strelka sub-graph.
:param dict tumor_bam: Dict of bam and bai for tumor DNA-Seq
:param dict normal_bam: Dict of bam and bai for normal DNA-Seq
:param dict univ_options: Dict of universal options used by almost all tools
:param dict strelka_options: Options specific to strelka
:return: fsID to the merged strelka calls
:rtype: toil.fileStore.FileID
"""
spawn = job.wrapJobFn(run_strelka, tumor_bam, normal_bam, univ_options,
strelka_options, split=False).encapsulate()
job.addChild(spawn)
return spawn.rv() | python | def run_strelka_with_merge(job, tumor_bam, normal_bam, univ_options, strelka_options):
"""
    A wrapper for the entire strelka sub-graph.
:param dict tumor_bam: Dict of bam and bai for tumor DNA-Seq
:param dict normal_bam: Dict of bam and bai for normal DNA-Seq
:param dict univ_options: Dict of universal options used by almost all tools
:param dict strelka_options: Options specific to strelka
:return: fsID to the merged strelka calls
:rtype: toil.fileStore.FileID
"""
spawn = job.wrapJobFn(run_strelka, tumor_bam, normal_bam, univ_options,
strelka_options, split=False).encapsulate()
job.addChild(spawn)
return spawn.rv() | [
"def",
"run_strelka_with_merge",
"(",
"job",
",",
"tumor_bam",
",",
"normal_bam",
",",
"univ_options",
",",
"strelka_options",
")",
":",
"spawn",
"=",
"job",
".",
"wrapJobFn",
"(",
"run_strelka",
",",
"tumor_bam",
",",
"normal_bam",
",",
"univ_options",
",",
"strelka_options",
",",
"split",
"=",
"False",
")",
".",
"encapsulate",
"(",
")",
"job",
".",
"addChild",
"(",
"spawn",
")",
"return",
"spawn",
".",
"rv",
"(",
")"
] | A wrapper for the entire strelka sub-graph.
:param dict tumor_bam: Dict of bam and bai for tumor DNA-Seq
:param dict normal_bam: Dict of bam and bai for normal DNA-Seq
:param dict univ_options: Dict of universal options used by almost all tools
:param dict strelka_options: Options specific to strelka
:return: fsID to the merged strelka calls
:rtype: toil.fileStore.FileID | [
"A",
"wrapper",
"for",
"the",
"the",
"entire",
"strelka",
"sub",
"-",
"graph",
"."
] | 06310682c50dcf8917b912c8e551299ff7ee41ce | https://github.com/BD2KGenomics/protect/blob/06310682c50dcf8917b912c8e551299ff7ee41ce/src/protect/mutation_calling/strelka.py#L39-L53 | train |
BD2KGenomics/protect | src/protect/mutation_calling/strelka.py | run_strelka | def run_strelka(job, tumor_bam, normal_bam, univ_options, strelka_options, split=True):
"""
Run the strelka subgraph on the DNA bams. Optionally split the results into per-chromosome
vcfs.
:param dict tumor_bam: Dict of bam and bai for tumor DNA-Seq
:param dict normal_bam: Dict of bam and bai for normal DNA-Seq
:param dict univ_options: Dict of universal options used by almost all tools
:param dict strelka_options: Options specific to strelka
:param bool split: Should the results be split into perchrom vcfs?
:return: Either the fsID to the genome-level vcf or a dict of results from running strelka
on every chromosome
perchrom_strelka:
|- 'chr1':
| |-'snvs': fsID
| +-'indels': fsID
|- 'chr2':
| |-'snvs': fsID
| +-'indels': fsID
|-...
|
+- 'chrM':
|-'snvs': fsID
+-'indels': fsID
:rtype: toil.fileStore.FileID|dict
"""
if strelka_options['chromosomes']:
chromosomes = strelka_options['chromosomes']
else:
chromosomes = sample_chromosomes(job, strelka_options['genome_fai'])
num_cores = min(len(chromosomes), univ_options['max_cores'])
strelka = job.wrapJobFn(run_strelka_full, tumor_bam, normal_bam, univ_options,
strelka_options,
disk=PromisedRequirement(strelka_disk,
tumor_bam['tumor_dna_fix_pg_sorted.bam'],
normal_bam['normal_dna_fix_pg_sorted.bam'],
strelka_options['genome_fasta']),
memory='6G',
cores=num_cores)
job.addChild(strelka)
if split:
unmerge_strelka = job.wrapJobFn(wrap_unmerge, strelka.rv(), chromosomes, strelka_options,
univ_options).encapsulate()
strelka.addChild(unmerge_strelka)
return unmerge_strelka.rv()
else:
return strelka.rv() | python | def run_strelka(job, tumor_bam, normal_bam, univ_options, strelka_options, split=True):
"""
Run the strelka subgraph on the DNA bams. Optionally split the results into per-chromosome
vcfs.
:param dict tumor_bam: Dict of bam and bai for tumor DNA-Seq
:param dict normal_bam: Dict of bam and bai for normal DNA-Seq
:param dict univ_options: Dict of universal options used by almost all tools
:param dict strelka_options: Options specific to strelka
:param bool split: Should the results be split into perchrom vcfs?
:return: Either the fsID to the genome-level vcf or a dict of results from running strelka
on every chromosome
perchrom_strelka:
|- 'chr1':
| |-'snvs': fsID
| +-'indels': fsID
|- 'chr2':
| |-'snvs': fsID
| +-'indels': fsID
|-...
|
+- 'chrM':
|-'snvs': fsID
+-'indels': fsID
:rtype: toil.fileStore.FileID|dict
"""
if strelka_options['chromosomes']:
chromosomes = strelka_options['chromosomes']
else:
chromosomes = sample_chromosomes(job, strelka_options['genome_fai'])
num_cores = min(len(chromosomes), univ_options['max_cores'])
strelka = job.wrapJobFn(run_strelka_full, tumor_bam, normal_bam, univ_options,
strelka_options,
disk=PromisedRequirement(strelka_disk,
tumor_bam['tumor_dna_fix_pg_sorted.bam'],
normal_bam['normal_dna_fix_pg_sorted.bam'],
strelka_options['genome_fasta']),
memory='6G',
cores=num_cores)
job.addChild(strelka)
if split:
unmerge_strelka = job.wrapJobFn(wrap_unmerge, strelka.rv(), chromosomes, strelka_options,
univ_options).encapsulate()
strelka.addChild(unmerge_strelka)
return unmerge_strelka.rv()
else:
return strelka.rv() | [
"def",
"run_strelka",
"(",
"job",
",",
"tumor_bam",
",",
"normal_bam",
",",
"univ_options",
",",
"strelka_options",
",",
"split",
"=",
"True",
")",
":",
"if",
"strelka_options",
"[",
"'chromosomes'",
"]",
":",
"chromosomes",
"=",
"strelka_options",
"[",
"'chromosomes'",
"]",
"else",
":",
"chromosomes",
"=",
"sample_chromosomes",
"(",
"job",
",",
"strelka_options",
"[",
"'genome_fai'",
"]",
")",
"num_cores",
"=",
"min",
"(",
"len",
"(",
"chromosomes",
")",
",",
"univ_options",
"[",
"'max_cores'",
"]",
")",
"strelka",
"=",
"job",
".",
"wrapJobFn",
"(",
"run_strelka_full",
",",
"tumor_bam",
",",
"normal_bam",
",",
"univ_options",
",",
"strelka_options",
",",
"disk",
"=",
"PromisedRequirement",
"(",
"strelka_disk",
",",
"tumor_bam",
"[",
"'tumor_dna_fix_pg_sorted.bam'",
"]",
",",
"normal_bam",
"[",
"'normal_dna_fix_pg_sorted.bam'",
"]",
",",
"strelka_options",
"[",
"'genome_fasta'",
"]",
")",
",",
"memory",
"=",
"'6G'",
",",
"cores",
"=",
"num_cores",
")",
"job",
".",
"addChild",
"(",
"strelka",
")",
"if",
"split",
":",
"unmerge_strelka",
"=",
"job",
".",
"wrapJobFn",
"(",
"wrap_unmerge",
",",
"strelka",
".",
"rv",
"(",
")",
",",
"chromosomes",
",",
"strelka_options",
",",
"univ_options",
")",
".",
"encapsulate",
"(",
")",
"strelka",
".",
"addChild",
"(",
"unmerge_strelka",
")",
"return",
"unmerge_strelka",
".",
"rv",
"(",
")",
"else",
":",
"return",
"strelka",
".",
"rv",
"(",
")"
] | Run the strelka subgraph on the DNA bams. Optionally split the results into per-chromosome
vcfs.
:param dict tumor_bam: Dict of bam and bai for tumor DNA-Seq
:param dict normal_bam: Dict of bam and bai for normal DNA-Seq
:param dict univ_options: Dict of universal options used by almost all tools
:param dict strelka_options: Options specific to strelka
:param bool split: Should the results be split into perchrom vcfs?
:return: Either the fsID to the genome-level vcf or a dict of results from running strelka
on every chromosome
perchrom_strelka:
|- 'chr1':
| |-'snvs': fsID
| +-'indels': fsID
|- 'chr2':
| |-'snvs': fsID
| +-'indels': fsID
|-...
|
+- 'chrM':
|-'snvs': fsID
+-'indels': fsID
:rtype: toil.fileStore.FileID|dict | [
"Run",
"the",
"strelka",
"subgraph",
"on",
"the",
"DNA",
"bams",
".",
"Optionally",
"split",
"the",
"results",
"into",
"per",
"-",
"chromosome",
"vcfs",
"."
] | 06310682c50dcf8917b912c8e551299ff7ee41ce | https://github.com/BD2KGenomics/protect/blob/06310682c50dcf8917b912c8e551299ff7ee41ce/src/protect/mutation_calling/strelka.py#L56-L102 | train |
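A hedged sketch of scheduling run_strelka from a parent Toil job; the dict keys mirror the ones the function reads above, and everything else (fsIDs, versions) is a placeholder:

from protect.mutation_calling.strelka import run_strelka

def schedule_strelka(job, tumor_bam, normal_bam, univ_options, strelka_options):
    # tumor_bam/normal_bam map 'tumor_dna_fix_pg_sorted.bam[.bai]' and
    # 'normal_dna_fix_pg_sorted.bam[.bai]' to fsIDs; strelka_options needs
    # 'chromosomes', 'genome_fasta', 'genome_fai', 'config_file' and 'version'.
    return job.addChildJobFn(run_strelka, tumor_bam, normal_bam,
                             univ_options, strelka_options, split=True).rv()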
BD2KGenomics/protect | src/protect/mutation_calling/strelka.py | run_strelka_full | def run_strelka_full(job, tumor_bam, normal_bam, univ_options, strelka_options):
"""
Run strelka on the DNA bams.
:param dict tumor_bam: Dict of bam and bai for tumor DNA-Seq
:param dict normal_bam: Dict of bam and bai for normal DNA-Seq
:param dict univ_options: Dict of universal options used by almost all tools
:param dict strelka_options: Options specific to strelka
:return: Dict of fsIDs snv and indel prediction files
output_dict:
|-'snvs': fsID
+-'indels': fsID
:rtype: dict
"""
work_dir = os.getcwd()
input_files = {
'tumor.bam': tumor_bam['tumor_dna_fix_pg_sorted.bam'],
'tumor.bam.bai': tumor_bam['tumor_dna_fix_pg_sorted.bam.bai'],
'normal.bam': normal_bam['normal_dna_fix_pg_sorted.bam'],
'normal.bam.bai': normal_bam['normal_dna_fix_pg_sorted.bam.bai'],
'genome.fa.tar.gz': strelka_options['genome_fasta'],
'genome.fa.fai.tar.gz': strelka_options['genome_fai'],
'config.ini.tar.gz': strelka_options['config_file']
}
input_files = get_files_from_filestore(job, input_files, work_dir, docker=False)
for key in ('genome.fa', 'genome.fa.fai', 'config.ini'):
input_files[key] = untargz(input_files[key + '.tar.gz'], work_dir)
input_files = {key: docker_path(path) for key, path in input_files.items()}
parameters = [input_files['config.ini'],
input_files['tumor.bam'],
input_files['normal.bam'],
input_files['genome.fa'],
str(job.cores)
]
docker_call(tool='strelka', tool_parameters=parameters, work_dir=work_dir,
dockerhub=univ_options['dockerhub'], tool_version=strelka_options['version'])
output_dict = {}
for mutation_type in ['snvs', 'indels']:
output_dict[mutation_type] = job.fileStore.writeGlobalFile(os.path.join(
work_dir, 'strelka_out', 'results', 'passed.somatic.' + mutation_type + '.vcf'))
job.fileStore.logToMaster('Ran strelka on %s successfully' % univ_options['patient'])
return output_dict | python | def run_strelka_full(job, tumor_bam, normal_bam, univ_options, strelka_options):
"""
Run strelka on the DNA bams.
:param dict tumor_bam: Dict of bam and bai for tumor DNA-Seq
:param dict normal_bam: Dict of bam and bai for normal DNA-Seq
:param dict univ_options: Dict of universal options used by almost all tools
:param dict strelka_options: Options specific to strelka
:return: Dict of fsIDs snv and indel prediction files
output_dict:
|-'snvs': fsID
+-'indels': fsID
:rtype: dict
"""
work_dir = os.getcwd()
input_files = {
'tumor.bam': tumor_bam['tumor_dna_fix_pg_sorted.bam'],
'tumor.bam.bai': tumor_bam['tumor_dna_fix_pg_sorted.bam.bai'],
'normal.bam': normal_bam['normal_dna_fix_pg_sorted.bam'],
'normal.bam.bai': normal_bam['normal_dna_fix_pg_sorted.bam.bai'],
'genome.fa.tar.gz': strelka_options['genome_fasta'],
'genome.fa.fai.tar.gz': strelka_options['genome_fai'],
'config.ini.tar.gz': strelka_options['config_file']
}
input_files = get_files_from_filestore(job, input_files, work_dir, docker=False)
for key in ('genome.fa', 'genome.fa.fai', 'config.ini'):
input_files[key] = untargz(input_files[key + '.tar.gz'], work_dir)
input_files = {key: docker_path(path) for key, path in input_files.items()}
parameters = [input_files['config.ini'],
input_files['tumor.bam'],
input_files['normal.bam'],
input_files['genome.fa'],
str(job.cores)
]
docker_call(tool='strelka', tool_parameters=parameters, work_dir=work_dir,
dockerhub=univ_options['dockerhub'], tool_version=strelka_options['version'])
output_dict = {}
for mutation_type in ['snvs', 'indels']:
output_dict[mutation_type] = job.fileStore.writeGlobalFile(os.path.join(
work_dir, 'strelka_out', 'results', 'passed.somatic.' + mutation_type + '.vcf'))
job.fileStore.logToMaster('Ran strelka on %s successfully' % univ_options['patient'])
return output_dict | [
"def",
"run_strelka_full",
"(",
"job",
",",
"tumor_bam",
",",
"normal_bam",
",",
"univ_options",
",",
"strelka_options",
")",
":",
"work_dir",
"=",
"os",
".",
"getcwd",
"(",
")",
"input_files",
"=",
"{",
"'tumor.bam'",
":",
"tumor_bam",
"[",
"'tumor_dna_fix_pg_sorted.bam'",
"]",
",",
"'tumor.bam.bai'",
":",
"tumor_bam",
"[",
"'tumor_dna_fix_pg_sorted.bam.bai'",
"]",
",",
"'normal.bam'",
":",
"normal_bam",
"[",
"'normal_dna_fix_pg_sorted.bam'",
"]",
",",
"'normal.bam.bai'",
":",
"normal_bam",
"[",
"'normal_dna_fix_pg_sorted.bam.bai'",
"]",
",",
"'genome.fa.tar.gz'",
":",
"strelka_options",
"[",
"'genome_fasta'",
"]",
",",
"'genome.fa.fai.tar.gz'",
":",
"strelka_options",
"[",
"'genome_fai'",
"]",
",",
"'config.ini.tar.gz'",
":",
"strelka_options",
"[",
"'config_file'",
"]",
"}",
"input_files",
"=",
"get_files_from_filestore",
"(",
"job",
",",
"input_files",
",",
"work_dir",
",",
"docker",
"=",
"False",
")",
"for",
"key",
"in",
"(",
"'genome.fa'",
",",
"'genome.fa.fai'",
",",
"'config.ini'",
")",
":",
"input_files",
"[",
"key",
"]",
"=",
"untargz",
"(",
"input_files",
"[",
"key",
"+",
"'.tar.gz'",
"]",
",",
"work_dir",
")",
"input_files",
"=",
"{",
"key",
":",
"docker_path",
"(",
"path",
")",
"for",
"key",
",",
"path",
"in",
"input_files",
".",
"items",
"(",
")",
"}",
"parameters",
"=",
"[",
"input_files",
"[",
"'config.ini'",
"]",
",",
"input_files",
"[",
"'tumor.bam'",
"]",
",",
"input_files",
"[",
"'normal.bam'",
"]",
",",
"input_files",
"[",
"'genome.fa'",
"]",
",",
"str",
"(",
"job",
".",
"cores",
")",
"]",
"docker_call",
"(",
"tool",
"=",
"'strelka'",
",",
"tool_parameters",
"=",
"parameters",
",",
"work_dir",
"=",
"work_dir",
",",
"dockerhub",
"=",
"univ_options",
"[",
"'dockerhub'",
"]",
",",
"tool_version",
"=",
"strelka_options",
"[",
"'version'",
"]",
")",
"output_dict",
"=",
"{",
"}",
"for",
"mutation_type",
"in",
"[",
"'snvs'",
",",
"'indels'",
"]",
":",
"output_dict",
"[",
"mutation_type",
"]",
"=",
"job",
".",
"fileStore",
".",
"writeGlobalFile",
"(",
"os",
".",
"path",
".",
"join",
"(",
"work_dir",
",",
"'strelka_out'",
",",
"'results'",
",",
"'passed.somatic.'",
"+",
"mutation_type",
"+",
"'.vcf'",
")",
")",
"job",
".",
"fileStore",
".",
"logToMaster",
"(",
"'Ran strelka on %s successfully'",
"%",
"univ_options",
"[",
"'patient'",
"]",
")",
"return",
"output_dict"
] | Run strelka on the DNA bams.
:param dict tumor_bam: Dict of bam and bai for tumor DNA-Seq
:param dict normal_bam: Dict of bam and bai for normal DNA-Seq
:param dict univ_options: Dict of universal options used by almost all tools
:param dict strelka_options: Options specific to strelka
:return: Dict of fsIDs snv and indel prediction files
output_dict:
|-'snvs': fsID
+-'indels': fsID
:rtype: dict | [
"Run",
"strelka",
"on",
"the",
"DNA",
"bams",
"."
] | 06310682c50dcf8917b912c8e551299ff7ee41ce | https://github.com/BD2KGenomics/protect/blob/06310682c50dcf8917b912c8e551299ff7ee41ce/src/protect/mutation_calling/strelka.py#L105-L148 | train |
BD2KGenomics/protect | src/protect/mutation_calling/strelka.py | wrap_unmerge | def wrap_unmerge(job, strelka_out, chromosomes, strelka_options, univ_options):
"""
    A wrapper to unmerge the strelka snvs and indels
:param dict strelka_out: Results from run_strelka
:param list chromosomes: List of chromosomes to retain
:param dict strelka_options: Options specific to strelka
:param dict univ_options: Dict of universal options used by almost all tools
:return: Dict of dicts containing the fsIDs for the per-chromosome snv and indel calls
output:
|- 'snvs':
| |- 'chr1': fsID
| |- 'chr2': fsID
| |- ...
| +- 'chrM': fsID
+- 'indels':
|- 'chr1': fsID
|- 'chr2': fsID
|- ...
+- 'chrM': fsID
:rtype: dict
"""
return {'snvs': job.addChildJobFn(unmerge, strelka_out['snvs'], 'strelka/snv', chromosomes,
strelka_options, univ_options).rv(),
'indels': job.addChildJobFn(unmerge, strelka_out['indels'], 'strelka/indel',
chromosomes, strelka_options, univ_options).rv()} | python | def wrap_unmerge(job, strelka_out, chromosomes, strelka_options, univ_options):
"""
    A wrapper to unmerge the strelka snvs and indels
:param dict strelka_out: Results from run_strelka
:param list chromosomes: List of chromosomes to retain
:param dict strelka_options: Options specific to strelka
:param dict univ_options: Dict of universal options used by almost all tools
:return: Dict of dicts containing the fsIDs for the per-chromosome snv and indel calls
output:
|- 'snvs':
| |- 'chr1': fsID
| |- 'chr2': fsID
| |- ...
| +- 'chrM': fsID
+- 'indels':
|- 'chr1': fsID
|- 'chr2': fsID
|- ...
+- 'chrM': fsID
:rtype: dict
"""
return {'snvs': job.addChildJobFn(unmerge, strelka_out['snvs'], 'strelka/snv', chromosomes,
strelka_options, univ_options).rv(),
'indels': job.addChildJobFn(unmerge, strelka_out['indels'], 'strelka/indel',
chromosomes, strelka_options, univ_options).rv()} | [
"def",
"wrap_unmerge",
"(",
"job",
",",
"strelka_out",
",",
"chromosomes",
",",
"strelka_options",
",",
"univ_options",
")",
":",
"return",
"{",
"'snvs'",
":",
"job",
".",
"addChildJobFn",
"(",
"unmerge",
",",
"strelka_out",
"[",
"'snvs'",
"]",
",",
"'strelka/snv'",
",",
"chromosomes",
",",
"strelka_options",
",",
"univ_options",
")",
".",
"rv",
"(",
")",
",",
"'indels'",
":",
"job",
".",
"addChildJobFn",
"(",
"unmerge",
",",
"strelka_out",
"[",
"'indels'",
"]",
",",
"'strelka/indel'",
",",
"chromosomes",
",",
"strelka_options",
",",
"univ_options",
")",
".",
"rv",
"(",
")",
"}"
] | A wrapper to unmerge the strelka snvs and indels
:param dict strelka_out: Results from run_strelka
:param list chromosomes: List of chromosomes to retain
:param dict strelka_options: Options specific to strelka
:param dict univ_options: Dict of universal options used by almost all tools
:return: Dict of dicts containing the fsIDs for the per-chromosome snv and indel calls
output:
|- 'snvs':
| |- 'chr1': fsID
| |- 'chr2': fsID
| |- ...
| +- 'chrM': fsID
+- 'indels':
|- 'chr1': fsID
|- 'chr2': fsID
|- ...
+- 'chrM': fsID
:rtype: dict | [
"A",
"wwrapper",
"to",
"unmerge",
"the",
"strelka",
"snvs",
"and",
"indels"
] | 06310682c50dcf8917b912c8e551299ff7ee41ce | https://github.com/BD2KGenomics/protect/blob/06310682c50dcf8917b912c8e551299ff7ee41ce/src/protect/mutation_calling/strelka.py#L165-L190 | train |
budacom/trading-bots | trading_bots/utils.py | get_iso_time_str | def get_iso_time_str(timestamp: Union[int, float, str, datetime]=None) -> str:
"""Get the ISO time string from a timestamp or date obj. Returns current time str if no timestamp is passed"""
if isinstance(timestamp, (int, float)):
maya_dt = maya.MayaDT(timestamp)
elif isinstance(timestamp, str):
maya_dt = maya.when(timestamp)
elif timestamp is None:
maya_dt = maya.now()
else:
raise ValueError(f'`{type(timestamp)}` is not supported')
return maya_dt.iso8601() | python | def get_iso_time_str(timestamp: Union[int, float, str, datetime]=None) -> str:
"""Get the ISO time string from a timestamp or date obj. Returns current time str if no timestamp is passed"""
if isinstance(timestamp, (int, float)):
maya_dt = maya.MayaDT(timestamp)
elif isinstance(timestamp, str):
maya_dt = maya.when(timestamp)
elif timestamp is None:
maya_dt = maya.now()
else:
raise ValueError(f'`{type(timestamp)}` is not supported')
return maya_dt.iso8601() | [
"def",
"get_iso_time_str",
"(",
"timestamp",
":",
"Union",
"[",
"int",
",",
"float",
",",
"str",
",",
"datetime",
"]",
"=",
"None",
")",
"->",
"str",
":",
"if",
"isinstance",
"(",
"timestamp",
",",
"(",
"int",
",",
"float",
")",
")",
":",
"maya_dt",
"=",
"maya",
".",
"MayaDT",
"(",
"timestamp",
")",
"elif",
"isinstance",
"(",
"timestamp",
",",
"str",
")",
":",
"maya_dt",
"=",
"maya",
".",
"when",
"(",
"timestamp",
")",
"elif",
"timestamp",
"is",
"None",
":",
"maya_dt",
"=",
"maya",
".",
"now",
"(",
")",
"else",
":",
"raise",
"ValueError",
"(",
"f'`{type(timestamp)}` is not supported'",
")",
"return",
"maya_dt",
".",
"iso8601",
"(",
")"
] | Get the ISO time string from a timestamp or date obj. Returns current time str if no timestamp is passed | [
"Get",
"the",
"ISO",
"time",
"string",
"from",
"a",
"timestamp",
"or",
"date",
"obj",
".",
"Returns",
"current",
"time",
"str",
"if",
"no",
"timestamp",
"is",
"passed"
] | 8cb68bb8d0b5f822108db1cc5dae336e3d3c3452 | https://github.com/budacom/trading-bots/blob/8cb68bb8d0b5f822108db1cc5dae336e3d3c3452/trading_bots/utils.py#L27-L37 | train |
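A few hedged examples of get_iso_time_str; the exact strings depend on maya's formatting, so only the general shape is indicated. Note that a datetime instance, despite the annotation, falls through to the ValueError branch:

from trading_bots.utils import get_iso_time_str

get_iso_time_str(0)             # epoch seconds -> roughly '1970-01-01T00:00:00Z'
get_iso_time_str('2019-01-01')  # parsed via maya.when()
get_iso_time_str()              # current time as an ISO 8601 string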
budacom/trading-bots | trading_bots/utils.py | truncate | def truncate(value: Decimal, n_digits: int) -> Decimal:
"""Truncates a value to a number of decimals places"""
return Decimal(math.trunc(value * (10 ** n_digits))) / (10 ** n_digits) | python | def truncate(value: Decimal, n_digits: int) -> Decimal:
"""Truncates a value to a number of decimals places"""
return Decimal(math.trunc(value * (10 ** n_digits))) / (10 ** n_digits) | [
"def",
"truncate",
"(",
"value",
":",
"Decimal",
",",
"n_digits",
":",
"int",
")",
"->",
"Decimal",
":",
"return",
"Decimal",
"(",
"math",
".",
"trunc",
"(",
"value",
"*",
"(",
"10",
"**",
"n_digits",
")",
")",
")",
"/",
"(",
"10",
"**",
"n_digits",
")"
] | Truncates a value to a number of decimal places | [
"Truncates",
"a",
"value",
"to",
"a",
"number",
"of",
"decimals",
"places"
] | 8cb68bb8d0b5f822108db1cc5dae336e3d3c3452 | https://github.com/budacom/trading-bots/blob/8cb68bb8d0b5f822108db1cc5dae336e3d3c3452/trading_bots/utils.py#L40-L42 | train |
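truncate drops digits beyond the requested precision rather than rounding; a quick sketch:

from decimal import Decimal
from trading_bots.utils import truncate

truncate(Decimal('1.23456789'), 4)  # Decimal('1.2345') -- truncated, not rounded
truncate(Decimal('1.99999999'), 2)  # Decimal('1.99')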
budacom/trading-bots | trading_bots/utils.py | truncate_to | def truncate_to(value: Decimal, currency: str) -> Decimal:
"""Truncates a value to the number of decimals corresponding to the currency"""
decimal_places = DECIMALS.get(currency.upper(), 2)
return truncate(value, decimal_places) | python | def truncate_to(value: Decimal, currency: str) -> Decimal:
"""Truncates a value to the number of decimals corresponding to the currency"""
decimal_places = DECIMALS.get(currency.upper(), 2)
return truncate(value, decimal_places) | [
"def",
"truncate_to",
"(",
"value",
":",
"Decimal",
",",
"currency",
":",
"str",
")",
"->",
"Decimal",
":",
"decimal_places",
"=",
"DECIMALS",
".",
"get",
"(",
"currency",
".",
"upper",
"(",
")",
",",
"2",
")",
"return",
"truncate",
"(",
"value",
",",
"decimal_places",
")"
] | Truncates a value to the number of decimals corresponding to the currency | [
"Truncates",
"a",
"value",
"to",
"the",
"number",
"of",
"decimals",
"corresponding",
"to",
"the",
"currency"
] | 8cb68bb8d0b5f822108db1cc5dae336e3d3c3452 | https://github.com/budacom/trading-bots/blob/8cb68bb8d0b5f822108db1cc5dae336e3d3c3452/trading_bots/utils.py#L45-L48 | train |
budacom/trading-bots | trading_bots/utils.py | truncate_money | def truncate_money(money: Money) -> Money:
"""Truncates money amount to the number of decimals corresponding to the currency"""
amount = truncate_to(money.amount, money.currency)
return Money(amount, money.currency) | python | def truncate_money(money: Money) -> Money:
"""Truncates money amount to the number of decimals corresponding to the currency"""
amount = truncate_to(money.amount, money.currency)
return Money(amount, money.currency) | [
"def",
"truncate_money",
"(",
"money",
":",
"Money",
")",
"->",
"Money",
":",
"amount",
"=",
"truncate_to",
"(",
"money",
".",
"amount",
",",
"money",
".",
"currency",
")",
"return",
"Money",
"(",
"amount",
",",
"money",
".",
"currency",
")"
] | Truncates money amount to the number of decimals corresponding to the currency | [
"Truncates",
"money",
"amount",
"to",
"the",
"number",
"of",
"decimals",
"corresponding",
"to",
"the",
"currency"
] | 8cb68bb8d0b5f822108db1cc5dae336e3d3c3452 | https://github.com/budacom/trading-bots/blob/8cb68bb8d0b5f822108db1cc5dae336e3d3c3452/trading_bots/utils.py#L51-L54 | train |
budacom/trading-bots | trading_bots/utils.py | spread_value | def spread_value(value: Decimal, spread_p: Decimal) -> Tuple[Decimal, Decimal]:
"""Returns a lower and upper value separated by a spread percentage"""
upper = value * (1 + spread_p)
lower = value / (1 + spread_p)
return lower, upper | python | def spread_value(value: Decimal, spread_p: Decimal) -> Tuple[Decimal, Decimal]:
"""Returns a lower and upper value separated by a spread percentage"""
upper = value * (1 + spread_p)
lower = value / (1 + spread_p)
return lower, upper | [
"def",
"spread_value",
"(",
"value",
":",
"Decimal",
",",
"spread_p",
":",
"Decimal",
")",
"->",
"Tuple",
"[",
"Decimal",
",",
"Decimal",
"]",
":",
"upper",
"=",
"value",
"*",
"(",
"1",
"+",
"spread_p",
")",
"lower",
"=",
"value",
"/",
"(",
"1",
"+",
"spread_p",
")",
"return",
"lower",
",",
"upper"
] | Returns a lower and upper value separated by a spread percentage | [
"Returns",
"a",
"lower",
"and",
"upper",
"value",
"separated",
"by",
"a",
"spread",
"percentage"
] | 8cb68bb8d0b5f822108db1cc5dae336e3d3c3452 | https://github.com/budacom/trading-bots/blob/8cb68bb8d0b5f822108db1cc5dae336e3d3c3452/trading_bots/utils.py#L57-L61 | train |
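Because spread_value multiplies upward and divides downward, the two bounds are not symmetric around the input; for example:

from decimal import Decimal
from trading_bots.utils import spread_value

lower, upper = spread_value(Decimal('100'), Decimal('0.01'))
# upper == Decimal('100') * Decimal('1.01') == Decimal('101.00')
# lower == Decimal('100') / Decimal('1.01') ~= Decimal('99.0099...')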
budacom/trading-bots | trading_bots/utils.py | spread_money | def spread_money(money: Money, spread_p: Decimal) -> Tuple[Money, Money]:
"""Returns a lower and upper money amount separated by a spread percentage"""
upper, lower = spread_value(money.amount, spread_p)
return Money(upper, money.currency), Money(lower, money.currency) | python | def spread_money(money: Money, spread_p: Decimal) -> Tuple[Money, Money]:
"""Returns a lower and upper money amount separated by a spread percentage"""
upper, lower = spread_value(money.amount, spread_p)
return Money(upper, money.currency), Money(lower, money.currency) | [
"def",
"spread_money",
"(",
"money",
":",
"Money",
",",
"spread_p",
":",
"Decimal",
")",
"->",
"Tuple",
"[",
"Money",
",",
"Money",
"]",
":",
"upper",
",",
"lower",
"=",
"spread_value",
"(",
"money",
".",
"amount",
",",
"spread_p",
")",
"return",
"Money",
"(",
"upper",
",",
"money",
".",
"currency",
")",
",",
"Money",
"(",
"lower",
",",
"money",
".",
"currency",
")"
] | Returns a lower and upper money amount separated by a spread percentage | [
"Returns",
"a",
"lower",
"and",
"upper",
"money",
"amount",
"separated",
"by",
"a",
"spread",
"percentage"
] | 8cb68bb8d0b5f822108db1cc5dae336e3d3c3452 | https://github.com/budacom/trading-bots/blob/8cb68bb8d0b5f822108db1cc5dae336e3d3c3452/trading_bots/utils.py#L64-L67 | train |
nepalicalendar/nepalicalendar-py | nepalicalendar/functions.py | check_valid_ad_range | def check_valid_ad_range(date):
"""
Checks if the english date is in valid range for conversion
"""
if date < values.START_EN_DATE or date > values.END_EN_DATE:
raise ValueError("Date out of range")
return True | python | def check_valid_ad_range(date):
"""
Checks if the english date is in valid range for conversion
"""
if date < values.START_EN_DATE or date > values.END_EN_DATE:
raise ValueError("Date out of range")
return True | [
"def",
"check_valid_ad_range",
"(",
"date",
")",
":",
"if",
"date",
"<",
"values",
".",
"START_EN_DATE",
"or",
"date",
">",
"values",
".",
"END_EN_DATE",
":",
"raise",
"ValueError",
"(",
"\"Date out of range\"",
")",
"return",
"True"
] | Checks if the english date is in valid range for conversion | [
"Checks",
"if",
"the",
"english",
"date",
"is",
"in",
"valid",
"range",
"for",
"conversion"
] | a589c28b8e085049f30a7287753476b59eca6f50 | https://github.com/nepalicalendar/nepalicalendar-py/blob/a589c28b8e085049f30a7287753476b59eca6f50/nepalicalendar/functions.py#L8-L14 | train |
nepalicalendar/nepalicalendar-py | nepalicalendar/functions.py | check_valid_bs_range | def check_valid_bs_range(date):
"""
Checks if the nepali date is in valid range for conversion
"""
ERR_MSG = "%s out of range" % str(date)
if date.year < values.START_NP_YEAR or date.year > values.END_NP_YEAR:
raise ValueError(ERR_MSG)
if date.month < 1 or date.month > 12:
raise ValueError(ERR_MSG)
if date.day < 1 or date.day > values.NEPALI_MONTH_DAY_DATA[date.year][date.month - 1]:
raise ValueError(ERR_MSG)
return True | python | def check_valid_bs_range(date):
"""
Checks if the nepali date is in valid range for conversion
"""
ERR_MSG = "%s out of range" % str(date)
if date.year < values.START_NP_YEAR or date.year > values.END_NP_YEAR:
raise ValueError(ERR_MSG)
if date.month < 1 or date.month > 12:
raise ValueError(ERR_MSG)
if date.day < 1 or date.day > values.NEPALI_MONTH_DAY_DATA[date.year][date.month - 1]:
raise ValueError(ERR_MSG)
return True | [
"def",
"check_valid_bs_range",
"(",
"date",
")",
":",
"ERR_MSG",
"=",
"\"%s out of range\"",
"%",
"str",
"(",
"date",
")",
"if",
"date",
".",
"year",
"<",
"values",
".",
"START_NP_YEAR",
"or",
"date",
".",
"year",
">",
"values",
".",
"END_NP_YEAR",
":",
"raise",
"ValueError",
"(",
"ERR_MSG",
")",
"if",
"date",
".",
"month",
"<",
"1",
"or",
"date",
".",
"month",
">",
"12",
":",
"raise",
"ValueError",
"(",
"ERR_MSG",
")",
"if",
"date",
".",
"day",
"<",
"1",
"or",
"date",
".",
"day",
">",
"values",
".",
"NEPALI_MONTH_DAY_DATA",
"[",
"date",
".",
"year",
"]",
"[",
"date",
".",
"month",
"-",
"1",
"]",
":",
"raise",
"ValueError",
"(",
"ERR_MSG",
")",
"return",
"True"
] | Checks if the nepali date is in valid range for conversion | [
"Checks",
"if",
"the",
"nepali",
"date",
"is",
"in",
"valid",
"range",
"for",
"conversion"
] | a589c28b8e085049f30a7287753476b59eca6f50 | https://github.com/nepalicalendar/nepalicalendar-py/blob/a589c28b8e085049f30a7287753476b59eca6f50/nepalicalendar/functions.py#L17-L29 | train |
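check_valid_bs_range only needs an object exposing year, month and day attributes; a hedged sketch using a stand-in namedtuple, assuming 2070 BS lies inside the package's supported span:

from collections import namedtuple
from nepalicalendar.functions import check_valid_bs_range

BsDate = namedtuple('BsDate', 'year month day')  # stand-in for the package's own date type
check_valid_bs_range(BsDate(2070, 1, 1))         # True, assuming 2070 is in range
# check_valid_bs_range(BsDate(1, 1, 1))          # raises ValueError: out of range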
nepalicalendar/nepalicalendar-py | nepalicalendar/functions.py | nepali_number | def nepali_number(number):
"""
Convert a number to nepali
"""
nepnum = ""
for n in str(number):
nepnum += values.NEPDIGITS[int(n)]
return nepnum | python | def nepali_number(number):
"""
Convert a number to nepali
"""
nepnum = ""
for n in str(number):
nepnum += values.NEPDIGITS[int(n)]
return nepnum | [
"def",
"nepali_number",
"(",
"number",
")",
":",
"nepnum",
"=",
"\"\"",
"for",
"n",
"in",
"str",
"(",
"number",
")",
":",
"nepnum",
"+=",
"values",
".",
"NEPDIGITS",
"[",
"int",
"(",
"n",
")",
"]",
"return",
"nepnum"
] | Convert a number to nepali | [
"Convert",
"a",
"number",
"to",
"nepali"
] | a589c28b8e085049f30a7287753476b59eca6f50 | https://github.com/nepalicalendar/nepalicalendar-py/blob/a589c28b8e085049f30a7287753476b59eca6f50/nepalicalendar/functions.py#L31-L38 | train |
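nepali_number simply maps each ASCII digit of the argument through NEPDIGITS:

from nepalicalendar.functions import nepali_number

nepali_number(2075)  # u'२०७५'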
bkg/django-spillway | spillway/serializers.py | GeoModelSerializer.get_fields | def get_fields(self):
"""Returns a fields dict for this serializer with a 'geometry' field
added.
"""
fields = super(GeoModelSerializer, self).get_fields()
# Set the geometry field name when it's undeclared.
if not self.Meta.geom_field:
for name, field in fields.items():
if isinstance(field, GeometryField):
self.Meta.geom_field = name
break
return fields | python | def get_fields(self):
"""Returns a fields dict for this serializer with a 'geometry' field
added.
"""
fields = super(GeoModelSerializer, self).get_fields()
# Set the geometry field name when it's undeclared.
if not self.Meta.geom_field:
for name, field in fields.items():
if isinstance(field, GeometryField):
self.Meta.geom_field = name
break
return fields | [
"def",
"get_fields",
"(",
"self",
")",
":",
"fields",
"=",
"super",
"(",
"GeoModelSerializer",
",",
"self",
")",
".",
"get_fields",
"(",
")",
"if",
"not",
"self",
".",
"Meta",
".",
"geom_field",
":",
"for",
"name",
",",
"field",
"in",
"fields",
".",
"items",
"(",
")",
":",
"if",
"isinstance",
"(",
"field",
",",
"GeometryField",
")",
":",
"self",
".",
"Meta",
".",
"geom_field",
"=",
"name",
"break",
"return",
"fields"
] | Returns a fields dict for this serializer with a 'geometry' field
added. | [
"Returns",
"a",
"fields",
"dict",
"for",
"this",
"serializer",
"with",
"a",
"geometry",
"field",
"added",
"."
] | c488a62642430b005f1e0d4a19e160d8d5964b67 | https://github.com/bkg/django-spillway/blob/c488a62642430b005f1e0d4a19e160d8d5964b67/spillway/serializers.py#L31-L42 | train |
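A hedged sketch of a serializer relying on this behaviour; the app and model names are hypothetical, and Meta.geom_field is left unset so get_fields() infers it from the model's geometry column:

from spillway.serializers import GeoModelSerializer
from myapp.models import Park  # hypothetical GeoDjango model with a geometry field

class ParkSerializer(GeoModelSerializer):
    class Meta:
        model = Park
        geom_field = None  # left unset on purpose: filled in by get_fields()
        fields = '__all__'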
BD2KGenomics/protect | src/protect/mutation_calling/muse.py | run_muse_with_merge | def run_muse_with_merge(job, tumor_bam, normal_bam, univ_options, muse_options):
"""
    A wrapper for the entire MuSE sub-graph.
:param dict tumor_bam: Dict of bam and bai for tumor DNA-Seq
:param dict normal_bam: Dict of bam and bai for normal DNA-Seq
:param dict univ_options: Dict of universal options used by almost all tools
:param dict muse_options: Options specific to MuSE
:return: fsID to the merged MuSE calls
:rtype: toil.fileStore.FileID
"""
spawn = job.wrapJobFn(run_muse, tumor_bam, normal_bam, univ_options, muse_options,
disk='100M').encapsulate()
merge = job.wrapJobFn(merge_perchrom_vcfs, spawn.rv(), disk='100M')
job.addChild(spawn)
spawn.addChild(merge)
return merge.rv() | python | def run_muse_with_merge(job, tumor_bam, normal_bam, univ_options, muse_options):
"""
    A wrapper for the entire MuSE sub-graph.
:param dict tumor_bam: Dict of bam and bai for tumor DNA-Seq
:param dict normal_bam: Dict of bam and bai for normal DNA-Seq
:param dict univ_options: Dict of universal options used by almost all tools
:param dict muse_options: Options specific to MuSE
:return: fsID to the merged MuSE calls
:rtype: toil.fileStore.FileID
"""
spawn = job.wrapJobFn(run_muse, tumor_bam, normal_bam, univ_options, muse_options,
disk='100M').encapsulate()
merge = job.wrapJobFn(merge_perchrom_vcfs, spawn.rv(), disk='100M')
job.addChild(spawn)
spawn.addChild(merge)
return merge.rv() | [
"def",
"run_muse_with_merge",
"(",
"job",
",",
"tumor_bam",
",",
"normal_bam",
",",
"univ_options",
",",
"muse_options",
")",
":",
"spawn",
"=",
"job",
".",
"wrapJobFn",
"(",
"run_muse",
",",
"tumor_bam",
",",
"normal_bam",
",",
"univ_options",
",",
"muse_options",
",",
"disk",
"=",
"'100M'",
")",
".",
"encapsulate",
"(",
")",
"merge",
"=",
"job",
".",
"wrapJobFn",
"(",
"merge_perchrom_vcfs",
",",
"spawn",
".",
"rv",
"(",
")",
",",
"disk",
"=",
"'100M'",
")",
"job",
".",
"addChild",
"(",
"spawn",
")",
"spawn",
".",
"addChild",
"(",
"merge",
")",
"return",
"merge",
".",
"rv",
"(",
")"
] | A wrapper for the entire MuSE sub-graph.
:param dict tumor_bam: Dict of bam and bai for tumor DNA-Seq
:param dict normal_bam: Dict of bam and bai for normal DNA-Seq
:param dict univ_options: Dict of universal options used by almost all tools
:param dict muse_options: Options specific to MuSE
:return: fsID to the merged MuSE calls
:rtype: toil.fileStore.FileID | [
"A",
"wrapper",
"for",
"the",
"the",
"entire",
"MuSE",
"sub",
"-",
"graph",
"."
] | 06310682c50dcf8917b912c8e551299ff7ee41ce | https://github.com/BD2KGenomics/protect/blob/06310682c50dcf8917b912c8e551299ff7ee41ce/src/protect/mutation_calling/muse.py#L44-L60 | train |
BD2KGenomics/protect | src/protect/mutation_calling/muse.py | run_muse | def run_muse(job, tumor_bam, normal_bam, univ_options, muse_options):
"""
Spawn a MuSE job for each chromosome on the DNA bams.
:param dict tumor_bam: Dict of bam and bai for tumor DNA-Seq
:param dict normal_bam: Dict of bam and bai for normal DNA-Seq
:param dict univ_options: Dict of universal options used by almost all tools
:param dict muse_options: Options specific to MuSE
:return: Dict of results from running MuSE on every chromosome
perchrom_muse:
|- 'chr1': fsID
|- 'chr2' fsID
|
|-...
|
+- 'chrM': fsID
:rtype: dict
"""
# Get a list of chromosomes to handle
if muse_options['chromosomes']:
chromosomes = muse_options['chromosomes']
else:
chromosomes = sample_chromosomes(job, muse_options['genome_fai'])
perchrom_muse = defaultdict()
for chrom in chromosomes:
call = job.addChildJobFn(run_muse_perchrom, tumor_bam, normal_bam, univ_options,
muse_options, chrom, disk=PromisedRequirement(
muse_disk,
tumor_bam['tumor_dna_fix_pg_sorted.bam'],
normal_bam['normal_dna_fix_pg_sorted.bam'],
muse_options['genome_fasta']),
memory='6G')
sump = call.addChildJobFn(run_muse_sump_perchrom, call.rv(), univ_options, muse_options,
chrom,
disk=PromisedRequirement(muse_sump_disk,
muse_options['dbsnp_vcf']),
memory='6G')
perchrom_muse[chrom] = sump.rv()
return perchrom_muse | python | def run_muse(job, tumor_bam, normal_bam, univ_options, muse_options):
"""
Spawn a MuSE job for each chromosome on the DNA bams.
:param dict tumor_bam: Dict of bam and bai for tumor DNA-Seq
:param dict normal_bam: Dict of bam and bai for normal DNA-Seq
:param dict univ_options: Dict of universal options used by almost all tools
:param dict muse_options: Options specific to MuSE
:return: Dict of results from running MuSE on every chromosome
perchrom_muse:
|- 'chr1': fsID
|- 'chr2' fsID
|
|-...
|
+- 'chrM': fsID
:rtype: dict
"""
# Get a list of chromosomes to handle
if muse_options['chromosomes']:
chromosomes = muse_options['chromosomes']
else:
chromosomes = sample_chromosomes(job, muse_options['genome_fai'])
perchrom_muse = defaultdict()
for chrom in chromosomes:
call = job.addChildJobFn(run_muse_perchrom, tumor_bam, normal_bam, univ_options,
muse_options, chrom, disk=PromisedRequirement(
muse_disk,
tumor_bam['tumor_dna_fix_pg_sorted.bam'],
normal_bam['normal_dna_fix_pg_sorted.bam'],
muse_options['genome_fasta']),
memory='6G')
sump = call.addChildJobFn(run_muse_sump_perchrom, call.rv(), univ_options, muse_options,
chrom,
disk=PromisedRequirement(muse_sump_disk,
muse_options['dbsnp_vcf']),
memory='6G')
perchrom_muse[chrom] = sump.rv()
return perchrom_muse | [
"def",
"run_muse",
"(",
"job",
",",
"tumor_bam",
",",
"normal_bam",
",",
"univ_options",
",",
"muse_options",
")",
":",
"if",
"muse_options",
"[",
"'chromosomes'",
"]",
":",
"chromosomes",
"=",
"muse_options",
"[",
"'chromosomes'",
"]",
"else",
":",
"chromosomes",
"=",
"sample_chromosomes",
"(",
"job",
",",
"muse_options",
"[",
"'genome_fai'",
"]",
")",
"perchrom_muse",
"=",
"defaultdict",
"(",
")",
"for",
"chrom",
"in",
"chromosomes",
":",
"call",
"=",
"job",
".",
"addChildJobFn",
"(",
"run_muse_perchrom",
",",
"tumor_bam",
",",
"normal_bam",
",",
"univ_options",
",",
"muse_options",
",",
"chrom",
",",
"disk",
"=",
"PromisedRequirement",
"(",
"muse_disk",
",",
"tumor_bam",
"[",
"'tumor_dna_fix_pg_sorted.bam'",
"]",
",",
"normal_bam",
"[",
"'normal_dna_fix_pg_sorted.bam'",
"]",
",",
"muse_options",
"[",
"'genome_fasta'",
"]",
")",
",",
"memory",
"=",
"'6G'",
")",
"sump",
"=",
"call",
".",
"addChildJobFn",
"(",
"run_muse_sump_perchrom",
",",
"call",
".",
"rv",
"(",
")",
",",
"univ_options",
",",
"muse_options",
",",
"chrom",
",",
"disk",
"=",
"PromisedRequirement",
"(",
"muse_sump_disk",
",",
"muse_options",
"[",
"'dbsnp_vcf'",
"]",
")",
",",
"memory",
"=",
"'6G'",
")",
"perchrom_muse",
"[",
"chrom",
"]",
"=",
"sump",
".",
"rv",
"(",
")",
"return",
"perchrom_muse"
] | Spawn a MuSE job for each chromosome on the DNA bams.
:param dict tumor_bam: Dict of bam and bai for tumor DNA-Seq
:param dict normal_bam: Dict of bam and bai for normal DNA-Seq
:param dict univ_options: Dict of universal options used by almost all tools
:param dict muse_options: Options specific to MuSE
:return: Dict of results from running MuSE on every chromosome
perchrom_muse:
|- 'chr1': fsID
|- 'chr2' fsID
|
|-...
|
+- 'chrM': fsID
:rtype: dict | [
"Spawn",
"a",
"MuSE",
"job",
"for",
"each",
"chromosome",
"on",
"the",
"DNA",
"bams",
"."
] | 06310682c50dcf8917b912c8e551299ff7ee41ce | https://github.com/BD2KGenomics/protect/blob/06310682c50dcf8917b912c8e551299ff7ee41ce/src/protect/mutation_calling/muse.py#L63-L101 | train |
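run_muse reads only a handful of keys from its option dicts; a hedged sketch of their shape, with None placeholders where a real pipeline would supply Toil file-store IDs:

muse_options = {
    'chromosomes': [],      # empty -> chromosome list derived from the genome .fai
    'genome_fasta': None,   # fsID of genome.fa.tar.gz
    'genome_fai': None,     # fsID of genome.fa.fai.tar.gz
    'dbsnp_vcf': None,      # fsID of dbsnp_coding.vcf.gz
    'dbsnp_tbi': None,      # fsID of the matching .tbi
    'version': 'latest',    # MuSE docker image tag (placeholder)
}
univ_options = {'dockerhub': 'aarjunrao', 'patient': 'test_patient'}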
BD2KGenomics/protect | src/protect/mutation_calling/muse.py | run_muse_perchrom | def run_muse_perchrom(job, tumor_bam, normal_bam, univ_options, muse_options, chrom):
"""
Run MuSE call on a single chromosome in the input bams.
:param dict tumor_bam: Dict of bam and bai for tumor DNA-Seq
:param dict normal_bam: Dict of bam and bai for normal DNA-Seq
:param dict univ_options: Dict of universal options used by almost all tools
:param dict muse_options: Options specific to MuSE
:param str chrom: Chromosome to process
    :return: fsID for the chromosome vcf
:rtype: toil.fileStore.FileID
"""
work_dir = os.getcwd()
input_files = {
'tumor.bam': tumor_bam['tumor_dna_fix_pg_sorted.bam'],
'tumor.bam.bai': tumor_bam['tumor_dna_fix_pg_sorted.bam.bai'],
'normal.bam': normal_bam['normal_dna_fix_pg_sorted.bam'],
'normal.bam.bai': normal_bam['normal_dna_fix_pg_sorted.bam.bai'],
'genome.fa.tar.gz': muse_options['genome_fasta'],
'genome.fa.fai.tar.gz': muse_options['genome_fai']}
input_files = get_files_from_filestore(job, input_files, work_dir, docker=False)
for key in ('genome.fa', 'genome.fa.fai'):
input_files[key] = untargz(input_files[key + '.tar.gz'], work_dir)
input_files = {key: docker_path(path) for key, path in input_files.items()}
output_prefix = os.path.join(work_dir, chrom)
parameters = ['call',
'-f', input_files['genome.fa'],
'-r', chrom,
'-O', docker_path(output_prefix),
input_files['tumor.bam'],
input_files['normal.bam']]
docker_call(tool='muse', tool_parameters=parameters, work_dir=work_dir,
dockerhub=univ_options['dockerhub'], tool_version=muse_options['version'])
outfile = job.fileStore.writeGlobalFile(''.join([output_prefix, '.MuSE.txt']))
job.fileStore.logToMaster('Ran MuSE on %s:%s successfully' % (univ_options['patient'], chrom))
return outfile | python | def run_muse_perchrom(job, tumor_bam, normal_bam, univ_options, muse_options, chrom):
"""
Run MuSE call on a single chromosome in the input bams.
:param dict tumor_bam: Dict of bam and bai for tumor DNA-Seq
:param dict normal_bam: Dict of bam and bai for normal DNA-Seq
:param dict univ_options: Dict of universal options used by almost all tools
:param dict muse_options: Options specific to MuSE
:param str chrom: Chromosome to process
    :return: fsID for the chromosome vcf
:rtype: toil.fileStore.FileID
"""
work_dir = os.getcwd()
input_files = {
'tumor.bam': tumor_bam['tumor_dna_fix_pg_sorted.bam'],
'tumor.bam.bai': tumor_bam['tumor_dna_fix_pg_sorted.bam.bai'],
'normal.bam': normal_bam['normal_dna_fix_pg_sorted.bam'],
'normal.bam.bai': normal_bam['normal_dna_fix_pg_sorted.bam.bai'],
'genome.fa.tar.gz': muse_options['genome_fasta'],
'genome.fa.fai.tar.gz': muse_options['genome_fai']}
input_files = get_files_from_filestore(job, input_files, work_dir, docker=False)
for key in ('genome.fa', 'genome.fa.fai'):
input_files[key] = untargz(input_files[key + '.tar.gz'], work_dir)
input_files = {key: docker_path(path) for key, path in input_files.items()}
output_prefix = os.path.join(work_dir, chrom)
parameters = ['call',
'-f', input_files['genome.fa'],
'-r', chrom,
'-O', docker_path(output_prefix),
input_files['tumor.bam'],
input_files['normal.bam']]
docker_call(tool='muse', tool_parameters=parameters, work_dir=work_dir,
dockerhub=univ_options['dockerhub'], tool_version=muse_options['version'])
outfile = job.fileStore.writeGlobalFile(''.join([output_prefix, '.MuSE.txt']))
job.fileStore.logToMaster('Ran MuSE on %s:%s successfully' % (univ_options['patient'], chrom))
return outfile | [
"def",
"run_muse_perchrom",
"(",
"job",
",",
"tumor_bam",
",",
"normal_bam",
",",
"univ_options",
",",
"muse_options",
",",
"chrom",
")",
":",
"work_dir",
"=",
"os",
".",
"getcwd",
"(",
")",
"input_files",
"=",
"{",
"'tumor.bam'",
":",
"tumor_bam",
"[",
"'tumor_dna_fix_pg_sorted.bam'",
"]",
",",
"'tumor.bam.bai'",
":",
"tumor_bam",
"[",
"'tumor_dna_fix_pg_sorted.bam.bai'",
"]",
",",
"'normal.bam'",
":",
"normal_bam",
"[",
"'normal_dna_fix_pg_sorted.bam'",
"]",
",",
"'normal.bam.bai'",
":",
"normal_bam",
"[",
"'normal_dna_fix_pg_sorted.bam.bai'",
"]",
",",
"'genome.fa.tar.gz'",
":",
"muse_options",
"[",
"'genome_fasta'",
"]",
",",
"'genome.fa.fai.tar.gz'",
":",
"muse_options",
"[",
"'genome_fai'",
"]",
"}",
"input_files",
"=",
"get_files_from_filestore",
"(",
"job",
",",
"input_files",
",",
"work_dir",
",",
"docker",
"=",
"False",
")",
"for",
"key",
"in",
"(",
"'genome.fa'",
",",
"'genome.fa.fai'",
")",
":",
"input_files",
"[",
"key",
"]",
"=",
"untargz",
"(",
"input_files",
"[",
"key",
"+",
"'.tar.gz'",
"]",
",",
"work_dir",
")",
"input_files",
"=",
"{",
"key",
":",
"docker_path",
"(",
"path",
")",
"for",
"key",
",",
"path",
"in",
"input_files",
".",
"items",
"(",
")",
"}",
"output_prefix",
"=",
"os",
".",
"path",
".",
"join",
"(",
"work_dir",
",",
"chrom",
")",
"parameters",
"=",
"[",
"'call'",
",",
"'-f'",
",",
"input_files",
"[",
"'genome.fa'",
"]",
",",
"'-r'",
",",
"chrom",
",",
"'-O'",
",",
"docker_path",
"(",
"output_prefix",
")",
",",
"input_files",
"[",
"'tumor.bam'",
"]",
",",
"input_files",
"[",
"'normal.bam'",
"]",
"]",
"docker_call",
"(",
"tool",
"=",
"'muse'",
",",
"tool_parameters",
"=",
"parameters",
",",
"work_dir",
"=",
"work_dir",
",",
"dockerhub",
"=",
"univ_options",
"[",
"'dockerhub'",
"]",
",",
"tool_version",
"=",
"muse_options",
"[",
"'version'",
"]",
")",
"outfile",
"=",
"job",
".",
"fileStore",
".",
"writeGlobalFile",
"(",
"''",
".",
"join",
"(",
"[",
"output_prefix",
",",
"'.MuSE.txt'",
"]",
")",
")",
"job",
".",
"fileStore",
".",
"logToMaster",
"(",
"'Ran MuSE on %s:%s successfully'",
"%",
"(",
"univ_options",
"[",
"'patient'",
"]",
",",
"chrom",
")",
")",
"return",
"outfile"
] | Run MuSE call on a single chromosome in the input bams.
:param dict tumor_bam: Dict of bam and bai for tumor DNA-Seq
:param dict normal_bam: Dict of bam and bai for normal DNA-Seq
:param dict univ_options: Dict of universal options used by almost all tools
:param dict muse_options: Options specific to MuSE
:param str chrom: Chromosome to process
    :return: fsID for the chromosome vcf
:rtype: toil.fileStore.FileID | [
"Run",
"MuSE",
"call",
"on",
"a",
"single",
"chromosome",
"in",
"the",
"input",
"bams",
"."
] | 06310682c50dcf8917b912c8e551299ff7ee41ce | https://github.com/BD2KGenomics/protect/blob/06310682c50dcf8917b912c8e551299ff7ee41ce/src/protect/mutation_calling/muse.py#L104-L142 | train |
BD2KGenomics/protect | src/protect/mutation_calling/muse.py | run_muse_sump_perchrom | def run_muse_sump_perchrom(job, muse_output, univ_options, muse_options, chrom):
"""
Run MuSE sump on the MuSE call generated vcf.
:param toil.fileStore.FileID muse_output: vcf generated by MuSE call
:param dict univ_options: Dict of universal options used by almost all tools
:param dict muse_options: Options specific to MuSE
:param str chrom: Chromosome to process
:return: fsID for the chromsome vcf
:rtype: toil.fileStore.FileID
"""
work_dir = os.getcwd()
input_files = {
'MuSE.txt': muse_output,
'dbsnp_coding.vcf.gz': muse_options['dbsnp_vcf'],
'dbsnp_coding.vcf.gz.tbi.tmp': muse_options['dbsnp_tbi']}
input_files = get_files_from_filestore(job, input_files, work_dir, docker=False)
tbi = os.path.splitext(input_files['dbsnp_coding.vcf.gz.tbi.tmp'])[0]
time.sleep(2)
shutil.copy(input_files['dbsnp_coding.vcf.gz.tbi.tmp'], tbi)
os.chmod(tbi, 0777)
open(tbi, 'a').close()
input_files = {key: docker_path(path) for key, path in input_files.items()}
output_file = ''.join([work_dir, '/', chrom, '.vcf'])
parameters = ['sump',
'-I', input_files['MuSE.txt'],
'-O', docker_path(output_file),
'-D', input_files['dbsnp_coding.vcf.gz'],
'-E']
docker_call(tool='muse', tool_parameters=parameters, work_dir=work_dir,
dockerhub=univ_options['dockerhub'], tool_version=muse_options['version'])
outfile = job.fileStore.writeGlobalFile(output_file)
export_results(job, outfile, output_file, univ_options, subfolder='mutations/muse')
job.fileStore.logToMaster('Ran MuSE sump on %s:%s successfully'
% (univ_options['patient'], chrom))
return outfile | python | def run_muse_sump_perchrom(job, muse_output, univ_options, muse_options, chrom):
"""
Run MuSE sump on the MuSE call generated vcf.
:param toil.fileStore.FileID muse_output: vcf generated by MuSE call
:param dict univ_options: Dict of universal options used by almost all tools
:param dict muse_options: Options specific to MuSE
:param str chrom: Chromosome to process
:return: fsID for the chromosome vcf
:rtype: toil.fileStore.FileID
"""
work_dir = os.getcwd()
input_files = {
'MuSE.txt': muse_output,
'dbsnp_coding.vcf.gz': muse_options['dbsnp_vcf'],
'dbsnp_coding.vcf.gz.tbi.tmp': muse_options['dbsnp_tbi']}
input_files = get_files_from_filestore(job, input_files, work_dir, docker=False)
tbi = os.path.splitext(input_files['dbsnp_coding.vcf.gz.tbi.tmp'])[0]
time.sleep(2)
shutil.copy(input_files['dbsnp_coding.vcf.gz.tbi.tmp'], tbi)
os.chmod(tbi, 0777)
open(tbi, 'a').close()
input_files = {key: docker_path(path) for key, path in input_files.items()}
output_file = ''.join([work_dir, '/', chrom, '.vcf'])
parameters = ['sump',
'-I', input_files['MuSE.txt'],
'-O', docker_path(output_file),
'-D', input_files['dbsnp_coding.vcf.gz'],
'-E']
docker_call(tool='muse', tool_parameters=parameters, work_dir=work_dir,
dockerhub=univ_options['dockerhub'], tool_version=muse_options['version'])
outfile = job.fileStore.writeGlobalFile(output_file)
export_results(job, outfile, output_file, univ_options, subfolder='mutations/muse')
job.fileStore.logToMaster('Ran MuSE sump on %s:%s successfully'
% (univ_options['patient'], chrom))
return outfile | [
"def",
"run_muse_sump_perchrom",
"(",
"job",
",",
"muse_output",
",",
"univ_options",
",",
"muse_options",
",",
"chrom",
")",
":",
"work_dir",
"=",
"os",
".",
"getcwd",
"(",
")",
"input_files",
"=",
"{",
"'MuSE.txt'",
":",
"muse_output",
",",
"'dbsnp_coding.vcf.gz'",
":",
"muse_options",
"[",
"'dbsnp_vcf'",
"]",
",",
"'dbsnp_coding.vcf.gz.tbi.tmp'",
":",
"muse_options",
"[",
"'dbsnp_tbi'",
"]",
"}",
"input_files",
"=",
"get_files_from_filestore",
"(",
"job",
",",
"input_files",
",",
"work_dir",
",",
"docker",
"=",
"False",
")",
"tbi",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"input_files",
"[",
"'dbsnp_coding.vcf.gz.tbi.tmp'",
"]",
")",
"[",
"0",
"]",
"time",
".",
"sleep",
"(",
"2",
")",
"shutil",
".",
"copy",
"(",
"input_files",
"[",
"'dbsnp_coding.vcf.gz.tbi.tmp'",
"]",
",",
"tbi",
")",
"os",
".",
"chmod",
"(",
"tbi",
",",
"0777",
")",
"open",
"(",
"tbi",
",",
"'a'",
")",
".",
"close",
"(",
")",
"input_files",
"=",
"{",
"key",
":",
"docker_path",
"(",
"path",
")",
"for",
"key",
",",
"path",
"in",
"input_files",
".",
"items",
"(",
")",
"}",
"output_file",
"=",
"''",
".",
"join",
"(",
"[",
"work_dir",
",",
"'/'",
",",
"chrom",
",",
"'.vcf'",
"]",
")",
"parameters",
"=",
"[",
"'sump'",
",",
"'-I'",
",",
"input_files",
"[",
"'MuSE.txt'",
"]",
",",
"'-O'",
",",
"docker_path",
"(",
"output_file",
")",
",",
"'-D'",
",",
"input_files",
"[",
"'dbsnp_coding.vcf.gz'",
"]",
",",
"'-E'",
"]",
"docker_call",
"(",
"tool",
"=",
"'muse'",
",",
"tool_parameters",
"=",
"parameters",
",",
"work_dir",
"=",
"work_dir",
",",
"dockerhub",
"=",
"univ_options",
"[",
"'dockerhub'",
"]",
",",
"tool_version",
"=",
"muse_options",
"[",
"'version'",
"]",
")",
"outfile",
"=",
"job",
".",
"fileStore",
".",
"writeGlobalFile",
"(",
"output_file",
")",
"export_results",
"(",
"job",
",",
"outfile",
",",
"output_file",
",",
"univ_options",
",",
"subfolder",
"=",
"'mutations/muse'",
")",
"job",
".",
"fileStore",
".",
"logToMaster",
"(",
"'Ran MuSE sump on %s:%s successfully'",
"%",
"(",
"univ_options",
"[",
"'patient'",
"]",
",",
"chrom",
")",
")",
"return",
"outfile"
] | Run MuSE sump on the MuSE call generated vcf.
:param toil.fileStore.FileID muse_output: vcf generated by MuSE call
:param dict univ_options: Dict of universal options used by almost all tools
:param dict muse_options: Options specific to MuSE
:param str chrom: Chromosome to process
:return: fsID for the chromosome vcf
:rtype: toil.fileStore.FileID | [
"Run",
"MuSE",
"sump",
"on",
"the",
"MuSE",
"call",
"generated",
"vcf",
"."
] | 06310682c50dcf8917b912c8e551299ff7ee41ce | https://github.com/BD2KGenomics/protect/blob/06310682c50dcf8917b912c8e551299ff7ee41ce/src/protect/mutation_calling/muse.py#L145-L182 | train |
bkg/django-spillway | spillway/models.py | AbstractRasterStore.linear | def linear(self, limits=None, k=5):
"""Returns an ndarray of linear breaks."""
start, stop = limits or (self.minval, self.maxval)
return np.linspace(start, stop, k) | python | def linear(self, limits=None, k=5):
"""Returns an ndarray of linear breaks."""
start, stop = limits or (self.minval, self.maxval)
return np.linspace(start, stop, k) | [
"def",
"linear",
"(",
"self",
",",
"limits",
"=",
"None",
",",
"k",
"=",
"5",
")",
":",
"start",
",",
"stop",
"=",
"limits",
"or",
"(",
"self",
".",
"minval",
",",
"self",
".",
"maxval",
")",
"return",
"np",
".",
"linspace",
"(",
"start",
",",
"stop",
",",
"k",
")"
] | Returns an ndarray of linear breaks. | [
"Returns",
"an",
"ndarray",
"of",
"linear",
"breaks",
"."
] | c488a62642430b005f1e0d4a19e160d8d5964b67 | https://github.com/bkg/django-spillway/blob/c488a62642430b005f1e0d4a19e160d8d5964b67/spillway/models.py#L81-L84 | train |
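Illustrative note (not part of the record above): a minimal numpy-only sketch of the evenly spaced breaks that linear computes, with explicit limits standing in for the model's (minval, maxval) defaults.

    import numpy as np

    # five evenly spaced breaks between an assumed minimum of 0 and maximum of 100
    breaks = np.linspace(0, 100, 5)
    print(breaks)  # [  0.  25.  50.  75. 100.]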
bkg/django-spillway | spillway/models.py | AbstractRasterStore.quantiles | def quantiles(self, k=5):
"""Returns an ndarray of quantile breaks."""
arr = self.array()
q = list(np.linspace(0, 100, k))
return np.percentile(arr.compressed(), q) | python | def quantiles(self, k=5):
"""Returns an ndarray of quantile breaks."""
arr = self.array()
q = list(np.linspace(0, 100, k))
return np.percentile(arr.compressed(), q) | [
"def",
"quantiles",
"(",
"self",
",",
"k",
"=",
"5",
")",
":",
"arr",
"=",
"self",
".",
"array",
"(",
")",
"q",
"=",
"list",
"(",
"np",
".",
"linspace",
"(",
"0",
",",
"100",
",",
"k",
")",
")",
"return",
"np",
".",
"percentile",
"(",
"arr",
".",
"compressed",
"(",
")",
",",
"q",
")"
] | Returns an ndarray of quantile breaks. | [
"Returns",
"an",
"ndarray",
"of",
"quantile",
"breaks",
"."
] | c488a62642430b005f1e0d4a19e160d8d5964b67 | https://github.com/bkg/django-spillway/blob/c488a62642430b005f1e0d4a19e160d8d5964b67/spillway/models.py#L86-L90 | train |
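Illustrative note: a small sketch, assuming only numpy, of the quantile breaks computed above; the masked-array compressed() step is omitted since the sample array has no mask.

    import numpy as np

    arr = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0])
    q = list(np.linspace(0, 100, 5))   # percentiles 0, 25, 50, 75, 100
    print(np.percentile(arr, q))       # [ 1.    3.25  5.5   7.75 10.  ]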
bkg/django-spillway | spillway/forms/fields.py | CommaSepFloatField.to_python | def to_python(self, value):
"""Normalize data to a list of floats."""
if not value:
return []
return map(super(CommaSepFloatField, self).to_python, value.split(',')) | python | def to_python(self, value):
"""Normalize data to a list of floats."""
if not value:
return []
return map(super(CommaSepFloatField, self).to_python, value.split(',')) | [
"def",
"to_python",
"(",
"self",
",",
"value",
")",
":",
"if",
"not",
"value",
":",
"return",
"[",
"]",
"return",
"map",
"(",
"super",
"(",
"CommaSepFloatField",
",",
"self",
")",
".",
"to_python",
",",
"value",
".",
"split",
"(",
"','",
")",
")"
] | Normalize data to a list of floats. | [
"Normalize",
"data",
"to",
"a",
"list",
"of",
"floats",
"."
] | c488a62642430b005f1e0d4a19e160d8d5964b67 | https://github.com/bkg/django-spillway/blob/c488a62642430b005f1e0d4a19e160d8d5964b67/spillway/forms/fields.py#L27-L31 | train |
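Illustrative note: a framework-free sketch of the comma-separated parsing performed by CommaSepFloatField.to_python; the per-item conversion that Django's FloatField would do is approximated here with the builtin float.

    def parse_comma_sep_floats(value):
        # empty input yields an empty list, mirroring the field above
        if not value:
            return []
        return [float(item) for item in value.split(',')]

    print(parse_comma_sep_floats('1.5,2,3.25'))  # [1.5, 2.0, 3.25]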
bkg/django-spillway | spillway/forms/fields.py | CommaSepFloatField.run_validators | def run_validators(self, values):
"""Run validators for each item separately."""
for val in values:
super(CommaSepFloatField, self).run_validators(val) | python | def run_validators(self, values):
"""Run validators for each item separately."""
for val in values:
super(CommaSepFloatField, self).run_validators(val) | [
"def",
"run_validators",
"(",
"self",
",",
"values",
")",
":",
"for",
"val",
"in",
"values",
":",
"super",
"(",
"CommaSepFloatField",
",",
"self",
")",
".",
"run_validators",
"(",
"val",
")"
] | Run validators for each item separately. | [
"Run",
"validators",
"for",
"each",
"item",
"separately",
"."
] | c488a62642430b005f1e0d4a19e160d8d5964b67 | https://github.com/bkg/django-spillway/blob/c488a62642430b005f1e0d4a19e160d8d5964b67/spillway/forms/fields.py#L33-L36 | train |
bkg/django-spillway | spillway/forms/fields.py | BoundingBoxField.to_python | def to_python(self, value):
"""Returns a GEOS Polygon from bounding box values."""
value = super(BoundingBoxField, self).to_python(value)
try:
bbox = gdal.OGRGeometry.from_bbox(value).geos
except (ValueError, AttributeError):
return []
bbox.srid = self.srid
return bbox | python | def to_python(self, value):
"""Returns a GEOS Polygon from bounding box values."""
value = super(BoundingBoxField, self).to_python(value)
try:
bbox = gdal.OGRGeometry.from_bbox(value).geos
except (ValueError, AttributeError):
return []
bbox.srid = self.srid
return bbox | [
"def",
"to_python",
"(",
"self",
",",
"value",
")",
":",
"value",
"=",
"super",
"(",
"BoundingBoxField",
",",
"self",
")",
".",
"to_python",
"(",
"value",
")",
"try",
":",
"bbox",
"=",
"gdal",
".",
"OGRGeometry",
".",
"from_bbox",
"(",
"value",
")",
".",
"geos",
"except",
"(",
"ValueError",
",",
"AttributeError",
")",
":",
"return",
"[",
"]",
"bbox",
".",
"srid",
"=",
"self",
".",
"srid",
"return",
"bbox"
] | Returns a GEOS Polygon from bounding box values. | [
"Returns",
"a",
"GEOS",
"Polygon",
"from",
"bounding",
"box",
"values",
"."
] | c488a62642430b005f1e0d4a19e160d8d5964b67 | https://github.com/bkg/django-spillway/blob/c488a62642430b005f1e0d4a19e160d8d5964b67/spillway/forms/fields.py#L46-L54 | train |
BD2KGenomics/protect | src/protect/mutation_calling/mutect.py | run_mutect_with_merge | def run_mutect_with_merge(job, tumor_bam, normal_bam, univ_options, mutect_options):
"""
A wrapper for the entire MuTect sub-graph.
:param dict tumor_bam: Dict of bam and bai for tumor DNA-Seq
:param dict normal_bam: Dict of bam and bai for normal DNA-Seq
:param dict univ_options: Dict of universal options used by almost all tools
:param dict mutect_options: Options specific to MuTect
:return: fsID to the merged MuTect calls
:rtype: toil.fileStore.FileID
"""
spawn = job.wrapJobFn(run_mutect, tumor_bam, normal_bam, univ_options,
mutect_options).encapsulate()
merge = job.wrapJobFn(merge_perchrom_vcfs, spawn.rv())
job.addChild(spawn)
spawn.addChild(merge)
return merge.rv() | python | def run_mutect_with_merge(job, tumor_bam, normal_bam, univ_options, mutect_options):
"""
A wrapper for the entire MuTect sub-graph.
:param dict tumor_bam: Dict of bam and bai for tumor DNA-Seq
:param dict normal_bam: Dict of bam and bai for normal DNA-Seq
:param dict univ_options: Dict of universal options used by almost all tools
:param dict mutect_options: Options specific to MuTect
:return: fsID to the merged MuTect calls
:rtype: toil.fileStore.FileID
"""
spawn = job.wrapJobFn(run_mutect, tumor_bam, normal_bam, univ_options,
mutect_options).encapsulate()
merge = job.wrapJobFn(merge_perchrom_vcfs, spawn.rv())
job.addChild(spawn)
spawn.addChild(merge)
return merge.rv() | [
"def",
"run_mutect_with_merge",
"(",
"job",
",",
"tumor_bam",
",",
"normal_bam",
",",
"univ_options",
",",
"mutect_options",
")",
":",
"spawn",
"=",
"job",
".",
"wrapJobFn",
"(",
"run_mutect",
",",
"tumor_bam",
",",
"normal_bam",
",",
"univ_options",
",",
"mutect_options",
")",
".",
"encapsulate",
"(",
")",
"merge",
"=",
"job",
".",
"wrapJobFn",
"(",
"merge_perchrom_vcfs",
",",
"spawn",
".",
"rv",
"(",
")",
")",
"job",
".",
"addChild",
"(",
"spawn",
")",
"spawn",
".",
"addChild",
"(",
"merge",
")",
"return",
"merge",
".",
"rv",
"(",
")"
] | A wrapper for the entire MuTect sub-graph.
:param dict tumor_bam: Dict of bam and bai for tumor DNA-Seq
:param dict normal_bam: Dict of bam and bai for normal DNA-Seq
:param dict univ_options: Dict of universal options used by almost all tools
:param dict mutect_options: Options specific to MuTect
:return: fsID to the merged MuTect calls
:rtype: toil.fileStore.FileID | [
"A",
"wrapper",
"for",
"the",
"the",
"entire",
"MuTect",
"sub",
"-",
"graph",
"."
] | 06310682c50dcf8917b912c8e551299ff7ee41ce | https://github.com/BD2KGenomics/protect/blob/06310682c50dcf8917b912c8e551299ff7ee41ce/src/protect/mutation_calling/mutect.py#L41-L57 | train |
BD2KGenomics/protect | src/protect/mutation_calling/mutect.py | run_mutect | def run_mutect(job, tumor_bam, normal_bam, univ_options, mutect_options):
"""
Spawn a MuTect job for each chromosome on the DNA bams.
:param dict tumor_bam: Dict of bam and bai for tumor DNA-Seq
:param dict normal_bam: Dict of bam and bai for normal DNA-Seq
:param dict univ_options: Dict of universal options used by almost all tools
:param dict mutect_options: Options specific to MuTect
:return: Dict of results from running MuTect on every chromosome
perchrom_mutect:
|- 'chr1': fsID
|- 'chr2': fsID
|
|-...
|
+- 'chrM': fsID
:rtype: dict
"""
# Get a list of chromosomes to handle
if mutect_options['chromosomes']:
chromosomes = mutect_options['chromosomes']
else:
chromosomes = sample_chromosomes(job, mutect_options['genome_fai'])
perchrom_mutect = defaultdict()
for chrom in chromosomes:
perchrom_mutect[chrom] = job.addChildJobFn(
run_mutect_perchrom, tumor_bam, normal_bam, univ_options, mutect_options, chrom,
memory='6G', disk=PromisedRequirement(mutect_disk,
tumor_bam['tumor_dna_fix_pg_sorted.bam'],
normal_bam['normal_dna_fix_pg_sorted.bam'],
mutect_options['genome_fasta'],
mutect_options['dbsnp_vcf'],
mutect_options['cosmic_vcf'])).rv()
return perchrom_mutect | python | def run_mutect(job, tumor_bam, normal_bam, univ_options, mutect_options):
"""
Spawn a MuTect job for each chromosome on the DNA bams.
:param dict tumor_bam: Dict of bam and bai for tumor DNA-Seq
:param dict normal_bam: Dict of bam and bai for normal DNA-Seq
:param dict univ_options: Dict of universal options used by almost all tools
:param dict mutect_options: Options specific to MuTect
:return: Dict of results from running MuTect on every chromosome
perchrom_mutect:
|- 'chr1': fsID
|- 'chr2': fsID
|
|-...
|
+- 'chrM': fsID
:rtype: dict
"""
# Get a list of chromosomes to handle
if mutect_options['chromosomes']:
chromosomes = mutect_options['chromosomes']
else:
chromosomes = sample_chromosomes(job, mutect_options['genome_fai'])
perchrom_mutect = defaultdict()
for chrom in chromosomes:
perchrom_mutect[chrom] = job.addChildJobFn(
run_mutect_perchrom, tumor_bam, normal_bam, univ_options, mutect_options, chrom,
memory='6G', disk=PromisedRequirement(mutect_disk,
tumor_bam['tumor_dna_fix_pg_sorted.bam'],
normal_bam['normal_dna_fix_pg_sorted.bam'],
mutect_options['genome_fasta'],
mutect_options['dbsnp_vcf'],
mutect_options['cosmic_vcf'])).rv()
return perchrom_mutect | [
"def",
"run_mutect",
"(",
"job",
",",
"tumor_bam",
",",
"normal_bam",
",",
"univ_options",
",",
"mutect_options",
")",
":",
"if",
"mutect_options",
"[",
"'chromosomes'",
"]",
":",
"chromosomes",
"=",
"mutect_options",
"[",
"'chromosomes'",
"]",
"else",
":",
"chromosomes",
"=",
"sample_chromosomes",
"(",
"job",
",",
"mutect_options",
"[",
"'genome_fai'",
"]",
")",
"perchrom_mutect",
"=",
"defaultdict",
"(",
")",
"for",
"chrom",
"in",
"chromosomes",
":",
"perchrom_mutect",
"[",
"chrom",
"]",
"=",
"job",
".",
"addChildJobFn",
"(",
"run_mutect_perchrom",
",",
"tumor_bam",
",",
"normal_bam",
",",
"univ_options",
",",
"mutect_options",
",",
"chrom",
",",
"memory",
"=",
"'6G'",
",",
"disk",
"=",
"PromisedRequirement",
"(",
"mutect_disk",
",",
"tumor_bam",
"[",
"'tumor_dna_fix_pg_sorted.bam'",
"]",
",",
"normal_bam",
"[",
"'normal_dna_fix_pg_sorted.bam'",
"]",
",",
"mutect_options",
"[",
"'genome_fasta'",
"]",
",",
"mutect_options",
"[",
"'dbsnp_vcf'",
"]",
",",
"mutect_options",
"[",
"'cosmic_vcf'",
"]",
")",
")",
".",
"rv",
"(",
")",
"return",
"perchrom_mutect"
] | Spawn a MuTect job for each chromosome on the DNA bams.
:param dict tumor_bam: Dict of bam and bai for tumor DNA-Seq
:param dict normal_bam: Dict of bam and bai for normal DNA-Seq
:param dict univ_options: Dict of universal options used by almost all tools
:param dict mutect_options: Options specific to MuTect
:return: Dict of results from running MuTect on every chromosome
perchrom_mutect:
|- 'chr1': fsID
|- 'chr2': fsID
|
|-...
|
+- 'chrM': fsID
:rtype: dict | [
"Spawn",
"a",
"MuTect",
"job",
"for",
"each",
"chromosome",
"on",
"the",
"DNA",
"bams",
"."
] | 06310682c50dcf8917b912c8e551299ff7ee41ce | https://github.com/BD2KGenomics/protect/blob/06310682c50dcf8917b912c8e551299ff7ee41ce/src/protect/mutation_calling/mutect.py#L60-L93 | train |
BD2KGenomics/protect | src/protect/mutation_calling/mutect.py | run_mutect_perchrom | def run_mutect_perchrom(job, tumor_bam, normal_bam, univ_options, mutect_options, chrom):
"""
Run MuTect call on a single chromosome in the input bams.
:param dict tumor_bam: Dict of bam and bai for tumor DNA-Seq
:param dict normal_bam: Dict of bam and bai for normal DNA-Seq
:param dict univ_options: Dict of universal options used by almost all tools
:param dict mutect_options: Options specific to MuTect
:param str chrom: Chromosome to process
:return: fsID for the chromosome vcf
:rtype: toil.fileStore.FileID
"""
work_dir = os.getcwd()
input_files = {
'tumor.bam': tumor_bam['tumor_dna_fix_pg_sorted.bam'],
'tumor.bam.bai': tumor_bam['tumor_dna_fix_pg_sorted.bam.bai'],
'normal.bam': normal_bam['normal_dna_fix_pg_sorted.bam'],
'normal.bam.bai': normal_bam['normal_dna_fix_pg_sorted.bam.bai'],
'genome.fa.tar.gz': mutect_options['genome_fasta'],
'genome.fa.fai.tar.gz': mutect_options['genome_fai'],
'genome.dict.tar.gz': mutect_options['genome_dict'],
'cosmic.vcf.tar.gz': mutect_options['cosmic_vcf'],
'cosmic.vcf.idx.tar.gz': mutect_options['cosmic_idx'],
'dbsnp.vcf.gz': mutect_options['dbsnp_vcf'],
'dbsnp.vcf.idx.tar.gz': mutect_options['dbsnp_idx']}
input_files = get_files_from_filestore(job, input_files, work_dir, docker=False)
# dbsnp.vcf should be bgzipped, but all others should be tar.gz'd
input_files['dbsnp.vcf'] = gunzip(input_files['dbsnp.vcf.gz'])
for key in ('genome.fa', 'genome.fa.fai', 'genome.dict', 'cosmic.vcf', 'cosmic.vcf.idx',
'dbsnp.vcf.idx'):
input_files[key] = untargz(input_files[key + '.tar.gz'], work_dir)
input_files = {key: docker_path(path) for key, path in input_files.items()}
mutout = ''.join([work_dir, '/', chrom, '.out'])
mutvcf = ''.join([work_dir, '/', chrom, '.vcf'])
parameters = ['-R', input_files['genome.fa'],
'--cosmic', input_files['cosmic.vcf'],
'--dbsnp', input_files['dbsnp.vcf'],
'--input_file:normal', input_files['normal.bam'],
'--input_file:tumor', input_files['tumor.bam'],
# '--tumor_lod', str(10),
# '--initial_tumor_lod', str(4.0),
'-L', chrom,
'--out', docker_path(mutout),
'--vcf', docker_path(mutvcf)
]
java_xmx = mutect_options['java_Xmx'] if mutect_options['java_Xmx'] \
else univ_options['java_Xmx']
docker_call(tool='mutect', tool_parameters=parameters, work_dir=work_dir,
dockerhub=univ_options['dockerhub'], java_xmx=java_xmx,
tool_version=mutect_options['version'])
output_file = job.fileStore.writeGlobalFile(mutvcf)
export_results(job, output_file, mutvcf, univ_options, subfolder='mutations/mutect')
job.fileStore.logToMaster('Ran MuTect on %s:%s successfully' % (univ_options['patient'], chrom))
return output_file | python | def run_mutect_perchrom(job, tumor_bam, normal_bam, univ_options, mutect_options, chrom):
"""
Run MuTect call on a single chromosome in the input bams.
:param dict tumor_bam: Dict of bam and bai for tumor DNA-Seq
:param dict normal_bam: Dict of bam and bai for normal DNA-Seq
:param dict univ_options: Dict of universal options used by almost all tools
:param dict mutect_options: Options specific to MuTect
:param str chrom: Chromosome to process
:return: fsID for the chromosome vcf
:rtype: toil.fileStore.FileID
"""
work_dir = os.getcwd()
input_files = {
'tumor.bam': tumor_bam['tumor_dna_fix_pg_sorted.bam'],
'tumor.bam.bai': tumor_bam['tumor_dna_fix_pg_sorted.bam.bai'],
'normal.bam': normal_bam['normal_dna_fix_pg_sorted.bam'],
'normal.bam.bai': normal_bam['normal_dna_fix_pg_sorted.bam.bai'],
'genome.fa.tar.gz': mutect_options['genome_fasta'],
'genome.fa.fai.tar.gz': mutect_options['genome_fai'],
'genome.dict.tar.gz': mutect_options['genome_dict'],
'cosmic.vcf.tar.gz': mutect_options['cosmic_vcf'],
'cosmic.vcf.idx.tar.gz': mutect_options['cosmic_idx'],
'dbsnp.vcf.gz': mutect_options['dbsnp_vcf'],
'dbsnp.vcf.idx.tar.gz': mutect_options['dbsnp_idx']}
input_files = get_files_from_filestore(job, input_files, work_dir, docker=False)
# dbsnp.vcf should be bgzipped, but all others should be tar.gz'd
input_files['dbsnp.vcf'] = gunzip(input_files['dbsnp.vcf.gz'])
for key in ('genome.fa', 'genome.fa.fai', 'genome.dict', 'cosmic.vcf', 'cosmic.vcf.idx',
'dbsnp.vcf.idx'):
input_files[key] = untargz(input_files[key + '.tar.gz'], work_dir)
input_files = {key: docker_path(path) for key, path in input_files.items()}
mutout = ''.join([work_dir, '/', chrom, '.out'])
mutvcf = ''.join([work_dir, '/', chrom, '.vcf'])
parameters = ['-R', input_files['genome.fa'],
'--cosmic', input_files['cosmic.vcf'],
'--dbsnp', input_files['dbsnp.vcf'],
'--input_file:normal', input_files['normal.bam'],
'--input_file:tumor', input_files['tumor.bam'],
# '--tumor_lod', str(10),
# '--initial_tumor_lod', str(4.0),
'-L', chrom,
'--out', docker_path(mutout),
'--vcf', docker_path(mutvcf)
]
java_xmx = mutect_options['java_Xmx'] if mutect_options['java_Xmx'] \
else univ_options['java_Xmx']
docker_call(tool='mutect', tool_parameters=parameters, work_dir=work_dir,
dockerhub=univ_options['dockerhub'], java_xmx=java_xmx,
tool_version=mutect_options['version'])
output_file = job.fileStore.writeGlobalFile(mutvcf)
export_results(job, output_file, mutvcf, univ_options, subfolder='mutations/mutect')
job.fileStore.logToMaster('Ran MuTect on %s:%s successfully' % (univ_options['patient'], chrom))
return output_file | [
"def",
"run_mutect_perchrom",
"(",
"job",
",",
"tumor_bam",
",",
"normal_bam",
",",
"univ_options",
",",
"mutect_options",
",",
"chrom",
")",
":",
"work_dir",
"=",
"os",
".",
"getcwd",
"(",
")",
"input_files",
"=",
"{",
"'tumor.bam'",
":",
"tumor_bam",
"[",
"'tumor_dna_fix_pg_sorted.bam'",
"]",
",",
"'tumor.bam.bai'",
":",
"tumor_bam",
"[",
"'tumor_dna_fix_pg_sorted.bam.bai'",
"]",
",",
"'normal.bam'",
":",
"normal_bam",
"[",
"'normal_dna_fix_pg_sorted.bam'",
"]",
",",
"'normal.bam.bai'",
":",
"normal_bam",
"[",
"'normal_dna_fix_pg_sorted.bam.bai'",
"]",
",",
"'genome.fa.tar.gz'",
":",
"mutect_options",
"[",
"'genome_fasta'",
"]",
",",
"'genome.fa.fai.tar.gz'",
":",
"mutect_options",
"[",
"'genome_fai'",
"]",
",",
"'genome.dict.tar.gz'",
":",
"mutect_options",
"[",
"'genome_dict'",
"]",
",",
"'cosmic.vcf.tar.gz'",
":",
"mutect_options",
"[",
"'cosmic_vcf'",
"]",
",",
"'cosmic.vcf.idx.tar.gz'",
":",
"mutect_options",
"[",
"'cosmic_idx'",
"]",
",",
"'dbsnp.vcf.gz'",
":",
"mutect_options",
"[",
"'dbsnp_vcf'",
"]",
",",
"'dbsnp.vcf.idx.tar.gz'",
":",
"mutect_options",
"[",
"'dbsnp_idx'",
"]",
"}",
"input_files",
"=",
"get_files_from_filestore",
"(",
"job",
",",
"input_files",
",",
"work_dir",
",",
"docker",
"=",
"False",
")",
"input_files",
"[",
"'dbsnp.vcf'",
"]",
"=",
"gunzip",
"(",
"input_files",
"[",
"'dbsnp.vcf.gz'",
"]",
")",
"for",
"key",
"in",
"(",
"'genome.fa'",
",",
"'genome.fa.fai'",
",",
"'genome.dict'",
",",
"'cosmic.vcf'",
",",
"'cosmic.vcf.idx'",
",",
"'dbsnp.vcf.idx'",
")",
":",
"input_files",
"[",
"key",
"]",
"=",
"untargz",
"(",
"input_files",
"[",
"key",
"+",
"'.tar.gz'",
"]",
",",
"work_dir",
")",
"input_files",
"=",
"{",
"key",
":",
"docker_path",
"(",
"path",
")",
"for",
"key",
",",
"path",
"in",
"input_files",
".",
"items",
"(",
")",
"}",
"mutout",
"=",
"''",
".",
"join",
"(",
"[",
"work_dir",
",",
"'/'",
",",
"chrom",
",",
"'.out'",
"]",
")",
"mutvcf",
"=",
"''",
".",
"join",
"(",
"[",
"work_dir",
",",
"'/'",
",",
"chrom",
",",
"'.vcf'",
"]",
")",
"parameters",
"=",
"[",
"'-R'",
",",
"input_files",
"[",
"'genome.fa'",
"]",
",",
"'--cosmic'",
",",
"input_files",
"[",
"'cosmic.vcf'",
"]",
",",
"'--dbsnp'",
",",
"input_files",
"[",
"'dbsnp.vcf'",
"]",
",",
"'--input_file:normal'",
",",
"input_files",
"[",
"'normal.bam'",
"]",
",",
"'--input_file:tumor'",
",",
"input_files",
"[",
"'tumor.bam'",
"]",
",",
"'-L'",
",",
"chrom",
",",
"'--out'",
",",
"docker_path",
"(",
"mutout",
")",
",",
"'--vcf'",
",",
"docker_path",
"(",
"mutvcf",
")",
"]",
"java_xmx",
"=",
"mutect_options",
"[",
"'java_Xmx'",
"]",
"if",
"mutect_options",
"[",
"'java_Xmx'",
"]",
"else",
"univ_options",
"[",
"'java_Xmx'",
"]",
"docker_call",
"(",
"tool",
"=",
"'mutect'",
",",
"tool_parameters",
"=",
"parameters",
",",
"work_dir",
"=",
"work_dir",
",",
"dockerhub",
"=",
"univ_options",
"[",
"'dockerhub'",
"]",
",",
"java_xmx",
"=",
"java_xmx",
",",
"tool_version",
"=",
"mutect_options",
"[",
"'version'",
"]",
")",
"output_file",
"=",
"job",
".",
"fileStore",
".",
"writeGlobalFile",
"(",
"mutvcf",
")",
"export_results",
"(",
"job",
",",
"output_file",
",",
"mutvcf",
",",
"univ_options",
",",
"subfolder",
"=",
"'mutations/mutect'",
")",
"job",
".",
"fileStore",
".",
"logToMaster",
"(",
"'Ran MuTect on %s:%s successfully'",
"%",
"(",
"univ_options",
"[",
"'patient'",
"]",
",",
"chrom",
")",
")",
"return",
"output_file"
] | Run MuTect call on a single chromosome in the input bams.
:param dict tumor_bam: Dict of bam and bai for tumor DNA-Seq
:param dict normal_bam: Dict of bam and bai for normal DNA-Seq
:param dict univ_options: Dict of universal options used by almost all tools
:param dict mutect_options: Options specific to MuTect
:param str chrom: Chromosome to process
:return: fsID for the chromosome vcf
:rtype: toil.fileStore.FileID | [
"Run",
"MuTect",
"call",
"on",
"a",
"single",
"chromosome",
"in",
"the",
"input",
"bams",
"."
] | 06310682c50dcf8917b912c8e551299ff7ee41ce | https://github.com/BD2KGenomics/protect/blob/06310682c50dcf8917b912c8e551299ff7ee41ce/src/protect/mutation_calling/mutect.py#L96-L150 | train |
BD2KGenomics/protect | src/protect/mutation_calling/mutect.py | process_mutect_vcf | def process_mutect_vcf(job, mutect_vcf, work_dir, univ_options):
"""
Process the MuTect vcf for accepted calls.
:param toil.fileStore.FileID mutect_vcf: fsID for a MuTect generated chromosome vcf
:param str work_dir: Working directory
:param dict univ_options: Dict of universal options used by almost all tools
:return: Path to the processed vcf
:rtype: str
"""
mutect_vcf = job.fileStore.readGlobalFile(mutect_vcf)
with open(mutect_vcf, 'r') as infile, open(mutect_vcf + 'mutect_parsed.tmp', 'w') as outfile:
for line in infile:
line = line.strip()
if line.startswith('#'):
print(line, file=outfile)
continue
line = line.split('\t')
if line[6] != 'REJECT':
print('\t'.join(line), file=outfile)
return outfile.name | python | def process_mutect_vcf(job, mutect_vcf, work_dir, univ_options):
"""
Process the MuTect vcf for accepted calls.
:param toil.fileStore.FileID mutect_vcf: fsID for a MuTect generated chromosome vcf
:param str work_dir: Working directory
:param dict univ_options: Dict of universal options used by almost all tools
:return: Path to the processed vcf
:rtype: str
"""
mutect_vcf = job.fileStore.readGlobalFile(mutect_vcf)
with open(mutect_vcf, 'r') as infile, open(mutect_vcf + 'mutect_parsed.tmp', 'w') as outfile:
for line in infile:
line = line.strip()
if line.startswith('#'):
print(line, file=outfile)
continue
line = line.split('\t')
if line[6] != 'REJECT':
print('\t'.join(line), file=outfile)
return outfile.name | [
"def",
"process_mutect_vcf",
"(",
"job",
",",
"mutect_vcf",
",",
"work_dir",
",",
"univ_options",
")",
":",
"mutect_vcf",
"=",
"job",
".",
"fileStore",
".",
"readGlobalFile",
"(",
"mutect_vcf",
")",
"with",
"open",
"(",
"mutect_vcf",
",",
"'r'",
")",
"as",
"infile",
",",
"open",
"(",
"mutect_vcf",
"+",
"'mutect_parsed.tmp'",
",",
"'w'",
")",
"as",
"outfile",
":",
"for",
"line",
"in",
"infile",
":",
"line",
"=",
"line",
".",
"strip",
"(",
")",
"if",
"line",
".",
"startswith",
"(",
"'#'",
")",
":",
"print",
"(",
"line",
",",
"file",
"=",
"outfile",
")",
"continue",
"line",
"=",
"line",
".",
"split",
"(",
"'\\t'",
")",
"if",
"line",
"[",
"6",
"]",
"!=",
"'REJECT'",
":",
"print",
"(",
"'\\t'",
".",
"join",
"(",
"line",
")",
",",
"file",
"=",
"outfile",
")",
"return",
"outfile",
".",
"name"
] | Process the MuTect vcf for accepted calls.
:param toil.fileStore.FileID mutect_vcf: fsID for a MuTect generated chromosome vcf
:param str work_dir: Working directory
:param dict univ_options: Dict of universal options used by almost all tools
:return: Path to the processed vcf
:rtype: str | [
"Process",
"the",
"MuTect",
"vcf",
"for",
"accepted",
"calls",
"."
] | 06310682c50dcf8917b912c8e551299ff7ee41ce | https://github.com/BD2KGenomics/protect/blob/06310682c50dcf8917b912c8e551299ff7ee41ce/src/protect/mutation_calling/mutect.py#L153-L174 | train |
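Illustrative note: a self-contained sketch of the REJECT filtering that process_mutect_vcf applies, working on in-memory strings rather than Toil file-store IDs; the sample VCF lines are invented for the example.

    def keep_accepted_calls(lines):
        # keep headers and any call whose FILTER column (index 6) is not REJECT
        for line in lines:
            line = line.strip()
            if line.startswith('#') or line.split('\t')[6] != 'REJECT':
                yield line

    vcf_lines = [
        '#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO',
        'chr1\t100\t.\tA\tT\t.\tPASS\t.',
        'chr1\t200\t.\tG\tC\t.\tREJECT\t.',
    ]
    print(list(keep_accepted_calls(vcf_lines)))  # the REJECT record is dropped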
idlesign/steampak | steampak/libsteam/resources/utils.py | Utils.get_universe | def get_universe(self, as_str=False):
"""Returns universe the client is connected to. See ``Universe``.
:param bool as_str: Return human-friendly universe name instead of an ID.
:rtype: int|str
"""
result = self._iface.get_connected_universe()
if as_str:
return Universe.get_alias(result)
return result | python | def get_universe(self, as_str=False):
"""Returns universe the client is connected to. See ``Universe``.
:param bool as_str: Return human-friendly universe name instead of an ID.
:rtype: int|str
"""
result = self._iface.get_connected_universe()
if as_str:
return Universe.get_alias(result)
return result | [
"def",
"get_universe",
"(",
"self",
",",
"as_str",
"=",
"False",
")",
":",
"result",
"=",
"self",
".",
"_iface",
".",
"get_connected_universe",
"(",
")",
"if",
"as_str",
":",
"return",
"Universe",
".",
"get_alias",
"(",
"result",
")",
"return",
"result"
] | Returns universe the client is connected to. See ``Universe``.
:param bool as_str: Return human-friendly universe name instead of an ID.
:rtype: int|str | [
"Returns",
"universe",
"the",
"client",
"is",
"connected",
"to",
".",
"See",
"Universe",
"."
] | cb3f2c737e272b0360802d947e388df7e34f50f3 | https://github.com/idlesign/steampak/blob/cb3f2c737e272b0360802d947e388df7e34f50f3/steampak/libsteam/resources/utils.py#L136-L147 | train |
APSL/django-kaio | kaio/mixins/logs.py | LogsMixin.EXTRA_LOGGING | def EXTRA_LOGGING(self):
"""
List of modules to log, each with its own log level.
For example:
[Logs]
EXTRA_LOGGING = oscar.paypal:DEBUG, django.db:INFO
"""
input_text = get('EXTRA_LOGGING', '')
modules = input_text.split(',')
if input_text:
modules = input_text.split(',')
modules = [x.split(':') for x in modules]
else:
modules = []
return modules | python | def EXTRA_LOGGING(self):
"""
List of modules to log, each with its own log level.
For example:
[Logs]
EXTRA_LOGGING = oscar.paypal:DEBUG, django.db:INFO
"""
input_text = get('EXTRA_LOGGING', '')
modules = input_text.split(',')
if input_text:
modules = input_text.split(',')
modules = [x.split(':') for x in modules]
else:
modules = []
return modules | [
"def",
"EXTRA_LOGGING",
"(",
"self",
")",
":",
"input_text",
"=",
"get",
"(",
"'EXTRA_LOGGING'",
",",
"''",
")",
"modules",
"=",
"input_text",
".",
"split",
"(",
"','",
")",
"if",
"input_text",
":",
"modules",
"=",
"input_text",
".",
"split",
"(",
"','",
")",
"modules",
"=",
"[",
"x",
".",
"split",
"(",
"':'",
")",
"for",
"x",
"in",
"modules",
"]",
"else",
":",
"modules",
"=",
"[",
"]",
"return",
"modules"
] | List of modules to log, each with its own log level.
For example:
[Logs]
EXTRA_LOGGING = oscar.paypal:DEBUG, django.db:INFO | [
"lista",
"modulos",
"con",
"los",
"distintos",
"niveles",
"a",
"logear",
"y",
"su",
"nivel",
"de",
"debug"
] | b74b109bcfba31d973723bc419e2c95d190b80b7 | https://github.com/APSL/django-kaio/blob/b74b109bcfba31d973723bc419e2c95d190b80b7/kaio/mixins/logs.py#L26-L45 | train |
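Illustrative note: a standalone sketch of the EXTRA_LOGGING parsing shown above; a strip() is added here so the second module name carries no leading space, which the original property leaves in place.

    def parse_extra_logging(input_text):
        if not input_text:
            return []
        return [item.strip().split(':') for item in input_text.split(',')]

    print(parse_extra_logging('oscar.paypal:DEBUG, django.db:INFO'))
    # [['oscar.paypal', 'DEBUG'], ['django.db', 'INFO']]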
nepalicalendar/nepalicalendar-py | nepalicalendar/nepdate.py | NepDate.from_ad_date | def from_ad_date(cls, date):
""" Gets a NepDate object from gregorian calendar date """
functions.check_valid_ad_range(date)
days = values.START_EN_DATE - date
# Add the required number of days to the start nepali date
start_date = NepDate(values.START_NP_YEAR, 1, 1)
# No need to update as addition already calls update
return start_date + (date - values.START_EN_DATE) | python | def from_ad_date(cls, date):
""" Gets a NepDate object from gregorian calendar date """
functions.check_valid_ad_range(date)
days = values.START_EN_DATE - date
# Add the required number of days to the start nepali date
start_date = NepDate(values.START_NP_YEAR, 1, 1)
# No need to update as addition already calls update
return start_date + (date - values.START_EN_DATE) | [
"def",
"from_ad_date",
"(",
"cls",
",",
"date",
")",
":",
"functions",
".",
"check_valid_ad_range",
"(",
"date",
")",
"days",
"=",
"values",
".",
"START_EN_DATE",
"-",
"date",
"start_date",
"=",
"NepDate",
"(",
"values",
".",
"START_NP_YEAR",
",",
"1",
",",
"1",
")",
"return",
"start_date",
"+",
"(",
"date",
"-",
"values",
".",
"START_EN_DATE",
")"
] | Gets a NepDate object from gregorian calendar date | [
"Gets",
"a",
"NepDate",
"object",
"from",
"gregorian",
"calendar",
"date"
] | a589c28b8e085049f30a7287753476b59eca6f50 | https://github.com/nepalicalendar/nepalicalendar-py/blob/a589c28b8e085049f30a7287753476b59eca6f50/nepalicalendar/nepdate.py#L207-L215 | train |
nepalicalendar/nepalicalendar-py | nepalicalendar/nepdate.py | NepDate.from_bs_date | def from_bs_date(cls, year, month, day):
""" Create and update an NepDate object for bikram sambat date """
return NepDate(year, month, day).update() | python | def from_bs_date(cls, year, month, day):
""" Create and update an NepDate object for bikram sambat date """
return NepDate(year, month, day).update() | [
"def",
"from_bs_date",
"(",
"cls",
",",
"year",
",",
"month",
",",
"day",
")",
":",
"return",
"NepDate",
"(",
"year",
",",
"month",
",",
"day",
")",
".",
"update",
"(",
")"
] | Create and update an NepDate object for bikram sambat date | [
"Create",
"and",
"update",
"an",
"NepDate",
"object",
"for",
"bikram",
"sambat",
"date"
] | a589c28b8e085049f30a7287753476b59eca6f50 | https://github.com/nepalicalendar/nepalicalendar-py/blob/a589c28b8e085049f30a7287753476b59eca6f50/nepalicalendar/nepdate.py#L218-L220 | train |
nepalicalendar/nepalicalendar-py | nepalicalendar/nepdate.py | NepDate.events_list | def events_list(self):
""" Returns the events today """
evt = []
evt.extend(events.NEPALI_EVENTS[self.month, self.day])
evt.extend(events.ENGLISH_EVENTS[self.en_date.month, self.en_date.day])
return evt | python | def events_list(self):
""" Returns the events today """
evt = []
evt.extend(events.NEPALI_EVENTS[self.month, self.day])
evt.extend(events.ENGLISH_EVENTS[self.en_date.month, self.en_date.day])
return evt | [
"def",
"events_list",
"(",
"self",
")",
":",
"evt",
"=",
"[",
"]",
"evt",
".",
"extend",
"(",
"events",
".",
"NEPALI_EVENTS",
"[",
"self",
".",
"month",
",",
"self",
".",
"day",
"]",
")",
"evt",
".",
"extend",
"(",
"events",
".",
"ENGLISH_EVENTS",
"[",
"self",
".",
"en_date",
".",
"month",
",",
"self",
".",
"en_date",
".",
"day",
"]",
")",
"return",
"evt"
] | Returns the events today | [
"Returns",
"the",
"events",
"today"
] | a589c28b8e085049f30a7287753476b59eca6f50 | https://github.com/nepalicalendar/nepalicalendar-py/blob/a589c28b8e085049f30a7287753476b59eca6f50/nepalicalendar/nepdate.py#L269-L274 | train |
nepalicalendar/nepalicalendar-py | nepalicalendar/nepdate.py | NepDate.update | def update(self):
""" Updates information about the NepDate """
functions.check_valid_bs_range(self)
# Here's a trick to find the gregorian date:
# We find the number of days from earliest nepali date to the current
# day. We then add the number of days to the earliest english date
self.en_date = values.START_EN_DATE + \
(
self - NepDate(
values.START_NP_YEAR,
1,
1
)
)
return self | python | def update(self):
""" Updates information about the NepDate """
functions.check_valid_bs_range(self)
# Here's a trick to find the gregorian date:
# We find the number of days from earliest nepali date to the current
# day. We then add the number of days to the earliest english date
self.en_date = values.START_EN_DATE + \
(
self - NepDate(
values.START_NP_YEAR,
1,
1
)
)
return self | [
"def",
"update",
"(",
"self",
")",
":",
"functions",
".",
"check_valid_bs_range",
"(",
"self",
")",
"self",
".",
"en_date",
"=",
"values",
".",
"START_EN_DATE",
"+",
"(",
"self",
"-",
"NepDate",
"(",
"values",
".",
"START_NP_YEAR",
",",
"1",
",",
"1",
")",
")",
"return",
"self"
] | Updates information about the NepDate | [
"Updates",
"information",
"about",
"the",
"NepDate"
] | a589c28b8e085049f30a7287753476b59eca6f50 | https://github.com/nepalicalendar/nepalicalendar-py/blob/a589c28b8e085049f30a7287753476b59eca6f50/nepalicalendar/nepdate.py#L291-L305 | train |
BD2KGenomics/protect | attic/precision_immuno.py | get_file_from_s3 | def get_file_from_s3(job, s3_url, encryption_key=None, write_to_jobstore=True):
"""
Downloads a supplied URL that points to an unencrypted, unprotected file on Amazon S3. The file
is downloaded and subsequently written to the jobstore and the return value is the path to
the file in the jobstore.
"""
work_dir = job.fileStore.getLocalTempDir()
filename = '/'.join([work_dir, os.path.basename(s3_url)])
# This is common to encrypted and unencrypted downloads
download_call = ['curl', '-fs', '--retry', '5']
# If an encryption key was provided, use it to create the headers that need to be injected into
# the curl script and append to the call
if encryption_key:
key = generate_unique_key(encryption_key, s3_url)
encoded_key = base64.b64encode(key)
encoded_key_md5 = base64.b64encode( hashlib.md5(key).digest() )
h1 = 'x-amz-server-side-encryption-customer-algorithm:AES256'
h2 = 'x-amz-server-side-encryption-customer-key:{}'.format(encoded_key)
h3 = 'x-amz-server-side-encryption-customer-key-md5:{}'.format(encoded_key_md5)
download_call.extend(['-H', h1, '-H', h2, '-H', h3])
# This is also common to both types of downloads
download_call.extend([s3_url, '-o', filename])
try:
subprocess.check_call(download_call)
except subprocess.CalledProcessError:
raise RuntimeError('Curl returned a non-zero exit status processing %s. Do you' % s3_url +
' have permissions to access the file?')
except OSError:
raise RuntimeError('Failed to find "curl". Install via "apt-get install curl"')
assert os.path.exists(filename)
if write_to_jobstore:
filename = job.fileStore.writeGlobalFile(filename)
return filename | python | def get_file_from_s3(job, s3_url, encryption_key=None, write_to_jobstore=True):
"""
Downloads a supplied URL that points to an unencrypted, unprotected file on Amazon S3. The file
is downloaded and subsequently written to the jobstore and the return value is the path to
the file in the jobstore.
"""
work_dir = job.fileStore.getLocalTempDir()
filename = '/'.join([work_dir, os.path.basename(s3_url)])
# This is common to encrypted and unencrypted downloads
download_call = ['curl', '-fs', '--retry', '5']
# If an encryption key was provided, use it to create the headers that need to be injected into
# the curl script and append to the call
if encryption_key:
key = generate_unique_key(encryption_key, s3_url)
encoded_key = base64.b64encode(key)
encoded_key_md5 = base64.b64encode( hashlib.md5(key).digest() )
h1 = 'x-amz-server-side-encryption-customer-algorithm:AES256'
h2 = 'x-amz-server-side-encryption-customer-key:{}'.format(encoded_key)
h3 = 'x-amz-server-side-encryption-customer-key-md5:{}'.format(encoded_key_md5)
download_call.extend(['-H', h1, '-H', h2, '-H', h3])
# This is also common to both types of downloads
download_call.extend([s3_url, '-o', filename])
try:
subprocess.check_call(download_call)
except subprocess.CalledProcessError:
raise RuntimeError('Curl returned a non-zero exit status processing %s. Do you' % s3_url +
' have permissions to access the file?')
except OSError:
raise RuntimeError('Failed to find "curl". Install via "apt-get install curl"')
assert os.path.exists(filename)
if write_to_jobstore:
filename = job.fileStore.writeGlobalFile(filename)
return filename | [
"def",
"get_file_from_s3",
"(",
"job",
",",
"s3_url",
",",
"encryption_key",
"=",
"None",
",",
"write_to_jobstore",
"=",
"True",
")",
":",
"work_dir",
"=",
"job",
".",
"fileStore",
".",
"getLocalTempDir",
"(",
")",
"filename",
"=",
"'/'",
".",
"join",
"(",
"[",
"work_dir",
",",
"os",
".",
"path",
".",
"basename",
"(",
"s3_url",
")",
"]",
")",
"download_call",
"=",
"[",
"'curl'",
",",
"'-fs'",
",",
"'--retry'",
",",
"'5'",
"]",
"if",
"encryption_key",
":",
"key",
"=",
"generate_unique_key",
"(",
"encryption_key",
",",
"s3_url",
")",
"encoded_key",
"=",
"base64",
".",
"b64encode",
"(",
"key",
")",
"encoded_key_md5",
"=",
"base64",
".",
"b64encode",
"(",
"hashlib",
".",
"md5",
"(",
"key",
")",
".",
"digest",
"(",
")",
")",
"h1",
"=",
"'x-amz-server-side-encryption-customer-algorithm:AES256'",
"h2",
"=",
"'x-amz-server-side-encryption-customer-key:{}'",
".",
"format",
"(",
"encoded_key",
")",
"h3",
"=",
"'x-amz-server-side-encryption-customer-key-md5:{}'",
".",
"format",
"(",
"encoded_key_md5",
")",
"download_call",
".",
"extend",
"(",
"[",
"'-H'",
",",
"h1",
",",
"'-H'",
",",
"h2",
",",
"'-H'",
",",
"h3",
"]",
")",
"download_call",
".",
"extend",
"(",
"[",
"s3_url",
",",
"'-o'",
",",
"filename",
"]",
")",
"try",
":",
"subprocess",
".",
"check_call",
"(",
"download_call",
")",
"except",
"subprocess",
".",
"CalledProcessError",
":",
"raise",
"RuntimeError",
"(",
"'Curl returned a non-zero exit status processing %s. Do you'",
"%",
"s3_url",
"+",
"'have premssions to access the file?'",
")",
"except",
"OSError",
":",
"raise",
"RuntimeError",
"(",
"'Failed to find \"curl\". Install via \"apt-get install curl\"'",
")",
"assert",
"os",
".",
"path",
".",
"exists",
"(",
"filename",
")",
"if",
"write_to_jobstore",
":",
"filename",
"=",
"job",
".",
"fileStore",
".",
"writeGlobalFile",
"(",
"filename",
")",
"return",
"filename"
] | Downloads a supplied URL that points to an unencrypted, unprotected file on Amazon S3. The file
is downloaded and subsequently written to the jobstore and the return value is the path to
the file in the jobstore. | [
"Downloads",
"a",
"supplied",
"URL",
"that",
"points",
"to",
"an",
"unencrypted",
"unprotected",
"file",
"on",
"Amazon",
"S3",
".",
"The",
"file",
"is",
"downloaded",
"and",
"a",
"subsequently",
"written",
"to",
"the",
"jobstore",
"and",
"the",
"return",
"value",
"is",
"a",
"the",
"path",
"to",
"the",
"file",
"in",
"the",
"jobstore",
"."
] | 06310682c50dcf8917b912c8e551299ff7ee41ce | https://github.com/BD2KGenomics/protect/blob/06310682c50dcf8917b912c8e551299ff7ee41ce/attic/precision_immuno.py#L2159-L2191 | train |
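Illustrative note: a sketch of just the SSE-C header construction used in get_file_from_s3, with a made-up 32-byte key standing in for the output of generate_unique_key.

    import base64
    import hashlib

    key = b'0' * 32  # placeholder key; the real code derives one per URL
    encoded_key = base64.b64encode(key).decode()
    encoded_key_md5 = base64.b64encode(hashlib.md5(key).digest()).decode()
    headers = [
        'x-amz-server-side-encryption-customer-algorithm:AES256',
        'x-amz-server-side-encryption-customer-key:{}'.format(encoded_key),
        'x-amz-server-side-encryption-customer-key-md5:{}'.format(encoded_key_md5),
    ]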
bkg/django-spillway | spillway/query.py | filter_geometry | def filter_geometry(queryset, **filters):
"""Helper function for spatial lookups filters.
Provide spatial lookup types as keywords without underscores instead of the
usual "geometryfield__lookuptype" format.
"""
fieldname = geo_field(queryset).name
query = {'%s__%s' % (fieldname, k): v for k, v in filters.items()}
return queryset.filter(**query) | python | def filter_geometry(queryset, **filters):
"""Helper function for spatial lookups filters.
Provide spatial lookup types as keywords without underscores instead of the
usual "geometryfield__lookuptype" format.
"""
fieldname = geo_field(queryset).name
query = {'%s__%s' % (fieldname, k): v for k, v in filters.items()}
return queryset.filter(**query) | [
"def",
"filter_geometry",
"(",
"queryset",
",",
"**",
"filters",
")",
":",
"fieldname",
"=",
"geo_field",
"(",
"queryset",
")",
".",
"name",
"query",
"=",
"{",
"'%s__%s'",
"%",
"(",
"fieldname",
",",
"k",
")",
":",
"v",
"for",
"k",
",",
"v",
"in",
"filters",
".",
"items",
"(",
")",
"}",
"return",
"queryset",
".",
"filter",
"(",
"**",
"query",
")"
] | Helper function for spatial lookups filters.
Provide spatial lookup types as keywords without underscores instead of the
usual "geometryfield__lookuptype" format. | [
"Helper",
"function",
"for",
"spatial",
"lookups",
"filters",
"."
] | c488a62642430b005f1e0d4a19e160d8d5964b67 | https://github.com/bkg/django-spillway/blob/c488a62642430b005f1e0d4a19e160d8d5964b67/spillway/query.py#L16-L24 | train |
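Illustrative note: a minimal look at the lookup dictionary filter_geometry builds before delegating to queryset.filter; the field name geom and the WKT placeholder are assumptions for the example.

    fieldname = 'geom'                    # would come from geo_field(queryset).name
    filters = {'intersects': 'BBOX_WKT'}  # e.g. filter_geometry(qs, intersects=bbox)
    query = {'%s__%s' % (fieldname, k): v for k, v in filters.items()}
    print(query)  # {'geom__intersects': 'BBOX_WKT'}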
bkg/django-spillway | spillway/query.py | geo_field | def geo_field(queryset):
"""Returns the GeometryField for a django or spillway GeoQuerySet."""
for field in queryset.model._meta.fields:
if isinstance(field, models.GeometryField):
return field
raise exceptions.FieldDoesNotExist('No GeometryField found') | python | def geo_field(queryset):
"""Returns the GeometryField for a django or spillway GeoQuerySet."""
for field in queryset.model._meta.fields:
if isinstance(field, models.GeometryField):
return field
raise exceptions.FieldDoesNotExist('No GeometryField found') | [
"def",
"geo_field",
"(",
"queryset",
")",
":",
"for",
"field",
"in",
"queryset",
".",
"model",
".",
"_meta",
".",
"fields",
":",
"if",
"isinstance",
"(",
"field",
",",
"models",
".",
"GeometryField",
")",
":",
"return",
"field",
"raise",
"exceptions",
".",
"FieldDoesNotExist",
"(",
"'No GeometryField found'",
")"
] | Returns the GeometryField for a django or spillway GeoQuerySet. | [
"Returns",
"the",
"GeometryField",
"for",
"a",
"django",
"or",
"spillway",
"GeoQuerySet",
"."
] | c488a62642430b005f1e0d4a19e160d8d5964b67 | https://github.com/bkg/django-spillway/blob/c488a62642430b005f1e0d4a19e160d8d5964b67/spillway/query.py#L26-L31 | train |
bkg/django-spillway | spillway/query.py | get_srid | def get_srid(queryset):
"""Returns the GeoQuerySet spatial reference identifier."""
try:
srid = list(six.viewvalues(queryset.query.annotations))[0].srid
except (AttributeError, IndexError):
srid = None
return srid or geo_field(queryset).srid | python | def get_srid(queryset):
"""Returns the GeoQuerySet spatial reference identifier."""
try:
srid = list(six.viewvalues(queryset.query.annotations))[0].srid
except (AttributeError, IndexError):
srid = None
return srid or geo_field(queryset).srid | [
"def",
"get_srid",
"(",
"queryset",
")",
":",
"try",
":",
"srid",
"=",
"list",
"(",
"six",
".",
"viewvalues",
"(",
"queryset",
".",
"query",
".",
"annotations",
")",
")",
"[",
"0",
"]",
".",
"srid",
"except",
"(",
"AttributeError",
",",
"IndexError",
")",
":",
"srid",
"=",
"None",
"return",
"srid",
"or",
"geo_field",
"(",
"queryset",
")",
".",
"srid"
] | Returns the GeoQuerySet spatial reference identifier. | [
"Returns",
"the",
"GeoQuerySet",
"spatial",
"reference",
"identifier",
"."
] | c488a62642430b005f1e0d4a19e160d8d5964b67 | https://github.com/bkg/django-spillway/blob/c488a62642430b005f1e0d4a19e160d8d5964b67/spillway/query.py#L33-L39 | train |
bkg/django-spillway | spillway/query.py | agg_dims | def agg_dims(arr, stat):
"""Returns a 1D array with higher dimensions aggregated using stat fn.
Arguments:
arr -- ndarray
stat -- numpy or numpy.ma function as str to call
"""
axis = None
if arr.ndim > 2:
axis = 1
arr = arr.reshape(arr.shape[0], -1)
module = np.ma if hasattr(arr, 'mask') else np
return getattr(module, stat)(arr, axis) | python | def agg_dims(arr, stat):
"""Returns a 1D array with higher dimensions aggregated using stat fn.
Arguments:
arr -- ndarray
stat -- numpy or numpy.ma function as str to call
"""
axis = None
if arr.ndim > 2:
axis = 1
arr = arr.reshape(arr.shape[0], -1)
module = np.ma if hasattr(arr, 'mask') else np
return getattr(module, stat)(arr, axis) | [
"def",
"agg_dims",
"(",
"arr",
",",
"stat",
")",
":",
"axis",
"=",
"None",
"if",
"arr",
".",
"ndim",
">",
"2",
":",
"axis",
"=",
"1",
"arr",
"=",
"arr",
".",
"reshape",
"(",
"arr",
".",
"shape",
"[",
"0",
"]",
",",
"-",
"1",
")",
"module",
"=",
"np",
".",
"ma",
"if",
"hasattr",
"(",
"arr",
",",
"'mask'",
")",
"else",
"np",
"return",
"getattr",
"(",
"module",
",",
"stat",
")",
"(",
"arr",
",",
"axis",
")"
] | Returns a 1D array with higher dimensions aggregated using stat fn.
Arguments:
arr -- ndarray
stat -- numpy or numpy.ma function as str to call | [
"Returns",
"a",
"1D",
"array",
"with",
"higher",
"dimensions",
"aggregated",
"using",
"stat",
"fn",
"."
] | c488a62642430b005f1e0d4a19e160d8d5964b67 | https://github.com/bkg/django-spillway/blob/c488a62642430b005f1e0d4a19e160d8d5964b67/spillway/query.py#L41-L53 | train |
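Illustrative note: a numpy-only sketch of the dimension flattening agg_dims performs for a 3-D array with the mean statistic.

    import numpy as np

    arr = np.arange(24).reshape(2, 3, 4)    # e.g. two 3x4 rasters
    flat = arr.reshape(arr.shape[0], -1)    # collapse higher dims -> shape (2, 12)
    print(np.mean(flat, 1))                 # one value per raster: [ 5.5 17.5]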
bkg/django-spillway | spillway/query.py | GeoQuerySet.extent | def extent(self, srid=None):
"""Returns the GeoQuerySet extent as a 4-tuple.
Keyword args:
srid -- EPSG id for for transforming the output geometry.
"""
expr = self.geo_field.name
if srid:
expr = geofn.Transform(expr, srid)
expr = models.Extent(expr)
clone = self.all()
name, val = clone.aggregate(expr).popitem()
return val | python | def extent(self, srid=None):
"""Returns the GeoQuerySet extent as a 4-tuple.
Keyword args:
srid -- EPSG id for for transforming the output geometry.
"""
expr = self.geo_field.name
if srid:
expr = geofn.Transform(expr, srid)
expr = models.Extent(expr)
clone = self.all()
name, val = clone.aggregate(expr).popitem()
return val | [
"def",
"extent",
"(",
"self",
",",
"srid",
"=",
"None",
")",
":",
"expr",
"=",
"self",
".",
"geo_field",
".",
"name",
"if",
"srid",
":",
"expr",
"=",
"geofn",
".",
"Transform",
"(",
"expr",
",",
"srid",
")",
"expr",
"=",
"models",
".",
"Extent",
"(",
"expr",
")",
"clone",
"=",
"self",
".",
"all",
"(",
")",
"name",
",",
"val",
"=",
"clone",
".",
"aggregate",
"(",
"expr",
")",
".",
"popitem",
"(",
")",
"return",
"val"
] | Returns the GeoQuerySet extent as a 4-tuple.
Keyword args:
srid -- EPSG id for for transforming the output geometry. | [
"Returns",
"the",
"GeoQuerySet",
"extent",
"as",
"a",
"4",
"-",
"tuple",
"."
] | c488a62642430b005f1e0d4a19e160d8d5964b67 | https://github.com/bkg/django-spillway/blob/c488a62642430b005f1e0d4a19e160d8d5964b67/spillway/query.py#L87-L99 | train |
bkg/django-spillway | spillway/query.py | GeoQuerySet.pbf | def pbf(self, bbox, geo_col=None, scale=4096):
"""Returns tranlated and scaled geometries suitable for Mapbox vector
tiles.
"""
col = geo_col or self.geo_field.name
w, s, e, n = bbox.extent
trans = self._trans_scale(col, -w, -s,
scale / (e - w),
scale / (n - s))
g = AsText(trans)
return self.annotate(pbf=g) | python | def pbf(self, bbox, geo_col=None, scale=4096):
"""Returns tranlated and scaled geometries suitable for Mapbox vector
tiles.
"""
col = geo_col or self.geo_field.name
w, s, e, n = bbox.extent
trans = self._trans_scale(col, -w, -s,
scale / (e - w),
scale / (n - s))
g = AsText(trans)
return self.annotate(pbf=g) | [
"def",
"pbf",
"(",
"self",
",",
"bbox",
",",
"geo_col",
"=",
"None",
",",
"scale",
"=",
"4096",
")",
":",
"col",
"=",
"geo_col",
"or",
"self",
".",
"geo_field",
".",
"name",
"w",
",",
"s",
",",
"e",
",",
"n",
"=",
"bbox",
".",
"extent",
"trans",
"=",
"self",
".",
"_trans_scale",
"(",
"col",
",",
"-",
"w",
",",
"-",
"s",
",",
"scale",
"/",
"(",
"e",
"-",
"w",
")",
",",
"scale",
"/",
"(",
"n",
"-",
"s",
")",
")",
"g",
"=",
"AsText",
"(",
"trans",
")",
"return",
"self",
".",
"annotate",
"(",
"pbf",
"=",
"g",
")"
] | Returns translated and scaled geometries suitable for Mapbox vector
tiles. | [
"Returns",
"tranlated",
"and",
"scaled",
"geometries",
"suitable",
"for",
"Mapbox",
"vector",
"tiles",
"."
] | c488a62642430b005f1e0d4a19e160d8d5964b67 | https://github.com/bkg/django-spillway/blob/c488a62642430b005f1e0d4a19e160d8d5964b67/spillway/query.py#L110-L120 | train |
bkg/django-spillway | spillway/query.py | GeoQuerySet.tile | def tile(self, bbox, z=0, format=None, clip=True):
"""Returns a GeoQuerySet intersecting a tile boundary.
Arguments:
bbox -- tile extent as geometry
Keyword args:
z -- tile zoom level used as basis for geometry simplification
format -- vector tile format as str (pbf, geojson)
clip -- clip geometries to tile boundary as boolean
"""
# Tile grid uses 3857, but GeoJSON coordinates should be in 4326.
tile_srid = 3857
bbox = getattr(bbox, 'geos', bbox)
clone = filter_geometry(self, intersects=bbox)
field = clone.geo_field
srid = field.srid
sql = field.name
try:
tilew = self.tilewidths[z]
except IndexError:
tilew = self.tilewidths[-1]
if bbox.srid != srid:
bbox = bbox.transform(srid, clone=True)
# Estimate tile width in degrees instead of meters.
if bbox.srs.geographic:
p = geos.Point(tilew, tilew, srid=tile_srid)
p.transform(srid)
tilew = p.x
if clip:
bufbox = bbox.buffer(tilew)
sql = geofn.Intersection(sql, bufbox.envelope)
sql = SimplifyPreserveTopology(sql, tilew)
if format == 'pbf':
return clone.pbf(bbox, geo_col=sql)
sql = geofn.Transform(sql, 4326)
return clone.annotate(**{format: sql}) | python | def tile(self, bbox, z=0, format=None, clip=True):
"""Returns a GeoQuerySet intersecting a tile boundary.
Arguments:
bbox -- tile extent as geometry
Keyword args:
z -- tile zoom level used as basis for geometry simplification
format -- vector tile format as str (pbf, geojson)
clip -- clip geometries to tile boundary as boolean
"""
# Tile grid uses 3857, but GeoJSON coordinates should be in 4326.
tile_srid = 3857
bbox = getattr(bbox, 'geos', bbox)
clone = filter_geometry(self, intersects=bbox)
field = clone.geo_field
srid = field.srid
sql = field.name
try:
tilew = self.tilewidths[z]
except IndexError:
tilew = self.tilewidths[-1]
if bbox.srid != srid:
bbox = bbox.transform(srid, clone=True)
# Estimate tile width in degrees instead of meters.
if bbox.srs.geographic:
p = geos.Point(tilew, tilew, srid=tile_srid)
p.transform(srid)
tilew = p.x
if clip:
bufbox = bbox.buffer(tilew)
sql = geofn.Intersection(sql, bufbox.envelope)
sql = SimplifyPreserveTopology(sql, tilew)
if format == 'pbf':
return clone.pbf(bbox, geo_col=sql)
sql = geofn.Transform(sql, 4326)
return clone.annotate(**{format: sql}) | [
"def",
"tile",
"(",
"self",
",",
"bbox",
",",
"z",
"=",
"0",
",",
"format",
"=",
"None",
",",
"clip",
"=",
"True",
")",
":",
"tile_srid",
"=",
"3857",
"bbox",
"=",
"getattr",
"(",
"bbox",
",",
"'geos'",
",",
"bbox",
")",
"clone",
"=",
"filter_geometry",
"(",
"self",
",",
"intersects",
"=",
"bbox",
")",
"field",
"=",
"clone",
".",
"geo_field",
"srid",
"=",
"field",
".",
"srid",
"sql",
"=",
"field",
".",
"name",
"try",
":",
"tilew",
"=",
"self",
".",
"tilewidths",
"[",
"z",
"]",
"except",
"IndexError",
":",
"tilew",
"=",
"self",
".",
"tilewidths",
"[",
"-",
"1",
"]",
"if",
"bbox",
".",
"srid",
"!=",
"srid",
":",
"bbox",
"=",
"bbox",
".",
"transform",
"(",
"srid",
",",
"clone",
"=",
"True",
")",
"if",
"bbox",
".",
"srs",
".",
"geographic",
":",
"p",
"=",
"geos",
".",
"Point",
"(",
"tilew",
",",
"tilew",
",",
"srid",
"=",
"tile_srid",
")",
"p",
".",
"transform",
"(",
"srid",
")",
"tilew",
"=",
"p",
".",
"x",
"if",
"clip",
":",
"bufbox",
"=",
"bbox",
".",
"buffer",
"(",
"tilew",
")",
"sql",
"=",
"geofn",
".",
"Intersection",
"(",
"sql",
",",
"bufbox",
".",
"envelope",
")",
"sql",
"=",
"SimplifyPreserveTopology",
"(",
"sql",
",",
"tilew",
")",
"if",
"format",
"==",
"'pbf'",
":",
"return",
"clone",
".",
"pbf",
"(",
"bbox",
",",
"geo_col",
"=",
"sql",
")",
"sql",
"=",
"geofn",
".",
"Transform",
"(",
"sql",
",",
"4326",
")",
"return",
"clone",
".",
"annotate",
"(",
"**",
"{",
"format",
":",
"sql",
"}",
")"
] | Returns a GeoQuerySet intersecting a tile boundary.
Arguments:
bbox -- tile extent as geometry
Keyword args:
z -- tile zoom level used as basis for geometry simplification
format -- vector tile format as str (pbf, geojson)
clip -- clip geometries to tile boundary as boolean | [
"Returns",
"a",
"GeoQuerySet",
"intersecting",
"a",
"tile",
"boundary",
"."
] | c488a62642430b005f1e0d4a19e160d8d5964b67 | https://github.com/bkg/django-spillway/blob/c488a62642430b005f1e0d4a19e160d8d5964b67/spillway/query.py#L122-L157 | train |
bkg/django-spillway | spillway/query.py | RasterQuerySet.arrays | def arrays(self, field_name=None):
"""Returns a list of ndarrays.
Keyword args:
field_name -- raster field name as str
"""
fieldname = field_name or self.raster_field.name
arrays = []
for obj in self:
arr = getattr(obj, fieldname)
if isinstance(arr, np.ndarray):
arrays.append(arr)
else:
arrays.append(obj.array())
return arrays | python | def arrays(self, field_name=None):
"""Returns a list of ndarrays.
Keyword args:
field_name -- raster field name as str
"""
fieldname = field_name or self.raster_field.name
arrays = []
for obj in self:
arr = getattr(obj, fieldname)
if isinstance(arr, np.ndarray):
arrays.append(arr)
else:
arrays.append(obj.array())
return arrays | [
"def",
"arrays",
"(",
"self",
",",
"field_name",
"=",
"None",
")",
":",
"fieldname",
"=",
"field_name",
"or",
"self",
".",
"raster_field",
".",
"name",
"arrays",
"=",
"[",
"]",
"for",
"obj",
"in",
"self",
":",
"arr",
"=",
"getattr",
"(",
"obj",
",",
"fieldname",
")",
"if",
"isinstance",
"(",
"arr",
",",
"np",
".",
"ndarray",
")",
":",
"arrays",
".",
"append",
"(",
"arr",
")",
"else",
":",
"arrays",
".",
"append",
"(",
"obj",
".",
"array",
"(",
")",
")",
"return",
"arrays"
] | Returns a list of ndarrays.
Keyword args:
field_name -- raster field name as str | [
"Returns",
"a",
"list",
"of",
"ndarrays",
"."
] | c488a62642430b005f1e0d4a19e160d8d5964b67 | https://github.com/bkg/django-spillway/blob/c488a62642430b005f1e0d4a19e160d8d5964b67/spillway/query.py#L161-L175 | train |
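A usage sketch for RasterQuerySet.arrays(), assuming an illustrative model RasterStore whose raster FileField is managed by this queryset; each item is the ndarray read from (or already cached on) that field.

for arr in RasterStore.objects.filter(pk__in=[1, 2, 3]).arrays():
    print(arr.shape, arr.mean())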
bkg/django-spillway | spillway/query.py | RasterQuerySet.aggregate_periods | def aggregate_periods(self, periods):
"""Returns list of ndarrays averaged to a given number of periods.
Arguments:
periods -- desired number of periods as int
"""
try:
fieldname = self.raster_field.name
except TypeError:
raise exceptions.FieldDoesNotExist('Raster field not found')
arrays = self.arrays(fieldname)
arr = arrays[0]
if len(arrays) > 1:
if getattr(arr, 'ndim', 0) > 2:
arrays = np.vstack(arrays)
fill = getattr(arr, 'fill_value', None)
arr = np.ma.masked_values(arrays, fill, copy=False)
# Try to reshape using equal sizes first and fall back to unequal
# splits.
try:
means = arr.reshape((periods, -1)).mean(axis=1)
except ValueError:
means = np.array([a.mean() for a in np.array_split(arr, periods)])
obj = self[0]
setattr(obj, fieldname, means)
return [obj] | python | def aggregate_periods(self, periods):
"""Returns list of ndarrays averaged to a given number of periods.
Arguments:
periods -- desired number of periods as int
"""
try:
fieldname = self.raster_field.name
except TypeError:
raise exceptions.FieldDoesNotExist('Raster field not found')
arrays = self.arrays(fieldname)
arr = arrays[0]
if len(arrays) > 1:
if getattr(arr, 'ndim', 0) > 2:
arrays = np.vstack(arrays)
fill = getattr(arr, 'fill_value', None)
arr = np.ma.masked_values(arrays, fill, copy=False)
# Try to reshape using equal sizes first and fall back to unequal
# splits.
try:
means = arr.reshape((periods, -1)).mean(axis=1)
except ValueError:
means = np.array([a.mean() for a in np.array_split(arr, periods)])
obj = self[0]
setattr(obj, fieldname, means)
return [obj] | [
"def",
"aggregate_periods",
"(",
"self",
",",
"periods",
")",
":",
"try",
":",
"fieldname",
"=",
"self",
".",
"raster_field",
".",
"name",
"except",
"TypeError",
":",
"raise",
"exceptions",
".",
"FieldDoesNotExist",
"(",
"'Raster field not found'",
")",
"arrays",
"=",
"self",
".",
"arrays",
"(",
"fieldname",
")",
"arr",
"=",
"arrays",
"[",
"0",
"]",
"if",
"len",
"(",
"arrays",
")",
">",
"1",
":",
"if",
"getattr",
"(",
"arr",
",",
"'ndim'",
",",
"0",
")",
">",
"2",
":",
"arrays",
"=",
"np",
".",
"vstack",
"(",
"arrays",
")",
"fill",
"=",
"getattr",
"(",
"arr",
",",
"'fill_value'",
",",
"None",
")",
"arr",
"=",
"np",
".",
"ma",
".",
"masked_values",
"(",
"arrays",
",",
"fill",
",",
"copy",
"=",
"False",
")",
"try",
":",
"means",
"=",
"arr",
".",
"reshape",
"(",
"(",
"periods",
",",
"-",
"1",
")",
")",
".",
"mean",
"(",
"axis",
"=",
"1",
")",
"except",
"ValueError",
":",
"means",
"=",
"np",
".",
"array",
"(",
"[",
"a",
".",
"mean",
"(",
")",
"for",
"a",
"in",
"np",
".",
"array_split",
"(",
"arr",
",",
"periods",
")",
"]",
")",
"obj",
"=",
"self",
"[",
"0",
"]",
"setattr",
"(",
"obj",
",",
"fieldname",
",",
"means",
")",
"return",
"[",
"obj",
"]"
] | Returns list of ndarrays averaged to a given number of periods.
Arguments:
periods -- desired number of periods as int | [
"Returns",
"list",
"of",
"ndarrays",
"averaged",
"to",
"a",
"given",
"number",
"of",
"periods",
"."
] | c488a62642430b005f1e0d4a19e160d8d5964b67 | https://github.com/bkg/django-spillway/blob/c488a62642430b005f1e0d4a19e160d8d5964b67/spillway/query.py#L177-L202 | train |
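A sketch for aggregate_periods() with the same illustrative RasterStore model; the averaged values are written back onto the raster field of the first object in the queryset.

objs = RasterStore.objects.filter(series='monthly').aggregate_periods(4)
means = objs[0].image  # 'image' stands in for whatever the raster field is named
print(means)           # one averaged value per requested period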
bkg/django-spillway | spillway/query.py | RasterQuerySet.raster_field | def raster_field(self):
"""Returns the raster FileField instance on the model."""
for field in self.model._meta.fields:
if isinstance(field, models.FileField):
return field
return False | python | def raster_field(self):
"""Returns the raster FileField instance on the model."""
for field in self.model._meta.fields:
if isinstance(field, models.FileField):
return field
return False | [
"def",
"raster_field",
"(",
"self",
")",
":",
"for",
"field",
"in",
"self",
".",
"model",
".",
"_meta",
".",
"fields",
":",
"if",
"isinstance",
"(",
"field",
",",
"models",
".",
"FileField",
")",
":",
"return",
"field",
"return",
"False"
] | Returns the raster FileField instance on the model. | [
"Returns",
"the",
"raster",
"FileField",
"instance",
"on",
"the",
"model",
"."
] | c488a62642430b005f1e0d4a19e160d8d5964b67 | https://github.com/bkg/django-spillway/blob/c488a62642430b005f1e0d4a19e160d8d5964b67/spillway/query.py#L218-L223 | train |
bkg/django-spillway | spillway/query.py | RasterQuerySet.zipfiles | def zipfiles(self, path=None, arcdirname='data'):
"""Returns a .zip archive of selected rasters."""
if path:
fp = open(path, 'w+b')
else:
prefix = '%s-' % arcdirname
fp = tempfile.NamedTemporaryFile(prefix=prefix, suffix='.zip')
with zipfile.ZipFile(fp, mode='w') as zf:
for obj in self:
img = obj.image
arcname = os.path.join(arcdirname, os.path.basename(img.name))
try:
zf.write(img.path, arcname=arcname)
except OSError:
img.seek(0)
zf.writestr(arcname, img.read())
img.close()
fp.seek(0)
zobj = self.model(image=fp)
return [zobj] | python | def zipfiles(self, path=None, arcdirname='data'):
"""Returns a .zip archive of selected rasters."""
if path:
fp = open(path, 'w+b')
else:
prefix = '%s-' % arcdirname
fp = tempfile.NamedTemporaryFile(prefix=prefix, suffix='.zip')
with zipfile.ZipFile(fp, mode='w') as zf:
for obj in self:
img = obj.image
arcname = os.path.join(arcdirname, os.path.basename(img.name))
try:
zf.write(img.path, arcname=arcname)
except OSError:
img.seek(0)
zf.writestr(arcname, img.read())
img.close()
fp.seek(0)
zobj = self.model(image=fp)
return [zobj] | [
"def",
"zipfiles",
"(",
"self",
",",
"path",
"=",
"None",
",",
"arcdirname",
"=",
"'data'",
")",
":",
"if",
"path",
":",
"fp",
"=",
"open",
"(",
"path",
",",
"'w+b'",
")",
"else",
":",
"prefix",
"=",
"'%s-'",
"%",
"arcdirname",
"fp",
"=",
"tempfile",
".",
"NamedTemporaryFile",
"(",
"prefix",
"=",
"prefix",
",",
"suffix",
"=",
"'.zip'",
")",
"with",
"zipfile",
".",
"ZipFile",
"(",
"fp",
",",
"mode",
"=",
"'w'",
")",
"as",
"zf",
":",
"for",
"obj",
"in",
"self",
":",
"img",
"=",
"obj",
".",
"image",
"arcname",
"=",
"os",
".",
"path",
".",
"join",
"(",
"arcdirname",
",",
"os",
".",
"path",
".",
"basename",
"(",
"img",
".",
"name",
")",
")",
"try",
":",
"zf",
".",
"write",
"(",
"img",
".",
"path",
",",
"arcname",
"=",
"arcname",
")",
"except",
"OSError",
":",
"img",
".",
"seek",
"(",
"0",
")",
"zf",
".",
"writestr",
"(",
"arcname",
",",
"img",
".",
"read",
"(",
")",
")",
"img",
".",
"close",
"(",
")",
"fp",
".",
"seek",
"(",
"0",
")",
"zobj",
"=",
"self",
".",
"model",
"(",
"image",
"=",
"fp",
")",
"return",
"[",
"zobj",
"]"
] | Returns a .zip archive of selected rasters. | [
"Returns",
"a",
".",
"zip",
"archive",
"of",
"selected",
"rasters",
"."
] | c488a62642430b005f1e0d4a19e160d8d5964b67 | https://github.com/bkg/django-spillway/blob/c488a62642430b005f1e0d4a19e160d8d5964b67/spillway/query.py#L265-L284 | train |
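A sketch for zipfiles(); the path is a placeholder, and with no path argument the archive would land in a NamedTemporaryFile instead.

result = RasterStore.objects.filter(year=2015).zipfiles(path='/tmp/rasters-2015.zip')
archive = result[0]  # unsaved model instance whose image field wraps the open zip file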
idlesign/steampak | steampak/libsteam/resources/main.py | Api.init | def init(self, app_id=None):
"""Initializes Steam API library.
:param str|int app_id: Application ID.
:raises: SteamApiStartupError
"""
self.set_app_id(app_id)
err_msg = (
'Unable to initialize. Check Steam client is running '
'and Steam application ID is defined in steam_appid.txt or passed to Api.'
)
if self._lib.steam_init():
try:
_set_client(self._lib.Client())
self.utils = Utils()
self.current_user = CurrentUser()
self.friends = Friends()
self.groups = Groups()
self.apps = Applications()
self.overlay = Overlay()
self.screenshots = Screenshots()
except Exception as e:
raise SteamApiStartupError('%s:\n%s' % (err_msg, e))
else:
raise SteamApiStartupError(err_msg) | python | def init(self, app_id=None):
"""Initializes Steam API library.
:param str|int app_id: Application ID.
:raises: SteamApiStartupError
"""
self.set_app_id(app_id)
err_msg = (
'Unable to initialize. Check Steam client is running '
'and Steam application ID is defined in steam_appid.txt or passed to Api.'
)
if self._lib.steam_init():
try:
_set_client(self._lib.Client())
self.utils = Utils()
self.current_user = CurrentUser()
self.friends = Friends()
self.groups = Groups()
self.apps = Applications()
self.overlay = Overlay()
self.screenshots = Screenshots()
except Exception as e:
raise SteamApiStartupError('%s:\n%s' % (err_msg, e))
else:
raise SteamApiStartupError(err_msg) | [
"def",
"init",
"(",
"self",
",",
"app_id",
"=",
"None",
")",
":",
"self",
".",
"set_app_id",
"(",
"app_id",
")",
"err_msg",
"=",
"(",
"'Unable to initialize. Check Steam client is running '",
"'and Steam application ID is defined in steam_appid.txt or passed to Api.'",
")",
"if",
"self",
".",
"_lib",
".",
"steam_init",
"(",
")",
":",
"try",
":",
"_set_client",
"(",
"self",
".",
"_lib",
".",
"Client",
"(",
")",
")",
"self",
".",
"utils",
"=",
"Utils",
"(",
")",
"self",
".",
"current_user",
"=",
"CurrentUser",
"(",
")",
"self",
".",
"friends",
"=",
"Friends",
"(",
")",
"self",
".",
"groups",
"=",
"Groups",
"(",
")",
"self",
".",
"apps",
"=",
"Applications",
"(",
")",
"self",
".",
"overlay",
"=",
"Overlay",
"(",
")",
"self",
".",
"screenshots",
"=",
"Screenshots",
"(",
")",
"except",
"Exception",
"as",
"e",
":",
"raise",
"SteamApiStartupError",
"(",
"'%s:\\n%s'",
"%",
"(",
"err_msg",
",",
"e",
")",
")",
"else",
":",
"raise",
"SteamApiStartupError",
"(",
"err_msg",
")"
] | Initializes Steam API library.
:param str|int app_id: Application ID.
:raises: SteamApiStartupError | [
"Initializes",
"Steam",
"API",
"library",
"."
] | cb3f2c737e272b0360802d947e388df7e34f50f3 | https://github.com/idlesign/steampak/blob/cb3f2c737e272b0360802d947e388df7e34f50f3/steampak/libsteam/resources/main.py#L125-L155 | train |
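A sketch of driving Api.init(); it assumes api is an Api instance already constructed against the Steam client library, and uses 480 (Valve's public Spacewar test app) as an illustrative application id.

api.init(app_id=480)  # raises SteamApiStartupError if the Steam client is not running
print(api.current_user, api.friends, api.apps)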
BD2KGenomics/protect | src/protect/common.py | get_files_from_filestore | def get_files_from_filestore(job, files, work_dir, docker=False):
"""
Download a dict of files to the given directory and modify the path to a docker-friendly one if
requested.
:param dict files: A dictionary of filenames: fsIDs
:param str work_dir: The destination directory
:param bool docker: Should the file path be converted to our standard docker '/data/filename'?
:return: Dict of files: (optionally docker-friendly) filepaths
:rtype: dict
"""
for name in files.keys():
outfile = job.fileStore.readGlobalFile(files[name], '/'.join([work_dir, name]))
# If the files will be sent to docker, we will mount work_dir to the container as /data and
# we want the /data prefixed path to the file
if docker:
files[name] = docker_path(outfile)
else:
files[name] = outfile
return files | python | def get_files_from_filestore(job, files, work_dir, docker=False):
"""
Download a dict of files to the given directory and modify the path to a docker-friendly one if
requested.
:param dict files: A dictionary of filenames: fsIDs
:param str work_dir: The destination directory
:param bool docker: Should the file path be converted to our standard docker '/data/filename'?
:return: Dict of files: (optionally docker-friendly) filepaths
:rtype: dict
"""
for name in files.keys():
outfile = job.fileStore.readGlobalFile(files[name], '/'.join([work_dir, name]))
# If the files will be sent to docker, we will mount work_dir to the container as /data and
# we want the /data prefixed path to the file
if docker:
files[name] = docker_path(outfile)
else:
files[name] = outfile
return files | [
"def",
"get_files_from_filestore",
"(",
"job",
",",
"files",
",",
"work_dir",
",",
"docker",
"=",
"False",
")",
":",
"for",
"name",
"in",
"files",
".",
"keys",
"(",
")",
":",
"outfile",
"=",
"job",
".",
"fileStore",
".",
"readGlobalFile",
"(",
"files",
"[",
"name",
"]",
",",
"'/'",
".",
"join",
"(",
"[",
"work_dir",
",",
"name",
"]",
")",
")",
"if",
"docker",
":",
"files",
"[",
"name",
"]",
"=",
"docker_path",
"(",
"outfile",
")",
"else",
":",
"files",
"[",
"name",
"]",
"=",
"outfile",
"return",
"files"
] | Download a dict of files to the given directory and modify the path to a docker-friendly one if
requested.
:param dict files: A dictionary of filenames: fsIDs
:param str work_dir: The destination directory
:param bool docker: Should the file path be converted to our standard docker '/data/filename'?
:return: Dict of files: (optionally docker-friendly) filepaths
:rtype: dict | [
"Download",
"a",
"dict",
"of",
"files",
"to",
"the",
"given",
"directory",
"and",
"modify",
"the",
"path",
"to",
"a",
"docker",
"-",
"friendly",
"one",
"if",
"requested",
"."
] | 06310682c50dcf8917b912c8e551299ff7ee41ce | https://github.com/BD2KGenomics/protect/blob/06310682c50dcf8917b912c8e551299ff7ee41ce/src/protect/common.py#L45-L64 | train |
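A sketch of get_files_from_filestore() inside a Toil job function; bam_fsid and bai_fsid are placeholder file-store IDs from earlier writeGlobalFile calls.

from protect.common import get_files_from_filestore

work_dir = job.fileStore.getLocalTempDir()
input_files = {'tumor_dna.bam': bam_fsid, 'tumor_dna.bam.bai': bai_fsid}
input_files = get_files_from_filestore(job, input_files, work_dir, docker=True)
# each value is now '/data/<name>', ready to hand to a docker_call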
BD2KGenomics/protect | src/protect/common.py | gunzip | def gunzip(input_gzip_file, block_size=1024):
"""
Gunzips the input file to the same directory
:param input_gzip_file: File to be gunzipped
:return: path to the gunzipped file
:rtype: str
"""
assert os.path.splitext(input_gzip_file)[1] == '.gz'
assert is_gzipfile(input_gzip_file)
with gzip.open(input_gzip_file) as infile:
with open(os.path.splitext(input_gzip_file)[0], 'w') as outfile:
while True:
block = infile.read(block_size)
if block == '':
break
else:
outfile.write(block)
return outfile.name | python | def gunzip(input_gzip_file, block_size=1024):
"""
Gunzips the input file to the same directory
:param input_gzip_file: File to be gunzipped
:return: path to the gunzipped file
:rtype: str
"""
assert os.path.splitext(input_gzip_file)[1] == '.gz'
assert is_gzipfile(input_gzip_file)
with gzip.open(input_gzip_file) as infile:
with open(os.path.splitext(input_gzip_file)[0], 'w') as outfile:
while True:
block = infile.read(block_size)
if block == '':
break
else:
outfile.write(block)
return outfile.name | [
"def",
"gunzip",
"(",
"input_gzip_file",
",",
"block_size",
"=",
"1024",
")",
":",
"assert",
"os",
".",
"path",
".",
"splitext",
"(",
"input_gzip_file",
")",
"[",
"1",
"]",
"==",
"'.gz'",
"assert",
"is_gzipfile",
"(",
"input_gzip_file",
")",
"with",
"gzip",
".",
"open",
"(",
"input_gzip_file",
")",
"as",
"infile",
":",
"with",
"open",
"(",
"os",
".",
"path",
".",
"splitext",
"(",
"input_gzip_file",
")",
"[",
"0",
"]",
",",
"'w'",
")",
"as",
"outfile",
":",
"while",
"True",
":",
"block",
"=",
"infile",
".",
"read",
"(",
"block_size",
")",
"if",
"block",
"==",
"''",
":",
"break",
"else",
":",
"outfile",
".",
"write",
"(",
"block",
")",
"return",
"outfile",
".",
"name"
] | Gunzips the input file to the same directory
:param input_gzip_file: File to be gunzipped
:return: path to the gunzipped file
:rtype: str | [
"Gunzips",
"the",
"input",
"file",
"to",
"the",
"same",
"directory"
] | 06310682c50dcf8917b912c8e551299ff7ee41ce | https://github.com/BD2KGenomics/protect/blob/06310682c50dcf8917b912c8e551299ff7ee41ce/src/protect/common.py#L163-L181 | train |
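A sketch for gunzip(); the path is a placeholder and must point at a real gzip file ending in .gz, both of which the function asserts.

from protect.common import gunzip

fastq = gunzip('/tmp/reads.fastq.gz')
print(fastq)  # '/tmp/reads.fastq', written next to the input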
BD2KGenomics/protect | src/protect/common.py | is_gzipfile | def is_gzipfile(filename):
"""
Attempt to ascertain the gzip status of a file based on the "magic signatures" of the file.
This was taken from the stack overflow post
http://stackoverflow.com/questions/13044562/python-mechanism-to-identify-compressed-file-type\
-and-uncompress
:param str filename: A path to a file
:return: True if the file appears to be gzipped else false
:rtype: bool
"""
assert os.path.exists(filename), 'Input {} does not '.format(filename) + \
'point to a file.'
with open(filename, 'rb') as in_f:
start_of_file = in_f.read(3)
if start_of_file == '\x1f\x8b\x08':
return True
else:
return False | python | def is_gzipfile(filename):
"""
Attempt to ascertain the gzip status of a file based on the "magic signatures" of the file.
This was taken from the stack overflow post
http://stackoverflow.com/questions/13044562/python-mechanism-to-identify-compressed-file-type\
-and-uncompress
:param str filename: A path to a file
:return: True if the file appears to be gzipped else false
:rtype: bool
"""
assert os.path.exists(filename), 'Input {} does not '.format(filename) + \
'point to a file.'
with open(filename, 'rb') as in_f:
start_of_file = in_f.read(3)
if start_of_file == '\x1f\x8b\x08':
return True
else:
return False | [
"def",
"is_gzipfile",
"(",
"filename",
")",
":",
"assert",
"os",
".",
"path",
".",
"exists",
"(",
"filename",
")",
",",
"'Input {} does not '",
".",
"format",
"(",
"filename",
")",
"+",
"'point to a file.'",
"with",
"open",
"(",
"filename",
",",
"'rb'",
")",
"as",
"in_f",
":",
"start_of_file",
"=",
"in_f",
".",
"read",
"(",
"3",
")",
"if",
"start_of_file",
"==",
"'\\x1f\\x8b\\x08'",
":",
"return",
"True",
"else",
":",
"return",
"False"
] | Attempt to ascertain the gzip status of a file based on the "magic signatures" of the file.
This was taken from the stack overflow post
http://stackoverflow.com/questions/13044562/python-mechanism-to-identify-compressed-file-type\
-and-uncompress
:param str filename: A path to a file
:return: True if the file appears to be gzipped else false
:rtype: bool | [
"Attempt",
"to",
"ascertain",
"the",
"gzip",
"status",
"of",
"a",
"file",
"based",
"on",
"the",
"magic",
"signatures",
"of",
"the",
"file",
"."
] | 06310682c50dcf8917b912c8e551299ff7ee41ce | https://github.com/BD2KGenomics/protect/blob/06310682c50dcf8917b912c8e551299ff7ee41ce/src/protect/common.py#L184-L203 | train |
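is_gzipfile() works as a guard in front of gunzip(); the path is again a placeholder.

from protect.common import gunzip, is_gzipfile

path = '/tmp/reads.fastq.gz'
reads = gunzip(path) if is_gzipfile(path) else path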
BD2KGenomics/protect | src/protect/common.py | get_file_from_gdc | def get_file_from_gdc(job, gdc_url, gdc_download_token, write_to_jobstore=True):
"""
Download a supplied "URL" that points to a file in the NCBI GDC database. The path to the gdc
download token must be provided. The file is downloaded and written to the jobstore if
requested.
:param str gdc_url: URL for the file (in the form of gdc://<UUID>)
:param str gdc_download_token: Path to the gdc download token
:param bool write_to_jobstore: Should the file be written to the job store?
:return: Path to the downloaded file or fsID (if write_to_jobstore was True)
:rtype: list(str|toil.fileStore.FileID)
"""
work_dir = job.fileStore.getLocalTempDir()
parsed_url = urlparse(gdc_url)
assert parsed_url.scheme == 'gdc', 'Unexpected url scheme: %s' % gdc_url
file_dir = '/'.join([work_dir, parsed_url.netloc])
# This is common to encrypted and unencrypted downloads
currwd = os.getcwd()
os.chdir(work_dir)
try:
download_call = ['gdc-client', 'download', '-t', gdc_download_token, parsed_url.netloc]
subprocess.check_call(download_call)
finally:
os.chdir(currwd)
assert os.path.exists(file_dir)
output_files = [os.path.join(file_dir, x) for x in os.listdir(file_dir)
if not x.endswith('logs')]
# NOTE: We only handle vcf and bam+bai
if len(output_files) == 1:
assert output_files[0].endswith('vcf')
else:
if not {os.path.splitext(x)[1] for x in output_files} >= {'.bam', '.bai'}:
raise ParameterError('Can currently only handle pre-indexed GDC bams.')
# Always [bam, bai]
output_files = [x for x in output_files if x.endswith(('bam', 'bai'))]
output_files = sorted(output_files, key=lambda x: os.path.splitext(x)[1], reverse=True)
if write_to_jobstore:
output_files = [job.fileStore.writeGlobalFile(f) for f in output_files]
return output_files | python | def get_file_from_gdc(job, gdc_url, gdc_download_token, write_to_jobstore=True):
"""
Download a supplied "URL" that points to a file in the NCBI GDC database. The path to the gdc
download token must be provided. The file is downloaded and written to the jobstore if
requested.
:param str gdc_url: URL for the file (in the form of gdc://<UUID>)
:param str gdc_download_token: Path to the gdc download token
:param bool write_to_jobstore: Should the file be written to the job store?
:return: Path to the downloaded file or fsID (if write_to_jobstore was True)
:rtype: list(str|toil.fileStore.FileID)
"""
work_dir = job.fileStore.getLocalTempDir()
parsed_url = urlparse(gdc_url)
assert parsed_url.scheme == 'gdc', 'Unexpected url scheme: %s' % gdc_url
file_dir = '/'.join([work_dir, parsed_url.netloc])
# This is common to encrypted and unencrypted downloads
currwd = os.getcwd()
os.chdir(work_dir)
try:
download_call = ['gdc-client', 'download', '-t', gdc_download_token, parsed_url.netloc]
subprocess.check_call(download_call)
finally:
os.chdir(currwd)
assert os.path.exists(file_dir)
output_files = [os.path.join(file_dir, x) for x in os.listdir(file_dir)
if not x.endswith('logs')]
# NOTE: We only handle vcf and bam+bai
if len(output_files) == 1:
assert output_files[0].endswith('vcf')
else:
if not {os.path.splitext(x)[1] for x in output_files} >= {'.bam', '.bai'}:
raise ParameterError('Can currently only handle pre-indexed GDC bams.')
# Always [bam, bai]
output_files = [x for x in output_files if x.endswith(('bam', 'bai'))]
output_files = sorted(output_files, key=lambda x: os.path.splitext(x)[1], reverse=True)
if write_to_jobstore:
output_files = [job.fileStore.writeGlobalFile(f) for f in output_files]
return output_files | [
"def",
"get_file_from_gdc",
"(",
"job",
",",
"gdc_url",
",",
"gdc_download_token",
",",
"write_to_jobstore",
"=",
"True",
")",
":",
"work_dir",
"=",
"job",
".",
"fileStore",
".",
"getLocalTempDir",
"(",
")",
"parsed_url",
"=",
"urlparse",
"(",
"gdc_url",
")",
"assert",
"parsed_url",
".",
"scheme",
"==",
"'gdc'",
",",
"'Unexpected url scheme: %s'",
"%",
"gdc_url",
"file_dir",
"=",
"'/'",
".",
"join",
"(",
"[",
"work_dir",
",",
"parsed_url",
".",
"netloc",
"]",
")",
"currwd",
"=",
"os",
".",
"getcwd",
"(",
")",
"os",
".",
"chdir",
"(",
"work_dir",
")",
"try",
":",
"download_call",
"=",
"[",
"'gdc-client'",
",",
"'download'",
",",
"'-t'",
",",
"gdc_download_token",
",",
"parsed_url",
".",
"netloc",
"]",
"subprocess",
".",
"check_call",
"(",
"download_call",
")",
"finally",
":",
"os",
".",
"chdir",
"(",
"currwd",
")",
"assert",
"os",
".",
"path",
".",
"exists",
"(",
"file_dir",
")",
"output_files",
"=",
"[",
"os",
".",
"path",
".",
"join",
"(",
"file_dir",
",",
"x",
")",
"for",
"x",
"in",
"os",
".",
"listdir",
"(",
"file_dir",
")",
"if",
"not",
"x",
".",
"endswith",
"(",
"'logs'",
")",
"]",
"if",
"len",
"(",
"output_files",
")",
"==",
"1",
":",
"assert",
"output_files",
"[",
"0",
"]",
".",
"endswith",
"(",
"'vcf'",
")",
"else",
":",
"if",
"not",
"{",
"os",
".",
"path",
".",
"splitext",
"(",
"x",
")",
"[",
"1",
"]",
"for",
"x",
"in",
"output_files",
"}",
">=",
"{",
"'.bam'",
",",
"'.bai'",
"}",
":",
"raise",
"ParameterError",
"(",
"'Can currently only handle pre-indexed GDC bams.'",
")",
"output_files",
"=",
"[",
"x",
"for",
"x",
"in",
"output_files",
"if",
"x",
".",
"endswith",
"(",
"(",
"'bam'",
",",
"'bai'",
")",
")",
"]",
"output_files",
"=",
"sorted",
"(",
"output_files",
",",
"key",
"=",
"lambda",
"x",
":",
"os",
".",
"path",
".",
"splitext",
"(",
"x",
")",
"[",
"1",
"]",
",",
"reverse",
"=",
"True",
")",
"if",
"write_to_jobstore",
":",
"output_files",
"=",
"[",
"job",
".",
"fileStore",
".",
"writeGlobalFile",
"(",
"f",
")",
"for",
"f",
"in",
"output_files",
"]",
"return",
"output_files"
] | Download a supplied "URL" that points to a file in the NCBI GDC database. The path to the gdc
download token must be provided. The file is downloaded and written to the jobstore if
requested.
:param str gdc_url: URL for the file (in the form of gdc://<UUID>)
:param str gdc_download_token: Path to the gdc download token
:param bool write_to_jobstore: Should the file be written to the job store?
:return: Path to the downloaded file or fsID (if write_to_jobstore was True)
:rtype: list(str|toil.fileStore.FileID) | [
"Download",
"a",
"supplied",
"URL",
"that",
"points",
"to",
"a",
"file",
"in",
"the",
"NCBI",
"GDC",
"database",
".",
"The",
"path",
"to",
"the",
"gdc",
"download",
"token",
"must",
"be",
"provided",
".",
"The",
"file",
"is",
"downloaded",
"and",
"written",
"to",
"the",
"jobstore",
"if",
"requested",
"."
] | 06310682c50dcf8917b912c8e551299ff7ee41ce | https://github.com/BD2KGenomics/protect/blob/06310682c50dcf8917b912c8e551299ff7ee41ce/src/protect/common.py#L206-L248 | train |
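A sketch of get_file_from_gdc() inside a Toil job; the UUID and token path are placeholders, and a bam+bai download comes back with the bam file-store ID first.

from protect.common import get_file_from_gdc

bam_fsid, bai_fsid = get_file_from_gdc(
    job, 'gdc://0a1b2c3d-0000-0000-0000-000000000000',
    gdc_download_token='/keys/gdc-user-token.txt')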
BD2KGenomics/protect | src/protect/common.py | get_file_from_url | def get_file_from_url(job, any_url, encryption_key=None, per_file_encryption=True,
write_to_jobstore=True):
"""
Download a supplied URL that points to a file on an http, https or ftp server. If the file is
found to be an https s3 link then the file is downloaded using `get_file_from_s3`. The file is
downloaded and written to the jobstore if requested.
Encryption arguments are for passing to `get_file_from_s3` if required.
:param str any_url: URL for the file
:param str encryption_key: Path to the master key
:param bool per_file_encryption: If encrypted, was the file encrypted using the per-file method?
:param bool write_to_jobstore: Should the file be written to the job store?
:return: Path to the downloaded file or fsID (if write_to_jobstore was True)
:rtype: str|toil.fileStore.FileID
"""
work_dir = job.fileStore.getLocalTempDir()
filename = '/'.join([work_dir, str(uuid.uuid4())])
url = any_url
parsed_url = urlparse(any_url)
try:
response = urllib2.urlopen(url)
except urllib2.HTTPError:
if parsed_url.netloc.startswith(('s3', 'S3')):
job.fileStore.logToMaster("Detected https link is for an encrypted s3 file.")
return get_file_from_s3(job, any_url, encryption_key=encryption_key,
per_file_encryption=per_file_encryption,
write_to_jobstore=write_to_jobstore)
else:
raise
else:
with open(filename, 'w') as f:
f.write(response.read())
if write_to_jobstore:
filename = job.fileStore.writeGlobalFile(filename)
return filename | python | def get_file_from_url(job, any_url, encryption_key=None, per_file_encryption=True,
write_to_jobstore=True):
"""
Download a supplied URL that points to a file on an http, https or ftp server. If the file is
found to be an https s3 link then the file is downloaded using `get_file_from_s3`. The file is
downloaded and written to the jobstore if requested.
Encryption arguments are for passing to `get_file_from_s3` if required.
:param str any_url: URL for the file
:param str encryption_key: Path to the master key
:param bool per_file_encryption: If encrypted, was the file encrypted using the per-file method?
:param bool write_to_jobstore: Should the file be written to the job store?
:return: Path to the downloaded file or fsID (if write_to_jobstore was True)
:rtype: str|toil.fileStore.FileID
"""
work_dir = job.fileStore.getLocalTempDir()
filename = '/'.join([work_dir, str(uuid.uuid4())])
url = any_url
parsed_url = urlparse(any_url)
try:
response = urllib2.urlopen(url)
except urllib2.HTTPError:
if parsed_url.netloc.startswith(('s3', 'S3')):
job.fileStore.logToMaster("Detected https link is for an encrypted s3 file.")
return get_file_from_s3(job, any_url, encryption_key=encryption_key,
per_file_encryption=per_file_encryption,
write_to_jobstore=write_to_jobstore)
else:
raise
else:
with open(filename, 'w') as f:
f.write(response.read())
if write_to_jobstore:
filename = job.fileStore.writeGlobalFile(filename)
return filename | [
"def",
"get_file_from_url",
"(",
"job",
",",
"any_url",
",",
"encryption_key",
"=",
"None",
",",
"per_file_encryption",
"=",
"True",
",",
"write_to_jobstore",
"=",
"True",
")",
":",
"work_dir",
"=",
"job",
".",
"fileStore",
".",
"getLocalTempDir",
"(",
")",
"filename",
"=",
"'/'",
".",
"join",
"(",
"[",
"work_dir",
",",
"str",
"(",
"uuid",
".",
"uuid4",
"(",
")",
")",
"]",
")",
"url",
"=",
"any_url",
"parsed_url",
"=",
"urlparse",
"(",
"any_url",
")",
"try",
":",
"response",
"=",
"urllib2",
".",
"urlopen",
"(",
"url",
")",
"except",
"urllib2",
".",
"HTTPError",
":",
"if",
"parsed_url",
".",
"netloc",
".",
"startswith",
"(",
"(",
"'s3'",
",",
"'S3'",
")",
")",
":",
"job",
".",
"fileStore",
".",
"logToMaster",
"(",
"\"Detected https link is for an encrypted s3 file.\"",
")",
"return",
"get_file_from_s3",
"(",
"job",
",",
"any_url",
",",
"encryption_key",
"=",
"encryption_key",
",",
"per_file_encryption",
"=",
"per_file_encryption",
",",
"write_to_jobstore",
"=",
"write_to_jobstore",
")",
"else",
":",
"raise",
"else",
":",
"with",
"open",
"(",
"filename",
",",
"'w'",
")",
"as",
"f",
":",
"f",
".",
"write",
"(",
"response",
".",
"read",
"(",
")",
")",
"if",
"write_to_jobstore",
":",
"filename",
"=",
"job",
".",
"fileStore",
".",
"writeGlobalFile",
"(",
"filename",
")",
"return",
"filename"
] | Download a supplied URL that points to a file on an http, https or ftp server. If the file is
found to be an https s3 link then the file is downloaded using `get_file_from_s3`. The file is
downloaded and written to the jobstore if requested.
Encryption arguments are for passing to `get_file_from_s3` if required.
:param str any_url: URL for the file
:param str encryption_key: Path to the master key
:param bool per_file_encryption: If encrypted, was the file encrypted using the per-file method?
:param bool write_to_jobstore: Should the file be written to the job store?
:return: Path to the downloaded file or fsID (if write_to_jobstore was True)
:rtype: str|toil.fileStore.FileID | [
"Download",
"a",
"supplied",
"URL",
"that",
"points",
"to",
"a",
"file",
"on",
"an",
"http",
"https",
"or",
"ftp",
"server",
".",
"If",
"the",
"file",
"is",
"found",
"to",
"be",
"an",
"https",
"s3",
"link",
"then",
"the",
"file",
"is",
"downloaded",
"using",
"get_file_from_s3",
".",
"The",
"file",
"is",
"downloaded",
"and",
"written",
"to",
"the",
"jobstore",
"if",
"requested",
".",
"Encryption",
"arguments",
"are",
"for",
"passing",
"to",
"get_file_from_s3",
"if",
"required",
"."
] | 06310682c50dcf8917b912c8e551299ff7ee41ce | https://github.com/BD2KGenomics/protect/blob/06310682c50dcf8917b912c8e551299ff7ee41ce/src/protect/common.py#L337-L373 | train |
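A sketch of get_file_from_url() inside a Toil job; the URLs and key path are placeholders. A plain https link is fetched directly, while an https link that answers with an HTTP error and points at an s3 host falls through to get_file_from_s3 with the given key.

from protect.common import get_file_from_url

ref_fsid = get_file_from_url(job, 'https://example.org/references/hg38.fa.tar.gz')
enc_fsid = get_file_from_url(job,
                             'https://s3-us-west-2.amazonaws.com/example-bucket/input.tar.gz',
                             encryption_key='/keys/master.key', per_file_encryption=True)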
BD2KGenomics/protect | src/protect/common.py | bam2fastq | def bam2fastq(bamfile, univ_options, picard_options):
"""
Split an input bam to paired fastqs.
:param str bamfile: Path to a bam file
:param dict univ_options: Dict of universal options used by almost all tools
:param dict picard_options: Dict of options specific to Picard
:return: Path to the _1.fastq file
:rtype: str
"""
work_dir = os.path.split(bamfile)[0]
base_name = os.path.split(os.path.splitext(bamfile)[0])[1]
parameters = ['SamToFastq',
''.join(['I=', docker_path(bamfile)]),
''.join(['F=/data/', base_name, '_1.fastq']),
''.join(['F2=/data/', base_name, '_2.fastq']),
''.join(['FU=/data/', base_name, '_UP.fastq'])]
docker_call(tool='picard', tool_parameters=parameters, work_dir=work_dir,
dockerhub=univ_options['dockerhub'], java_xmx=univ_options['java_Xmx'],
tool_version=picard_options['version'])
first_fastq = ''.join([work_dir, '/', base_name, '_1.fastq'])
assert os.path.exists(first_fastq)
return first_fastq | python | def bam2fastq(bamfile, univ_options, picard_options):
"""
Split an input bam to paired fastqs.
:param str bamfile: Path to a bam file
:param dict univ_options: Dict of universal options used by almost all tools
:param dict picard_options: Dict of options specific to Picard
:return: Path to the _1.fastq file
:rtype: str
"""
work_dir = os.path.split(bamfile)[0]
base_name = os.path.split(os.path.splitext(bamfile)[0])[1]
parameters = ['SamToFastq',
''.join(['I=', docker_path(bamfile)]),
''.join(['F=/data/', base_name, '_1.fastq']),
''.join(['F2=/data/', base_name, '_2.fastq']),
''.join(['FU=/data/', base_name, '_UP.fastq'])]
docker_call(tool='picard', tool_parameters=parameters, work_dir=work_dir,
dockerhub=univ_options['dockerhub'], java_xmx=univ_options['java_Xmx'],
tool_version=picard_options['version'])
first_fastq = ''.join([work_dir, '/', base_name, '_1.fastq'])
assert os.path.exists(first_fastq)
return first_fastq | [
"def",
"bam2fastq",
"(",
"bamfile",
",",
"univ_options",
",",
"picard_options",
")",
":",
"work_dir",
"=",
"os",
".",
"path",
".",
"split",
"(",
"bamfile",
")",
"[",
"0",
"]",
"base_name",
"=",
"os",
".",
"path",
".",
"split",
"(",
"os",
".",
"path",
".",
"splitext",
"(",
"bamfile",
")",
"[",
"0",
"]",
")",
"[",
"1",
"]",
"parameters",
"=",
"[",
"'SamToFastq'",
",",
"''",
".",
"join",
"(",
"[",
"'I='",
",",
"docker_path",
"(",
"bamfile",
")",
"]",
")",
",",
"''",
".",
"join",
"(",
"[",
"'F=/data/'",
",",
"base_name",
",",
"'_1.fastq'",
"]",
")",
",",
"''",
".",
"join",
"(",
"[",
"'F2=/data/'",
",",
"base_name",
",",
"'_2.fastq'",
"]",
")",
",",
"''",
".",
"join",
"(",
"[",
"'FU=/data/'",
",",
"base_name",
",",
"'_UP.fastq'",
"]",
")",
"]",
"docker_call",
"(",
"tool",
"=",
"'picard'",
",",
"tool_parameters",
"=",
"parameters",
",",
"work_dir",
"=",
"work_dir",
",",
"dockerhub",
"=",
"univ_options",
"[",
"'dockerhub'",
"]",
",",
"java_xmx",
"=",
"univ_options",
"[",
"'java_Xmx'",
"]",
",",
"tool_version",
"=",
"picard_options",
"[",
"'version'",
"]",
")",
"first_fastq",
"=",
"''",
".",
"join",
"(",
"[",
"work_dir",
",",
"'/'",
",",
"base_name",
",",
"'_1.fastq'",
"]",
")",
"assert",
"os",
".",
"path",
".",
"exists",
"(",
"first_fastq",
")",
"return",
"first_fastq"
] | Split an input bam to paired fastqs.
:param str bamfile: Path to a bam file
:param dict univ_options: Dict of universal options used by almost all tools
:param dict picard_options: Dict of options specific to Picard
:return: Path to the _1.fastq file
:rtype: str | [
"Split",
"an",
"input",
"bam",
"to",
"paired",
"fastqs",
"."
] | 06310682c50dcf8917b912c8e551299ff7ee41ce | https://github.com/BD2KGenomics/protect/blob/06310682c50dcf8917b912c8e551299ff7ee41ce/src/protect/common.py#L376-L398 | train |
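A sketch for bam2fastq(); the option dicts carry only the keys the function reads, and the values (docker hub account, Java heap, Picard version) are illustrative rather than taken from the record.

from protect.common import bam2fastq

univ_options = {'dockerhub': 'someaccount', 'java_Xmx': '20G'}
picard_options = {'version': '1.135'}
first_fastq = bam2fastq('/work/tumor_rna.bam', univ_options, picard_options)
# '/work/tumor_rna_1.fastq'; the _2 and _UP mates are written alongside it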
BD2KGenomics/protect | src/protect/common.py | export_results | def export_results(job, fsid, file_name, univ_options, subfolder=None):
"""
Write out a file to a given location. The location can be either a directory on the local
machine, or a folder with a bucket on AWS.
:param str fsid: The file store id for the file to be exported
:param str file_name: The name of the file that needs to be exported (path to file is also
acceptable)
:param dict univ_options: Dict of universal options used by almost all tools
:param str subfolder: A sub folder within the main folder where this data should go
:return: None
"""
job.fileStore.logToMaster('Exporting %s to output location' % fsid)
file_name = os.path.basename(file_name)
try:
assert univ_options['output_folder'], 'Need a path to a folder to write out files'
assert univ_options['storage_location'], 'Need to know where the files need to go. ' + \
'Local or AWS/Azure, etc.'
except AssertionError as err:
# This isn't a game killer. Continue the pipeline without erroring out but do inform the
# user about it.
print('ERROR:', err.message, file=sys.stderr)
return
if univ_options['output_folder'] == 'NA':
output_folder = ''
else:
output_folder = univ_options['output_folder']
output_folder = os.path.join(output_folder, univ_options['patient'])
output_folder = os.path.join(output_folder, subfolder) if subfolder else output_folder
if univ_options['storage_location'] == 'local':
# Handle Local
try:
# Create the directory if required
os.makedirs(output_folder, 0755)
except OSError as err:
if err.errno != errno.EEXIST:
raise
output_url = 'file://' + os.path.join(output_folder, file_name)
elif univ_options['storage_location'].startswith('aws'):
# Handle AWS
bucket_name = univ_options['storage_location'].split(':')[-1]
output_url = os.path.join('S3://', bucket_name, output_folder.strip('/'), file_name)
# Can't do Azure or google yet.
else:
# TODO: Azure support
print("Currently doesn't support anything but Local and aws.")
return
job.fileStore.exportFile(fsid, output_url) | python | def export_results(job, fsid, file_name, univ_options, subfolder=None):
"""
Write out a file to a given location. The location can be either a directory on the local
machine, or a folder with a bucket on AWS.
:param str fsid: The file store id for the file to be exported
:param str file_name: The name of the file that needs to be exported (path to file is also
acceptable)
:param dict univ_options: Dict of universal options used by almost all tools
:param str subfolder: A sub folder within the main folder where this data should go
:return: None
"""
job.fileStore.logToMaster('Exporting %s to output location' % fsid)
file_name = os.path.basename(file_name)
try:
assert univ_options['output_folder'], 'Need a path to a folder to write out files'
assert univ_options['storage_location'], 'Need to know where the files need to go. ' + \
'Local or AWS/Azure, etc.'
except AssertionError as err:
# This isn't a game killer. Continue the pipeline without erroring out but do inform the
# user about it.
print('ERROR:', err.message, file=sys.stderr)
return
if univ_options['output_folder'] == 'NA':
output_folder = ''
else:
output_folder = univ_options['output_folder']
output_folder = os.path.join(output_folder, univ_options['patient'])
output_folder = os.path.join(output_folder, subfolder) if subfolder else output_folder
if univ_options['storage_location'] == 'local':
# Handle Local
try:
# Create the directory if required
os.makedirs(output_folder, 0755)
except OSError as err:
if err.errno != errno.EEXIST:
raise
output_url = 'file://' + os.path.join(output_folder, file_name)
elif univ_options['storage_location'].startswith('aws'):
# Handle AWS
bucket_name = univ_options['storage_location'].split(':')[-1]
output_url = os.path.join('S3://', bucket_name, output_folder.strip('/'), file_name)
# Can't do Azure or google yet.
else:
# TODO: Azure support
print("Currently doesn't support anything but Local and aws.")
return
job.fileStore.exportFile(fsid, output_url) | [
"def",
"export_results",
"(",
"job",
",",
"fsid",
",",
"file_name",
",",
"univ_options",
",",
"subfolder",
"=",
"None",
")",
":",
"job",
".",
"fileStore",
".",
"logToMaster",
"(",
"'Exporting %s to output location'",
"%",
"fsid",
")",
"file_name",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"file_name",
")",
"try",
":",
"assert",
"univ_options",
"[",
"'output_folder'",
"]",
",",
"'Need a path to a folder to write out files'",
"assert",
"univ_options",
"[",
"'storage_location'",
"]",
",",
"'Need to know where the files need to go. '",
"+",
"'Local or AWS/Azure, etc.'",
"except",
"AssertionError",
"as",
"err",
":",
"print",
"(",
"'ERROR:'",
",",
"err",
".",
"message",
",",
"file",
"=",
"sys",
".",
"stderr",
")",
"return",
"if",
"univ_options",
"[",
"'output_folder'",
"]",
"==",
"'NA'",
":",
"output_folder",
"=",
"''",
"else",
":",
"output_folder",
"=",
"univ_options",
"[",
"'output_folder'",
"]",
"output_folder",
"=",
"os",
".",
"path",
".",
"join",
"(",
"output_folder",
",",
"univ_options",
"[",
"'patient'",
"]",
")",
"output_folder",
"=",
"os",
".",
"path",
".",
"join",
"(",
"output_folder",
",",
"subfolder",
")",
"if",
"subfolder",
"else",
"output_folder",
"if",
"univ_options",
"[",
"'storage_location'",
"]",
"==",
"'local'",
":",
"try",
":",
"os",
".",
"makedirs",
"(",
"output_folder",
",",
"0755",
")",
"except",
"OSError",
"as",
"err",
":",
"if",
"err",
".",
"errno",
"!=",
"errno",
".",
"EEXIST",
":",
"raise",
"output_url",
"=",
"'file://'",
"+",
"os",
".",
"path",
".",
"join",
"(",
"output_folder",
",",
"file_name",
")",
"elif",
"univ_options",
"[",
"'storage_location'",
"]",
".",
"startswith",
"(",
"'aws'",
")",
":",
"bucket_name",
"=",
"univ_options",
"[",
"'storage_location'",
"]",
".",
"split",
"(",
"':'",
")",
"[",
"-",
"1",
"]",
"output_url",
"=",
"os",
".",
"path",
".",
"join",
"(",
"'S3://'",
",",
"bucket_name",
",",
"output_folder",
".",
"strip",
"(",
"'/'",
")",
",",
"file_name",
")",
"else",
":",
"print",
"(",
"\"Currently doesn't support anything but Local and aws.\"",
")",
"return",
"job",
".",
"fileStore",
".",
"exportFile",
"(",
"fsid",
",",
"output_url",
")"
] | Write out a file to a given location. The location can be either a directory on the local
machine, or a folder with a bucket on AWS.
:param str fsid: The file store id for the file to be exported
:param str file_name: The name of the file that needs to be exported (path to file is also
acceptable)
:param dict univ_options: Dict of universal options used by almost all tools
:param str subfolder: A sub folder within the main folder where this data should go
:return: None | [
"Write",
"out",
"a",
"file",
"to",
"a",
"given",
"location",
".",
"The",
"location",
"can",
"be",
"either",
"a",
"directory",
"on",
"the",
"local",
"machine",
"or",
"a",
"folder",
"with",
"a",
"bucket",
"on",
"AWS",
"."
] | 06310682c50dcf8917b912c8e551299ff7ee41ce | https://github.com/BD2KGenomics/protect/blob/06310682c50dcf8917b912c8e551299ff7ee41ce/src/protect/common.py#L401-L448 | train |
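A sketch of export_results() inside a Toil job; report_fsid is a placeholder file-store ID, and univ_options carries only the keys the function reads, here for a local destination (an 'aws:<bucket>' storage_location would build an S3 URL instead).

from protect.common import export_results

univ_options = {'patient': 'TEST_PATIENT', 'output_folder': '/mnt/protect_out',
                'storage_location': 'local'}
export_results(job, report_fsid, 'binding_predictions.tsv', univ_options, subfolder='mhci')
# exports to file:///mnt/protect_out/TEST_PATIENT/mhci/binding_predictions.tsv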
BD2KGenomics/protect | src/protect/common.py | parse_chromosome_string | def parse_chromosome_string(job, chromosome_string):
"""
Parse a chromosome string into a list.
:param chromosome_string: Input chromosome string
:return: list of chromosomes to handle
:rtype: list
"""
if chromosome_string is None:
return []
else:
assert isinstance(chromosome_string, str)
chroms = [c.strip() for c in chromosome_string.split(',')]
if 'canonical' in chroms:
assert 'canonical_chr' not in chroms, 'Cannot have canonical and canonical_chr'
chr_prefix = False
chroms.remove('canonical')
out_chroms = [str(c) for c in range(1, 23)] + ['X', 'Y']
elif 'canonical_chr' in chroms:
assert 'canonical' not in chroms, 'Cannot have canonical and canonical_chr'
chr_prefix = True
chroms.remove('canonical_chr')
out_chroms = ['chr' + str(c) for c in range(1, 23)] + ['chrX', 'chrY']
else:
chr_prefix = None
out_chroms = []
for chrom in chroms:
if chr_prefix is not None and chrom.startswith('chr') is not chr_prefix:
job.fileStore.logToMaster('chromosome %s does not match the rest that %s begin '
'with "chr".' % (chrom,
'all' if chr_prefix else 'don\'t'),
level=logging.WARNING)
out_chroms.append(chrom)
return chrom_sorted(out_chroms) | python | def parse_chromosome_string(job, chromosome_string):
"""
Parse a chromosome string into a list.
:param chromosome_string: Input chromosome string
:return: list of chromosomes to handle
:rtype: list
"""
if chromosome_string is None:
return []
else:
assert isinstance(chromosome_string, str)
chroms = [c.strip() for c in chromosome_string.split(',')]
if 'canonical' in chroms:
assert 'canonical_chr' not in chroms, 'Cannot have canonical and canonical_chr'
chr_prefix = False
chroms.remove('canonical')
out_chroms = [str(c) for c in range(1, 23)] + ['X', 'Y']
elif 'canonical_chr' in chroms:
assert 'canonical' not in chroms, 'Cannot have canonical and canonical_chr'
chr_prefix = True
chroms.remove('canonical_chr')
out_chroms = ['chr' + str(c) for c in range(1, 23)] + ['chrX', 'chrY']
else:
chr_prefix = None
out_chroms = []
for chrom in chroms:
if chr_prefix is not None and chrom.startswith('chr') is not chr_prefix:
job.fileStore.logToMaster('chromosome %s does not match the rest that %s begin '
'with "chr".' % (chrom,
'all' if chr_prefix else 'don\'t'),
level=logging.WARNING)
out_chroms.append(chrom)
return chrom_sorted(out_chroms) | [
"def",
"parse_chromosome_string",
"(",
"job",
",",
"chromosome_string",
")",
":",
"if",
"chromosome_string",
"is",
"None",
":",
"return",
"[",
"]",
"else",
":",
"assert",
"isinstance",
"(",
"chromosome_string",
",",
"str",
")",
"chroms",
"=",
"[",
"c",
".",
"strip",
"(",
")",
"for",
"c",
"in",
"chromosome_string",
".",
"split",
"(",
"','",
")",
"]",
"if",
"'canonical'",
"in",
"chroms",
":",
"assert",
"'canonical_chr'",
"not",
"in",
"chroms",
",",
"'Cannot have canonical and canonical_chr'",
"chr_prefix",
"=",
"False",
"chroms",
".",
"remove",
"(",
"'canonical'",
")",
"out_chroms",
"=",
"[",
"str",
"(",
"c",
")",
"for",
"c",
"in",
"range",
"(",
"1",
",",
"23",
")",
"]",
"+",
"[",
"'X'",
",",
"'Y'",
"]",
"elif",
"'canonical_chr'",
"in",
"chroms",
":",
"assert",
"'canonical'",
"not",
"in",
"chroms",
",",
"'Cannot have canonical and canonical_chr'",
"chr_prefix",
"=",
"True",
"chroms",
".",
"remove",
"(",
"'canonical_chr'",
")",
"out_chroms",
"=",
"[",
"'chr'",
"+",
"str",
"(",
"c",
")",
"for",
"c",
"in",
"range",
"(",
"1",
",",
"23",
")",
"]",
"+",
"[",
"'chrX'",
",",
"'chrY'",
"]",
"else",
":",
"chr_prefix",
"=",
"None",
"out_chroms",
"=",
"[",
"]",
"for",
"chrom",
"in",
"chroms",
":",
"if",
"chr_prefix",
"is",
"not",
"None",
"and",
"chrom",
".",
"startswith",
"(",
"'chr'",
")",
"is",
"not",
"chr_prefix",
":",
"job",
".",
"fileStore",
".",
"logToMaster",
"(",
"'chromosome %s does not match the rest that %s begin '",
"'with \"chr\".'",
"%",
"(",
"chrom",
",",
"'all'",
"if",
"chr_prefix",
"else",
"'don\\'t'",
")",
",",
"level",
"=",
"logging",
".",
"WARNING",
")",
"out_chroms",
".",
"append",
"(",
"chrom",
")",
"return",
"chrom_sorted",
"(",
"out_chroms",
")"
] | Parse a chromosome string into a list.
:param chromosome_string: Input chromosome string
:return: list of chromosomes to handle
:rtype: list | [
"Parse",
"a",
"chromosome",
"string",
"into",
"a",
"list",
"."
] | 06310682c50dcf8917b912c8e551299ff7ee41ce | https://github.com/BD2KGenomics/protect/blob/06310682c50dcf8917b912c8e551299ff7ee41ce/src/protect/common.py#L521-L554 | train |
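Two sketch calls for parse_chromosome_string(); 'canonical_chr' expands to chr1-chr22 plus chrX and chrY, any extra names are appended, and the final order comes from chrom_sorted.

from protect.common import parse_chromosome_string

parse_chromosome_string(job, 'chr1, chr7, chrX')     # ['chr1', 'chr7', 'chrX']
parse_chromosome_string(job, 'canonical_chr, chrM')  # canonical set plus chrM, chrom-sorted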
BD2KGenomics/protect | src/protect/common.py | email_report | def email_report(job, univ_options):
"""
Send an email to the user when the run finishes.
:param dict univ_options: Dict of universal options used by almost all tools
"""
fromadd = "[email protected]"
msg = MIMEMultipart()
msg['From'] = fromadd
if univ_options['mail_to'] is None:
return
else:
msg['To'] = univ_options['mail_to']
msg['Subject'] = "Protect run for sample %s completed successfully." % univ_options['patient']
body = "Protect run for sample %s completed successfully." % univ_options['patient']
msg.attach(MIMEText(body, 'plain'))
text = msg.as_string()
try:
server = smtplib.SMTP('localhost')
except socket.error as e:
if e.errno == 111:
print('No mail utils on this machine')
else:
print('Unexpected error while attempting to send an email.')
print('Could not send email report')
except:
print('Could not send email report')
else:
server.sendmail(fromadd, msg['To'], text)
server.quit() | python | def email_report(job, univ_options):
"""
Send an email to the user when the run finishes.
:param dict univ_options: Dict of universal options used by almost all tools
"""
fromadd = "[email protected]"
msg = MIMEMultipart()
msg['From'] = fromadd
if univ_options['mail_to'] is None:
return
else:
msg['To'] = univ_options['mail_to']
msg['Subject'] = "Protect run for sample %s completed successfully." % univ_options['patient']
body = "Protect run for sample %s completed successfully." % univ_options['patient']
msg.attach(MIMEText(body, 'plain'))
text = msg.as_string()
try:
server = smtplib.SMTP('localhost')
except socket.error as e:
if e.errno == 111:
print('No mail utils on this machine')
else:
print('Unexpected error while attempting to send an email.')
print('Could not send email report')
except:
print('Could not send email report')
else:
server.sendmail(fromadd, msg['To'], text)
server.quit() | [
"def",
"email_report",
"(",
"job",
",",
"univ_options",
")",
":",
"fromadd",
"=",
"\"[email protected]\"",
"msg",
"=",
"MIMEMultipart",
"(",
")",
"msg",
"[",
"'From'",
"]",
"=",
"fromadd",
"if",
"univ_options",
"[",
"'mail_to'",
"]",
"is",
"None",
":",
"return",
"else",
":",
"msg",
"[",
"'To'",
"]",
"=",
"univ_options",
"[",
"'mail_to'",
"]",
"msg",
"[",
"'Subject'",
"]",
"=",
"\"Protect run for sample %s completed successfully.\"",
"%",
"univ_options",
"[",
"'patient'",
"]",
"body",
"=",
"\"Protect run for sample %s completed successfully.\"",
"%",
"univ_options",
"[",
"'patient'",
"]",
"msg",
".",
"attach",
"(",
"MIMEText",
"(",
"body",
",",
"'plain'",
")",
")",
"text",
"=",
"msg",
".",
"as_string",
"(",
")",
"try",
":",
"server",
"=",
"smtplib",
".",
"SMTP",
"(",
"'localhost'",
")",
"except",
"socket",
".",
"error",
"as",
"e",
":",
"if",
"e",
".",
"errno",
"==",
"111",
":",
"print",
"(",
"'No mail utils on this maachine'",
")",
"else",
":",
"print",
"(",
"'Unexpected error while attempting to send an email.'",
")",
"print",
"(",
"'Could not send email report'",
")",
"except",
":",
"print",
"(",
"'Could not send email report'",
")",
"else",
":",
"server",
".",
"sendmail",
"(",
"fromadd",
",",
"msg",
"[",
"'To'",
"]",
",",
"text",
")",
"server",
".",
"quit",
"(",
")"
] | Send an email to the user when the run finishes.
:param dict univ_options: Dict of universal options used by almost all tools | [
"Send",
"an",
"email",
"to",
"the",
"user",
"when",
"the",
"run",
"finishes",
"."
] | 06310682c50dcf8917b912c8e551299ff7ee41ce | https://github.com/BD2KGenomics/protect/blob/06310682c50dcf8917b912c8e551299ff7ee41ce/src/protect/common.py#L619-L649 | train |
0k/kids.cache | src/kids/cache/__init__.py | make_key_hippie | def make_key_hippie(obj, typed=True):
"""Return hashable structure from non-hashable structure using hippie means
dict and set are sorted and their content subjected to same hippie means.
Note that the key identifies the current content of the structure.
"""
ftype = type if typed else lambda o: None
if is_hashable(obj):
## DO NOT RETURN hash(obj), as hash collision would generate bad
## cache collisions.
return obj, ftype(obj)
## should we try to convert to frozen{set,dict} to get the C
## hashing function speed? But the conversion has a cost also.
if isinstance(obj, set):
obj = sorted(obj)
if isinstance(obj, (list, tuple)):
return tuple(make_key_hippie(e, typed) for e in obj)
if isinstance(obj, dict):
return tuple(sorted(((make_key_hippie(k, typed),
make_key_hippie(v, typed))
for k, v in obj.items())))
raise ValueError(
"%r can not be hashed. Try providing a custom key function."
% obj) | python | def make_key_hippie(obj, typed=True):
"""Return hashable structure from non-hashable structure using hippie means
dict and set are sorted and their content subjected to same hippie means.
Note that the key identifies the current content of the structure.
"""
ftype = type if typed else lambda o: None
if is_hashable(obj):
## DO NOT RETURN hash(obj), as hash collision would generate bad
## cache collisions.
return obj, ftype(obj)
## should we try to convert to frozen{set,dict} to get the C
## hashing function speed? But the conversion has a cost also.
if isinstance(obj, set):
obj = sorted(obj)
if isinstance(obj, (list, tuple)):
return tuple(make_key_hippie(e, typed) for e in obj)
if isinstance(obj, dict):
return tuple(sorted(((make_key_hippie(k, typed),
make_key_hippie(v, typed))
for k, v in obj.items())))
raise ValueError(
"%r can not be hashed. Try providing a custom key function."
% obj) | [
"def",
"make_key_hippie",
"(",
"obj",
",",
"typed",
"=",
"True",
")",
":",
"ftype",
"=",
"type",
"if",
"typed",
"else",
"lambda",
"o",
":",
"None",
"if",
"is_hashable",
"(",
"obj",
")",
":",
"return",
"obj",
",",
"ftype",
"(",
"obj",
")",
"if",
"isinstance",
"(",
"obj",
",",
"set",
")",
":",
"obj",
"=",
"sorted",
"(",
"obj",
")",
"if",
"isinstance",
"(",
"obj",
",",
"(",
"list",
",",
"tuple",
")",
")",
":",
"return",
"tuple",
"(",
"make_key_hippie",
"(",
"e",
",",
"typed",
")",
"for",
"e",
"in",
"obj",
")",
"if",
"isinstance",
"(",
"obj",
",",
"dict",
")",
":",
"return",
"tuple",
"(",
"sorted",
"(",
"(",
"(",
"make_key_hippie",
"(",
"k",
",",
"typed",
")",
",",
"make_key_hippie",
"(",
"v",
",",
"typed",
")",
")",
"for",
"k",
",",
"v",
"in",
"obj",
".",
"items",
"(",
")",
")",
")",
")",
"raise",
"ValueError",
"(",
"\"%r can not be hashed. Try providing a custom key function.\"",
"%",
"obj",
")"
] | Return hashable structure from non-hashable structure using hippie means
dict and set are sorted and their content subjected to same hippie means.
Note that the key identifies the current content of the structure. | [
"Return",
"hashable",
"structure",
"from",
"non",
"-",
"hashable",
"structure",
"using",
"hippie",
"means"
] | 668f3b966877c4a0855d60e05cc3706cf37e4570 | https://github.com/0k/kids.cache/blob/668f3b966877c4a0855d60e05cc3706cf37e4570/src/kids/cache/__init__.py#L29-L54 | train |
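A sketch for make_key_hippie(); two dicts with identical content but different item order reduce to the same hashable key, which is what lets otherwise unhashable call arguments be cached.

from kids.cache import make_key_hippie

key_a = make_key_hippie({'ids': [3, 1, 2], 'opts': {'deep': True}})
key_b = make_key_hippie({'opts': {'deep': True}, 'ids': [3, 1, 2]})
assert key_a == key_b              # content, not insertion order, drives the key
assert hash(key_a) == hash(key_b)  # the key itself is hashable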