repo | path | func_name | original_string | language | code | code_tokens | docstring | docstring_tokens | sha | url | partition
---|---|---|---|---|---|---|---|---|---|---|---
acutesoftware/AIKIF | aikif/lib/cls_file.py | TextFile.count_lines_in_file | def count_lines_in_file(self, fname=''):
""" you wont believe what this method does """
i = 0
if fname == '':
fname = self.fullname
try:
#with open(fname, encoding="utf8") as f:
with codecs.open(fname, "r",encoding='utf8', errors='ignore') as f:
for i, _ in enumerate(f):
pass
return i + 1
except Exception as ex:
print('cant count lines in file in "', fname, '":', str(ex))
return 0 | python | def count_lines_in_file(self, fname=''):
""" you wont believe what this method does """
i = 0
if fname == '':
fname = self.fullname
try:
#with open(fname, encoding="utf8") as f:
with codecs.open(fname, "r",encoding='utf8', errors='ignore') as f:
for i, _ in enumerate(f):
pass
return i + 1
except Exception as ex:
print('cant count lines in file in "', fname, '":', str(ex))
return 0 | [
"def",
"count_lines_in_file",
"(",
"self",
",",
"fname",
"=",
"''",
")",
":",
"i",
"=",
"0",
"if",
"fname",
"==",
"''",
":",
"fname",
"=",
"self",
".",
"fullname",
"try",
":",
"with",
"codecs",
".",
"open",
"(",
"fname",
",",
"\"r\"",
",",
"encoding",
"=",
"'utf8'",
",",
"errors",
"=",
"'ignore'",
")",
"as",
"f",
":",
"for",
"i",
",",
"_",
"in",
"enumerate",
"(",
"f",
")",
":",
"pass",
"return",
"i",
"+",
"1",
"except",
"Exception",
"as",
"ex",
":",
"print",
"(",
"'cant count lines in file in \"'",
",",
"fname",
",",
"'\":'",
",",
"str",
"(",
"ex",
")",
")",
"return",
"0"
] | you wont believe what this method does | [
"you",
"wont",
"believe",
"what",
"this",
"method",
"does"
] | fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03 | https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/lib/cls_file.py#L109-L122 | train |
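The record above documents `TextFile.count_lines_in_file`, which counts lines by enumerating a file opened through `codecs.open` with decode errors ignored. A minimal standalone sketch of the same idiom (independent of the `TextFile` class, whose constructor is not shown in this record) might be:

```python
import codecs

def count_lines(fname):
    """Count lines in a text file, tolerating undecodable bytes."""
    count = 0
    try:
        with codecs.open(fname, "r", encoding="utf8", errors="ignore") as f:
            # enumerate(..., start=1) keeps count equal to the number of lines seen so far
            for count, _ in enumerate(f, start=1):
                pass
        return count
    except IOError as ex:
        print('cannot count lines in "' + fname + '": ' + str(ex))
        return 0
```

Unlike the original, which returns `i + 1` and therefore reports 1 even for an empty file, this sketch returns 0 when no lines are read.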
acutesoftware/AIKIF | aikif/lib/cls_file.py | TextFile.count_lines_of_code | def count_lines_of_code(self, fname=''):
""" counts non blank lines """
if fname == '':
fname = self.fullname
loc = 0
try:
with open(fname) as f:
for l in f:
if l.strip() != '':
loc += 1
return loc
except Exception as ex:
print('cant count lines of code in "', fname, '":', str(ex))
return 0 | python | def count_lines_of_code(self, fname=''):
""" counts non blank lines """
if fname == '':
fname = self.fullname
loc = 0
try:
with open(fname) as f:
for l in f:
if l.strip() != '':
loc += 1
return loc
except Exception as ex:
print('cant count lines of code in "', fname, '":', str(ex))
return 0 | [
"def",
"count_lines_of_code",
"(",
"self",
",",
"fname",
"=",
"''",
")",
":",
"if",
"fname",
"==",
"''",
":",
"fname",
"=",
"self",
".",
"fullname",
"loc",
"=",
"0",
"try",
":",
"with",
"open",
"(",
"fname",
")",
"as",
"f",
":",
"for",
"l",
"in",
"f",
":",
"if",
"l",
".",
"strip",
"(",
")",
"!=",
"''",
":",
"loc",
"+=",
"1",
"return",
"loc",
"except",
"Exception",
"as",
"ex",
":",
"print",
"(",
"'cant count lines of code in \"'",
",",
"fname",
",",
"'\":'",
",",
"str",
"(",
"ex",
")",
")",
"return",
"0"
] | counts non blank lines | [
"counts",
"non",
"blank",
"lines"
] | fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03 | https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/lib/cls_file.py#L124-L137 | train |
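`count_lines_of_code` from the same class counts only non-blank lines. A standalone sketch of that loop, with the original's broad exception handling narrowed to `IOError`:

```python
def count_non_blank_lines(fname):
    """Count lines that contain something other than whitespace."""
    loc = 0
    try:
        with open(fname) as f:
            for line in f:
                if line.strip():
                    loc += 1
    except IOError as ex:
        print('cannot count lines of code in "' + fname + '": ' + str(ex))
    return loc
```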
acutesoftware/AIKIF | aikif/lib/cls_file.py | TextFile.get_file_sample | def get_file_sample(self, numLines=10):
""" retrieve a sample of the file """
res = ''
try:
with open(self.fullname, 'r') as f:
for line_num, line in enumerate(f):
res += str(line_num).zfill(5) + ' ' + line
if line_num >= numLines-1:
break
return res
except Exception as ex:
print('cant get_file_sample in "', self.fullname, '":', str(ex))
return res | python | def get_file_sample(self, numLines=10):
""" retrieve a sample of the file """
res = ''
try:
with open(self.fullname, 'r') as f:
for line_num, line in enumerate(f):
res += str(line_num).zfill(5) + ' ' + line
if line_num >= numLines-1:
break
return res
except Exception as ex:
print('cant get_file_sample in "', self.fullname, '":', str(ex))
return res | [
"def",
"get_file_sample",
"(",
"self",
",",
"numLines",
"=",
"10",
")",
":",
"res",
"=",
"''",
"try",
":",
"with",
"open",
"(",
"self",
".",
"fullname",
",",
"'r'",
")",
"as",
"f",
":",
"for",
"line_num",
",",
"line",
"in",
"enumerate",
"(",
"f",
")",
":",
"res",
"+=",
"str",
"(",
"line_num",
")",
".",
"zfill",
"(",
"5",
")",
"+",
"' '",
"+",
"line",
"if",
"line_num",
">=",
"numLines",
"-",
"1",
":",
"break",
"return",
"res",
"except",
"Exception",
"as",
"ex",
":",
"print",
"(",
"'cant get_file_sample in \"'",
",",
"self",
".",
"fullname",
",",
"'\":'",
",",
"str",
"(",
"ex",
")",
")",
"return",
"res"
] | retrieve a sample of the file | [
"retrieve",
"a",
"sample",
"of",
"the",
"file"
] | fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03 | https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/lib/cls_file.py#L140-L152 | train |
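`get_file_sample` builds a preview string of the first N lines, each prefixed with a zero-padded line number. A reduced sketch that takes the filename directly (the original reads `self.fullname`):

```python
def file_sample(fname, num_lines=10):
    """Return the first num_lines of fname, each prefixed with a 5-digit line number."""
    res = ''
    with open(fname, 'r') as f:
        for line_num, line in enumerate(f):
            res += str(line_num).zfill(5) + ' ' + line
            if line_num >= num_lines - 1:
                break
    return res

# print(file_sample('cls_file.py', 3))  # hypothetical path; prints lines 00000..00002
```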
acutesoftware/AIKIF | aikif/lib/cls_file.py | TextFile.append_text | def append_text(self, txt):
""" adds a line of text to a file """
with open(self.fullname, "a") as myfile:
myfile.write(txt) | python | def append_text(self, txt):
""" adds a line of text to a file """
with open(self.fullname, "a") as myfile:
myfile.write(txt) | [
"def",
"append_text",
"(",
"self",
",",
"txt",
")",
":",
"with",
"open",
"(",
"self",
".",
"fullname",
",",
"\"a\"",
")",
"as",
"myfile",
":",
"myfile",
".",
"write",
"(",
"txt",
")"
] | adds a line of text to a file | [
"adds",
"a",
"line",
"of",
"text",
"to",
"a",
"file"
] | fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03 | https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/lib/cls_file.py#L155-L158 | train |
acutesoftware/AIKIF | aikif/lib/cls_file.py | TextFile.load_file_to_string | def load_file_to_string(self):
""" load a file to a string """
try:
with open(self.fullname, 'r') as f:
txt = f.read()
return txt
except IOError:
return '' | python | def load_file_to_string(self):
""" load a file to a string """
try:
with open(self.fullname, 'r') as f:
txt = f.read()
return txt
except IOError:
return '' | [
"def",
"load_file_to_string",
"(",
"self",
")",
":",
"try",
":",
"with",
"open",
"(",
"self",
".",
"fullname",
",",
"'r'",
")",
"as",
"f",
":",
"txt",
"=",
"f",
".",
"read",
"(",
")",
"return",
"txt",
"except",
"IOError",
":",
"return",
"''"
] | load a file to a string | [
"load",
"a",
"file",
"to",
"a",
"string"
] | fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03 | https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/lib/cls_file.py#L172-L179 | train |
acutesoftware/AIKIF | aikif/lib/cls_file.py | TextFile.load_file_to_list | def load_file_to_list(self):
""" load a file to a list """
lst = []
try:
with open(self.fullname, 'r') as f:
for line in f:
lst.append(line)
return lst
except IOError:
return lst | python | def load_file_to_list(self):
""" load a file to a list """
lst = []
try:
with open(self.fullname, 'r') as f:
for line in f:
lst.append(line)
return lst
except IOError:
return lst | [
"def",
"load_file_to_list",
"(",
"self",
")",
":",
"lst",
"=",
"[",
"]",
"try",
":",
"with",
"open",
"(",
"self",
".",
"fullname",
",",
"'r'",
")",
"as",
"f",
":",
"for",
"line",
"in",
"f",
":",
"lst",
".",
"append",
"(",
"line",
")",
"return",
"lst",
"except",
"IOError",
":",
"return",
"lst"
] | load a file to a list | [
"load",
"a",
"file",
"to",
"a",
"list"
] | fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03 | https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/lib/cls_file.py#L181-L190 | train |
acutesoftware/AIKIF | aikif/web_app/page_programs.py | get_program_list | def get_program_list():
"""
get a HTML formatted view of all Python programs
in all subfolders of AIKIF, including imports and
lists of functions and classes
"""
colList = ['FileName','FileSize','Functions', 'Imports']
txt = '<TABLE width=90% border=0>'
txt += format_file_table_header(colList)
fl = web.GetFileList(aikif_folder, ['*.py'], 'N')
for f in fl:
if '__init__.py' in f:
txt += '<TR><TD colspan=4><HR><H3>' + get_subfolder(f) + '</h3></td></tr>\n'
else:
txt += format_file_to_html_row(f, colList)
txt += '</TABLE>\n\n'
return txt | python | def get_program_list():
"""
get a HTML formatted view of all Python programs
in all subfolders of AIKIF, including imports and
lists of functions and classes
"""
colList = ['FileName','FileSize','Functions', 'Imports']
txt = '<TABLE width=90% border=0>'
txt += format_file_table_header(colList)
fl = web.GetFileList(aikif_folder, ['*.py'], 'N')
for f in fl:
if '__init__.py' in f:
txt += '<TR><TD colspan=4><HR><H3>' + get_subfolder(f) + '</h3></td></tr>\n'
else:
txt += format_file_to_html_row(f, colList)
txt += '</TABLE>\n\n'
return txt | [
"def",
"get_program_list",
"(",
")",
":",
"colList",
"=",
"[",
"'FileName'",
",",
"'FileSize'",
",",
"'Functions'",
",",
"'Imports'",
"]",
"txt",
"=",
"'<TABLE width=90% border=0>'",
"txt",
"+=",
"format_file_table_header",
"(",
"colList",
")",
"fl",
"=",
"web",
".",
"GetFileList",
"(",
"aikif_folder",
",",
"[",
"'*.py'",
"]",
",",
"'N'",
")",
"for",
"f",
"in",
"fl",
":",
"if",
"'__init__.py'",
"in",
"f",
":",
"txt",
"+=",
"'<TR><TD colspan=4><HR><H3>'",
"+",
"get_subfolder",
"(",
"f",
")",
"+",
"'</h3></td></tr>\\n'",
"else",
":",
"txt",
"+=",
"format_file_to_html_row",
"(",
"f",
",",
"colList",
")",
"txt",
"+=",
"'</TABLE>\\n\\n'",
"return",
"txt"
] | get a HTML formatted view of all Python programs
in all subfolders of AIKIF, including imports and
lists of functions and classes | [
"get",
"a",
"HTML",
"formatted",
"view",
"of",
"all",
"Python",
"programs",
"in",
"all",
"subfolders",
"of",
"AIKIF",
"including",
"imports",
"and",
"lists",
"of",
"functions",
"and",
"classes"
] | fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03 | https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/web_app/page_programs.py#L32-L49 | train |
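`get_program_list` depends on helpers not shown in this record (`web.GetFileList`, `format_file_table_header`, `format_file_to_html_row`, `aikif_folder`). A self-contained sketch of the same idea, walking a folder for `.py` files and emitting a plain HTML table, could look like this (all names here are illustrative, not the project's own API):

```python
import os

def html_list_python_files(root):
    """Render a simple HTML table of every .py file found under root."""
    rows = ['<TABLE width=90% border=0>',
            '<TR><TD>FileName</TD><TD>FileSize</TD></TR>']
    for folder, _, files in os.walk(root):
        for name in sorted(files):
            if name.endswith('.py'):
                full = os.path.join(folder, name)
                rows.append('<TR><TD>%s</TD><TD>%d</TD></TR>' % (full, os.path.getsize(full)))
    rows.append('</TABLE>')
    return '\n'.join(rows)
```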
acutesoftware/AIKIF | aikif/web_app/page_programs.py | get_subfolder | def get_subfolder(txt):
"""
extracts a displayable subfolder name from full filename
"""
root_folder = os.sep + 'aikif' + os.sep
ndx = txt.find(root_folder, 1)
return txt[ndx:].replace('__init__.py', '') | python | def get_subfolder(txt):
"""
extracts a displayable subfolder name from full filename
"""
root_folder = os.sep + 'aikif' + os.sep
ndx = txt.find(root_folder, 1)
return txt[ndx:].replace('__init__.py', '') | [
"def",
"get_subfolder",
"(",
"txt",
")",
":",
"root_folder",
"=",
"os",
".",
"sep",
"+",
"'aikif'",
"+",
"os",
".",
"sep",
"ndx",
"=",
"txt",
".",
"find",
"(",
"root_folder",
",",
"1",
")",
"return",
"txt",
"[",
"ndx",
":",
"]",
".",
"replace",
"(",
"'__init__.py'",
",",
"''",
")"
] | extracts a displayable subfolder name from full filename | [
"extracts",
"a",
"displayable",
"subfolder",
"name",
"from",
"full",
"filename"
] | fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03 | https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/web_app/page_programs.py#L51-L57 | train |
acutesoftware/AIKIF | aikif/web_app/page_programs.py | get_functions | def get_functions(fname):
""" get a list of functions from a Python program """
txt = ''
with open(fname, 'r') as f:
for line in f:
if line.strip()[0:4] == 'def ':
txt += '<PRE>' + strip_text_after_string(strip_text_after_string(line, '#')[4:], ':') + '</PRE>\n'
if line[0:5] == 'class':
txt += '<PRE>' + strip_text_after_string(strip_text_after_string(line, '#'), ':') + '</PRE>\n'
return txt + '<BR>' | python | def get_functions(fname):
""" get a list of functions from a Python program """
txt = ''
with open(fname, 'r') as f:
for line in f:
if line.strip()[0:4] == 'def ':
txt += '<PRE>' + strip_text_after_string(strip_text_after_string(line, '#')[4:], ':') + '</PRE>\n'
if line[0:5] == 'class':
txt += '<PRE>' + strip_text_after_string(strip_text_after_string(line, '#'), ':') + '</PRE>\n'
return txt + '<BR>' | [
"def",
"get_functions",
"(",
"fname",
")",
":",
"txt",
"=",
"''",
"with",
"open",
"(",
"fname",
",",
"'r'",
")",
"as",
"f",
":",
"for",
"line",
"in",
"f",
":",
"if",
"line",
".",
"strip",
"(",
")",
"[",
"0",
":",
"4",
"]",
"==",
"'def '",
":",
"txt",
"+=",
"'<PRE>'",
"+",
"strip_text_after_string",
"(",
"strip_text_after_string",
"(",
"line",
",",
"'#'",
")",
"[",
"4",
":",
"]",
",",
"':'",
")",
"+",
"'</PRE>\\n'",
"if",
"line",
"[",
"0",
":",
"5",
"]",
"==",
"'class'",
":",
"txt",
"+=",
"'<PRE>'",
"+",
"strip_text_after_string",
"(",
"strip_text_after_string",
"(",
"line",
",",
"'#'",
")",
",",
"':'",
")",
"+",
"'</PRE>\\n'",
"return",
"txt",
"+",
"'<BR>'"
] | get a list of functions from a Python program | [
"get",
"a",
"list",
"of",
"functions",
"from",
"a",
"Python",
"program"
] | fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03 | https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/web_app/page_programs.py#L95-L104 | train |
acutesoftware/AIKIF | aikif/web_app/page_programs.py | strip_text_after_string | def strip_text_after_string(txt, junk):
""" used to strip any poorly documented comments at the end of function defs """
if junk in txt:
return txt[:txt.find(junk)]
else:
return txt | python | def strip_text_after_string(txt, junk):
""" used to strip any poorly documented comments at the end of function defs """
if junk in txt:
return txt[:txt.find(junk)]
else:
return txt | [
"def",
"strip_text_after_string",
"(",
"txt",
",",
"junk",
")",
":",
"if",
"junk",
"in",
"txt",
":",
"return",
"txt",
"[",
":",
"txt",
".",
"find",
"(",
"junk",
")",
"]",
"else",
":",
"return",
"txt"
] | used to strip any poorly documented comments at the end of function defs | [
"used",
"to",
"strip",
"any",
"poorly",
"documented",
"comments",
"at",
"the",
"end",
"of",
"function",
"defs"
] | fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03 | https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/web_app/page_programs.py#L106-L111 | train |
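`strip_text_after_string` truncates a string at the first occurrence of a marker and returns it unchanged when the marker is absent. The behaviour can be seen from a renamed standalone copy:

```python
def strip_text_after(txt, marker):
    """Return txt cut off at the first occurrence of marker (marker itself removed)."""
    if marker in txt:
        return txt[:txt.find(marker)]
    return txt

print(strip_text_after('def foo():  # trailing comment', '#'))  # -> 'def foo():  '
print(strip_text_after('no marker here', '#'))                  # -> unchanged
```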
acutesoftware/AIKIF | aikif/web_app/page_programs.py | get_imports | def get_imports(fname):
""" get a list of imports from a Python program """
txt = ''
with open(fname, 'r') as f:
for line in f:
if line[0:6] == 'import':
txt += '<PRE>' + strip_text_after_string(line[7:], ' as ') + '</PRE>\n'
return txt + '<BR>' | python | def get_imports(fname):
""" get a list of imports from a Python program """
txt = ''
with open(fname, 'r') as f:
for line in f:
if line[0:6] == 'import':
txt += '<PRE>' + strip_text_after_string(line[7:], ' as ') + '</PRE>\n'
return txt + '<BR>' | [
"def",
"get_imports",
"(",
"fname",
")",
":",
"txt",
"=",
"''",
"with",
"open",
"(",
"fname",
",",
"'r'",
")",
"as",
"f",
":",
"for",
"line",
"in",
"f",
":",
"if",
"line",
"[",
"0",
":",
"6",
"]",
"==",
"'import'",
":",
"txt",
"+=",
"'<PRE>'",
"+",
"strip_text_after_string",
"(",
"line",
"[",
"7",
":",
"]",
",",
"' as '",
")",
"+",
"'</PRE>\\n'",
"return",
"txt",
"+",
"'<BR>'"
] | get a list of imports from a Python program | [
"get",
"a",
"list",
"of",
"imports",
"from",
"a",
"Python",
"program"
] | fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03 | https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/web_app/page_programs.py#L113-L120 | train |
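`get_imports` only recognises lines that begin with `import` and strips any `as` alias; `from X import Y` lines are ignored. A standalone sketch that collects the module names instead of building HTML:

```python
def list_imports(fname):
    """Collect module names from plain 'import X [as Y]' lines (from-imports are skipped, as in the original)."""
    mods = []
    with open(fname, 'r') as f:
        for line in f:
            if line.startswith('import '):
                name = line[len('import '):].strip()
                mods.append(name.split(' as ')[0])  # drop any alias: 'import numpy as np' -> 'numpy'
    return mods
```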
acutesoftware/AIKIF | aikif/agents/learn/dummy_learn_1.py | main | def main(arg1=55, arg2='test', arg3=None):
"""
This is a sample program to show how a learning agent can
be logged using AIKIF.
The idea is that this main function is your algorithm, which
will run until it finds a successful result. The result is
returned and the time taken is logged.
There can optionally be have additional functions
to call to allow for easy logging access
"""
print('Starting dummy AI algorithm with :', arg1, arg2, arg3)
if arg3 is None:
arg3=[5,6,7,5,4,]
result = arg1 + arg3[0] * 7566.545 # dummy result
print('Done - returning ', result)
return result | python | def main(arg1=55, arg2='test', arg3=None):
"""
This is a sample program to show how a learning agent can
be logged using AIKIF.
The idea is that this main function is your algorithm, which
will run until it finds a successful result. The result is
returned and the time taken is logged.
There can optionally be have additional functions
to call to allow for easy logging access
"""
print('Starting dummy AI algorithm with :', arg1, arg2, arg3)
if arg3 is None:
arg3=[5,6,7,5,4,]
result = arg1 + arg3[0] * 7566.545 # dummy result
print('Done - returning ', result)
return result | [
"def",
"main",
"(",
"arg1",
"=",
"55",
",",
"arg2",
"=",
"'test'",
",",
"arg3",
"=",
"None",
")",
":",
"print",
"(",
"'Starting dummy AI algorithm with :'",
",",
"arg1",
",",
"arg2",
",",
"arg3",
")",
"if",
"arg3",
"is",
"None",
":",
"arg3",
"=",
"[",
"5",
",",
"6",
",",
"7",
",",
"5",
",",
"4",
",",
"]",
"result",
"=",
"arg1",
"+",
"arg3",
"[",
"0",
"]",
"*",
"7566.545",
"print",
"(",
"'Done - returning '",
",",
"result",
")",
"return",
"result"
] | This is a sample program to show how a learning agent can
be logged using AIKIF.
The idea is that this main function is your algorithm, which
will run until it finds a successful result. The result is
returned and the time taken is logged.
There can optionally be have additional functions
to call to allow for easy logging access | [
"This",
"is",
"a",
"sample",
"program",
"to",
"show",
"how",
"a",
"learning",
"agent",
"can",
"be",
"logged",
"using",
"AIKIF",
".",
"The",
"idea",
"is",
"that",
"this",
"main",
"function",
"is",
"your",
"algorithm",
"which",
"will",
"run",
"until",
"it",
"finds",
"a",
"successful",
"result",
".",
"The",
"result",
"is",
"returned",
"and",
"the",
"time",
"taken",
"is",
"logged",
".",
"There",
"can",
"optionally",
"be",
"have",
"additional",
"functions",
"to",
"call",
"to",
"allow",
"for",
"easy",
"logging",
"access"
] | fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03 | https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/agents/learn/dummy_learn_1.py#L5-L23 | train |
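The docstring above says the result is returned and the time taken is logged, but the timing itself happens outside this function. A hedged sketch of how a caller might wrap it, using only the standard library rather than AIKIF's own logging:

```python
import time

def run_and_time(fn, *args, **kwargs):
    """Run any callable and report its result together with the elapsed wall-clock time."""
    start = time.time()
    result = fn(*args, **kwargs)
    elapsed = time.time() - start
    print('result=%s, elapsed=%.4fs' % (result, elapsed))
    return result, elapsed

# run_and_time(main, 55, 'test')  # assuming main() from the record above is importable
```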
acutesoftware/AIKIF | aikif/dataTools/if_redis.py | redis_server.get | def get(self, key):
""" get a set of keys from redis """
res = self.connection.get(key)
print(res)
return res | python | def get(self, key):
""" get a set of keys from redis """
res = self.connection.get(key)
print(res)
return res | [
"def",
"get",
"(",
"self",
",",
"key",
")",
":",
"res",
"=",
"self",
".",
"connection",
".",
"get",
"(",
"key",
")",
"print",
"(",
"res",
")",
"return",
"res"
] | get a set of keys from redis | [
"get",
"a",
"set",
"of",
"keys",
"from",
"redis"
] | fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03 | https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/dataTools/if_redis.py#L108-L112 | train |
Nachtfeuer/pipeline | spline/components/packer.py | Packer.creator | def creator(_, config):
"""Creator function for creating an instance of a Packer image script."""
packer_script = render(config.script, model=config.model, env=config.env,
variables=config.variables, item=config.item)
filename = "packer.dry.run.see.comment"
if not config.dry_run:
# writing Packer file (JSON)
filename = write_temporary_file(packer_script, 'packer-', '.json')
packer_script = ''
# rendering the Bash script for generating the Packer image
template_file = os.path.join(os.path.dirname(__file__), 'templates/packer-image.sh.j2')
with open(template_file) as handle:
template = handle.read()
config.script = render(template, debug=config.debug,
packer_content=packer_script,
packer_filename=filename)
return Packer(config) | python | def creator(_, config):
"""Creator function for creating an instance of a Packer image script."""
packer_script = render(config.script, model=config.model, env=config.env,
variables=config.variables, item=config.item)
filename = "packer.dry.run.see.comment"
if not config.dry_run:
# writing Packer file (JSON)
filename = write_temporary_file(packer_script, 'packer-', '.json')
packer_script = ''
# rendering the Bash script for generating the Packer image
template_file = os.path.join(os.path.dirname(__file__), 'templates/packer-image.sh.j2')
with open(template_file) as handle:
template = handle.read()
config.script = render(template, debug=config.debug,
packer_content=packer_script,
packer_filename=filename)
return Packer(config) | [
"def",
"creator",
"(",
"_",
",",
"config",
")",
":",
"packer_script",
"=",
"render",
"(",
"config",
".",
"script",
",",
"model",
"=",
"config",
".",
"model",
",",
"env",
"=",
"config",
".",
"env",
",",
"variables",
"=",
"config",
".",
"variables",
",",
"item",
"=",
"config",
".",
"item",
")",
"filename",
"=",
"\"packer.dry.run.see.comment\"",
"if",
"not",
"config",
".",
"dry_run",
":",
"filename",
"=",
"write_temporary_file",
"(",
"packer_script",
",",
"'packer-'",
",",
"'.json'",
")",
"packer_script",
"=",
"''",
"template_file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"__file__",
")",
",",
"'templates/packer-image.sh.j2'",
")",
"with",
"open",
"(",
"template_file",
")",
"as",
"handle",
":",
"template",
"=",
"handle",
".",
"read",
"(",
")",
"config",
".",
"script",
"=",
"render",
"(",
"template",
",",
"debug",
"=",
"config",
".",
"debug",
",",
"packer_content",
"=",
"packer_script",
",",
"packer_filename",
"=",
"filename",
")",
"return",
"Packer",
"(",
"config",
")"
] | Creator function for creating an instance of a Packer image script. | [
"Creator",
"function",
"for",
"creating",
"an",
"instance",
"of",
"a",
"Packer",
"image",
"script",
"."
] | 04ca18c4e95e4349532bb45b768206393e1f2c13 | https://github.com/Nachtfeuer/pipeline/blob/04ca18c4e95e4349532bb45b768206393e1f2c13/spline/components/packer.py#L37-L57 | train |
wanadev/pyguetzli | pyguetzli/guetzli.py | process_jpeg_bytes | def process_jpeg_bytes(bytes_in, quality=DEFAULT_JPEG_QUALITY):
"""Generates an optimized JPEG from JPEG-encoded bytes.
:param bytes_in: the input image's bytes
:param quality: the output JPEG quality (default 95)
:returns: Optimized JPEG bytes
:rtype: bytes
:raises ValueError: Guetzli was not able to decode the image (the image is
probably corrupted or is not a JPEG)
.. code:: python
import pyguetzli
input_jpeg_bytes = open("./test/image.jpg", "rb").read()
optimized_jpeg = pyguetzli.process_jpeg_bytes(input_jpeg_bytes)
"""
bytes_out_p = ffi.new("char**")
bytes_out_p_gc = ffi.gc(bytes_out_p, lib.guetzli_free_bytes)
length = lib.guetzli_process_jpeg_bytes(
bytes_in,
len(bytes_in),
bytes_out_p_gc,
quality
)
if length == 0:
raise ValueError("Invalid JPEG: Guetzli was not able to decode the image") # noqa
bytes_out = ffi.cast("char*", bytes_out_p_gc[0])
return ffi.unpack(bytes_out, length) | python | def process_jpeg_bytes(bytes_in, quality=DEFAULT_JPEG_QUALITY):
"""Generates an optimized JPEG from JPEG-encoded bytes.
:param bytes_in: the input image's bytes
:param quality: the output JPEG quality (default 95)
:returns: Optimized JPEG bytes
:rtype: bytes
:raises ValueError: Guetzli was not able to decode the image (the image is
probably corrupted or is not a JPEG)
.. code:: python
import pyguetzli
input_jpeg_bytes = open("./test/image.jpg", "rb").read()
optimized_jpeg = pyguetzli.process_jpeg_bytes(input_jpeg_bytes)
"""
bytes_out_p = ffi.new("char**")
bytes_out_p_gc = ffi.gc(bytes_out_p, lib.guetzli_free_bytes)
length = lib.guetzli_process_jpeg_bytes(
bytes_in,
len(bytes_in),
bytes_out_p_gc,
quality
)
if length == 0:
raise ValueError("Invalid JPEG: Guetzli was not able to decode the image") # noqa
bytes_out = ffi.cast("char*", bytes_out_p_gc[0])
return ffi.unpack(bytes_out, length) | [
"def",
"process_jpeg_bytes",
"(",
"bytes_in",
",",
"quality",
"=",
"DEFAULT_JPEG_QUALITY",
")",
":",
"bytes_out_p",
"=",
"ffi",
".",
"new",
"(",
"\"char**\"",
")",
"bytes_out_p_gc",
"=",
"ffi",
".",
"gc",
"(",
"bytes_out_p",
",",
"lib",
".",
"guetzli_free_bytes",
")",
"length",
"=",
"lib",
".",
"guetzli_process_jpeg_bytes",
"(",
"bytes_in",
",",
"len",
"(",
"bytes_in",
")",
",",
"bytes_out_p_gc",
",",
"quality",
")",
"if",
"length",
"==",
"0",
":",
"raise",
"ValueError",
"(",
"\"Invalid JPEG: Guetzli was not able to decode the image\"",
")",
"bytes_out",
"=",
"ffi",
".",
"cast",
"(",
"\"char*\"",
",",
"bytes_out_p_gc",
"[",
"0",
"]",
")",
"return",
"ffi",
".",
"unpack",
"(",
"bytes_out",
",",
"length",
")"
] | Generates an optimized JPEG from JPEG-encoded bytes.
:param bytes_in: the input image's bytes
:param quality: the output JPEG quality (default 95)
:returns: Optimized JPEG bytes
:rtype: bytes
:raises ValueError: Guetzli was not able to decode the image (the image is
probably corrupted or is not a JPEG)
.. code:: python
import pyguetzli
input_jpeg_bytes = open("./test/image.jpg", "rb").read()
optimized_jpeg = pyguetzli.process_jpeg_bytes(input_jpeg_bytes) | [
"Generates",
"an",
"optimized",
"JPEG",
"from",
"JPEG",
"-",
"encoded",
"bytes",
"."
] | 4e0c221f5e8f23adb38505c3c1c5a09294b7ee98 | https://github.com/wanadev/pyguetzli/blob/4e0c221f5e8f23adb38505c3c1c5a09294b7ee98/pyguetzli/guetzli.py#L13-L46 | train |
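The docstring already shows the basic call; a slightly fuller sketch that also writes the optimized bytes back to disk (file paths here are hypothetical, and `pyguetzli` must be installed):

```python
import pyguetzli

with open("input.jpg", "rb") as f:        # hypothetical input path
    original = f.read()

optimized = pyguetzli.process_jpeg_bytes(original, quality=90)

with open("output.jpg", "wb") as f:       # hypothetical output path
    f.write(optimized)

print("saved %d -> %d bytes" % (len(original), len(optimized)))
```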
wanadev/pyguetzli | pyguetzli/guetzli.py | process_rgb_bytes | def process_rgb_bytes(bytes_in, width, height, quality=DEFAULT_JPEG_QUALITY):
"""Generates an optimized JPEG from RGB bytes.
:param bytes bytes_in: the input image's bytes
:param int width: the width of the input image
:param int height: the height of the input image
:param int quality: the output JPEG quality (default 95)
:returns: Optimized JPEG bytes
:rtype: bytes
:raises ValueError: the given width and height is not coherent with the
``bytes_in`` length.
.. code:: python
import pyguetzli
# 2x2px RGB image
# | red | green |
image_pixels = b"\\xFF\\x00\\x00\\x00\\xFF\\x00"
image_pixels += b"\\x00\\x00\\xFF\\xFF\\xFF\\xFF"
# | blue | white |
optimized_jpeg = pyguetzli.process_rgb_bytes(image_pixels, 2, 2)
"""
if len(bytes_in) != width * height * 3:
raise ValueError("bytes_in length is not coherent with given width and height") # noqa
bytes_out_p = ffi.new("char**")
bytes_out_p_gc = ffi.gc(bytes_out_p, lib.guetzli_free_bytes)
length = lib.guetzli_process_rgb_bytes(
bytes_in,
width,
height,
bytes_out_p_gc,
quality
)
bytes_out = ffi.cast("char*", bytes_out_p_gc[0])
return ffi.unpack(bytes_out, length) | python | def process_rgb_bytes(bytes_in, width, height, quality=DEFAULT_JPEG_QUALITY):
"""Generates an optimized JPEG from RGB bytes.
:param bytes bytes_in: the input image's bytes
:param int width: the width of the input image
:param int height: the height of the input image
:param int quality: the output JPEG quality (default 95)
:returns: Optimized JPEG bytes
:rtype: bytes
:raises ValueError: the given width and height is not coherent with the
``bytes_in`` length.
.. code:: python
import pyguetzli
# 2x2px RGB image
# | red | green |
image_pixels = b"\\xFF\\x00\\x00\\x00\\xFF\\x00"
image_pixels += b"\\x00\\x00\\xFF\\xFF\\xFF\\xFF"
# | blue | white |
optimized_jpeg = pyguetzli.process_rgb_bytes(image_pixels, 2, 2)
"""
if len(bytes_in) != width * height * 3:
raise ValueError("bytes_in length is not coherent with given width and height") # noqa
bytes_out_p = ffi.new("char**")
bytes_out_p_gc = ffi.gc(bytes_out_p, lib.guetzli_free_bytes)
length = lib.guetzli_process_rgb_bytes(
bytes_in,
width,
height,
bytes_out_p_gc,
quality
)
bytes_out = ffi.cast("char*", bytes_out_p_gc[0])
return ffi.unpack(bytes_out, length) | [
"def",
"process_rgb_bytes",
"(",
"bytes_in",
",",
"width",
",",
"height",
",",
"quality",
"=",
"DEFAULT_JPEG_QUALITY",
")",
":",
"if",
"len",
"(",
"bytes_in",
")",
"!=",
"width",
"*",
"height",
"*",
"3",
":",
"raise",
"ValueError",
"(",
"\"bytes_in length is not coherent with given width and height\"",
")",
"bytes_out_p",
"=",
"ffi",
".",
"new",
"(",
"\"char**\"",
")",
"bytes_out_p_gc",
"=",
"ffi",
".",
"gc",
"(",
"bytes_out_p",
",",
"lib",
".",
"guetzli_free_bytes",
")",
"length",
"=",
"lib",
".",
"guetzli_process_rgb_bytes",
"(",
"bytes_in",
",",
"width",
",",
"height",
",",
"bytes_out_p_gc",
",",
"quality",
")",
"bytes_out",
"=",
"ffi",
".",
"cast",
"(",
"\"char*\"",
",",
"bytes_out_p_gc",
"[",
"0",
"]",
")",
"return",
"ffi",
".",
"unpack",
"(",
"bytes_out",
",",
"length",
")"
] | Generates an optimized JPEG from RGB bytes.
:param bytes bytes_in: the input image's bytes
:param int width: the width of the input image
:param int height: the height of the input image
:param int quality: the output JPEG quality (default 95)
:returns: Optimized JPEG bytes
:rtype: bytes
:raises ValueError: the given width and height is not coherent with the
``bytes_in`` length.
.. code:: python
import pyguetzli
# 2x2px RGB image
# | red | green |
image_pixels = b"\\xFF\\x00\\x00\\x00\\xFF\\x00"
image_pixels += b"\\x00\\x00\\xFF\\xFF\\xFF\\xFF"
# | blue | white |
optimized_jpeg = pyguetzli.process_rgb_bytes(image_pixels, 2, 2) | [
"Generates",
"an",
"optimized",
"JPEG",
"from",
"RGB",
"bytes",
"."
] | 4e0c221f5e8f23adb38505c3c1c5a09294b7ee98 | https://github.com/wanadev/pyguetzli/blob/4e0c221f5e8f23adb38505c3c1c5a09294b7ee98/pyguetzli/guetzli.py#L49-L90 | train |
Nachtfeuer/pipeline | spline/tools/decorators.py | singleton | def singleton(the_class):
"""
Decorator for a class to make a singleton out of it.
@type the_class: class
@param the_class: the class that should work as a singleton
@rtype: decorator
@return: decorator
"""
class_instances = {}
def get_instance(*args, **kwargs):
"""
Creating or just return the one and only class instance.
The singleton depends on the parameters used in __init__
@type args: list
@param args: positional arguments of the constructor.
@type kwargs: dict
@param kwargs: named parameters of the constructor.
@rtype: decorated class type
@return: singleton instance of decorated class.
"""
key = (the_class, args, str(kwargs))
if key not in class_instances:
class_instances[key] = the_class(*args, **kwargs)
return class_instances[key]
return get_instance | python | def singleton(the_class):
"""
Decorator for a class to make a singleton out of it.
@type the_class: class
@param the_class: the class that should work as a singleton
@rtype: decorator
@return: decorator
"""
class_instances = {}
def get_instance(*args, **kwargs):
"""
Creating or just return the one and only class instance.
The singleton depends on the parameters used in __init__
@type args: list
@param args: positional arguments of the constructor.
@type kwargs: dict
@param kwargs: named parameters of the constructor.
@rtype: decorated class type
@return: singleton instance of decorated class.
"""
key = (the_class, args, str(kwargs))
if key not in class_instances:
class_instances[key] = the_class(*args, **kwargs)
return class_instances[key]
return get_instance | [
"def",
"singleton",
"(",
"the_class",
")",
":",
"class_instances",
"=",
"{",
"}",
"def",
"get_instance",
"(",
"*",
"args",
",",
"**",
"kwargs",
")",
":",
"key",
"=",
"(",
"the_class",
",",
"args",
",",
"str",
"(",
"kwargs",
")",
")",
"if",
"key",
"not",
"in",
"class_instances",
":",
"class_instances",
"[",
"key",
"]",
"=",
"the_class",
"(",
"*",
"args",
",",
"**",
"kwargs",
")",
"return",
"class_instances",
"[",
"key",
"]",
"return",
"get_instance"
] | Decorator for a class to make a singleton out of it.
@type the_class: class
@param the_class: the class that should work as a singleton
@rtype: decorator
@return: decorator | [
"Decorator",
"for",
"a",
"class",
"to",
"make",
"a",
"singleton",
"out",
"of",
"it",
"."
] | 04ca18c4e95e4349532bb45b768206393e1f2c13 | https://github.com/Nachtfeuer/pipeline/blob/04ca18c4e95e4349532bb45b768206393e1f2c13/spline/tools/decorators.py#L25-L53 | train |
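A short usage sketch for the `singleton` decorator above; because the cache key includes the constructor arguments, different arguments yield different instances (the import path is taken from this record):

```python
from spline.tools.decorators import singleton

@singleton
class Config:
    """Toy configuration object: one instance per distinct set of constructor arguments."""
    def __init__(self, name):
        self.name = name

a = Config('prod')
b = Config('prod')
c = Config('dev')
print(a is b)  # True  - same arguments reuse the cached instance
print(a is c)  # False - different arguments create a separate instance
```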
acutesoftware/AIKIF | aikif/toolbox/game_board_utils.py | build_board_2048 | def build_board_2048():
""" builds a 2048 starting board
Printing Grid
0 0 0 2
0 0 4 0
0 0 0 0
0 0 0 0
"""
grd = Grid(4,4, [2,4])
grd.new_tile()
grd.new_tile()
print(grd)
return grd | python | def build_board_2048():
""" builds a 2048 starting board
Printing Grid
0 0 0 2
0 0 4 0
0 0 0 0
0 0 0 0
"""
grd = Grid(4,4, [2,4])
grd.new_tile()
grd.new_tile()
print(grd)
return grd | [
"def",
"build_board_2048",
"(",
")",
":",
"grd",
"=",
"Grid",
"(",
"4",
",",
"4",
",",
"[",
"2",
",",
"4",
"]",
")",
"grd",
".",
"new_tile",
"(",
")",
"grd",
".",
"new_tile",
"(",
")",
"print",
"(",
"grd",
")",
"return",
"grd"
] | builds a 2048 starting board
Printing Grid
0 0 0 2
0 0 4 0
0 0 0 0
0 0 0 0 | [
"builds",
"a",
"2048",
"starting",
"board",
"Printing",
"Grid",
"0",
"0",
"0",
"2",
"0",
"0",
"4",
"0",
"0",
"0",
"0",
"0",
"0",
"0",
"0",
"0"
] | fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03 | https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/toolbox/game_board_utils.py#L10-L23 | train |
acutesoftware/AIKIF | aikif/toolbox/game_board_utils.py | build_board_checkers | def build_board_checkers():
""" builds a checkers starting board
Printing Grid
0 B 0 B 0 B 0 B
B 0 B 0 B 0 B 0
0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0
0 W 0 W 0 W 0 W
W 0 W 0 W 0 W 0
"""
grd = Grid(8,8, ["B","W"])
for c in range(4):
grd.set_tile(0,(c*2) - 1, "B")
grd.set_tile(1,(c*2) - 0, "B")
grd.set_tile(6,(c*2) + 1, "W")
grd.set_tile(7,(c*2) - 0, "W")
print(grd)
return grd | python | def build_board_checkers():
""" builds a checkers starting board
Printing Grid
0 B 0 B 0 B 0 B
B 0 B 0 B 0 B 0
0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0
0 W 0 W 0 W 0 W
W 0 W 0 W 0 W 0
"""
grd = Grid(8,8, ["B","W"])
for c in range(4):
grd.set_tile(0,(c*2) - 1, "B")
grd.set_tile(1,(c*2) - 0, "B")
grd.set_tile(6,(c*2) + 1, "W")
grd.set_tile(7,(c*2) - 0, "W")
print(grd)
return grd | [
"def",
"build_board_checkers",
"(",
")",
":",
"grd",
"=",
"Grid",
"(",
"8",
",",
"8",
",",
"[",
"\"B\"",
",",
"\"W\"",
"]",
")",
"for",
"c",
"in",
"range",
"(",
"4",
")",
":",
"grd",
".",
"set_tile",
"(",
"0",
",",
"(",
"c",
"*",
"2",
")",
"-",
"1",
",",
"\"B\"",
")",
"grd",
".",
"set_tile",
"(",
"1",
",",
"(",
"c",
"*",
"2",
")",
"-",
"0",
",",
"\"B\"",
")",
"grd",
".",
"set_tile",
"(",
"6",
",",
"(",
"c",
"*",
"2",
")",
"+",
"1",
",",
"\"W\"",
")",
"grd",
".",
"set_tile",
"(",
"7",
",",
"(",
"c",
"*",
"2",
")",
"-",
"0",
",",
"\"W\"",
")",
"print",
"(",
"grd",
")",
"return",
"grd"
] | builds a checkers starting board
Printing Grid
0 B 0 B 0 B 0 B
B 0 B 0 B 0 B 0
0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0
0 W 0 W 0 W 0 W
W 0 W 0 W 0 W 0 | [
"builds",
"a",
"checkers",
"starting",
"board",
"Printing",
"Grid",
"0",
"B",
"0",
"B",
"0",
"B",
"0",
"B",
"B",
"0",
"B",
"0",
"B",
"0",
"B",
"0",
"0",
"0",
"0",
"0",
"0",
"0",
"0",
"0",
"0",
"0",
"0",
"0",
"0",
"0",
"0",
"0",
"0",
"0",
"0",
"0",
"0",
"0",
"0",
"0",
"0",
"0",
"0",
"0",
"0",
"0",
"0",
"0",
"0",
"W",
"0",
"W",
"0",
"W",
"0",
"W",
"W",
"0",
"W",
"0",
"W",
"0",
"W",
"0"
] | fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03 | https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/toolbox/game_board_utils.py#L25-L45 | train |
acutesoftware/AIKIF | aikif/toolbox/game_board_utils.py | TEST | def TEST():
""" tests for this module """
grd = Grid(4,4, [2,4])
grd.new_tile()
grd.new_tile()
print(grd)
print("There are ", grd.count_blank_positions(), " blanks in grid 1\n")
grd2 = Grid(5,5, ['A','B'])
grd2.new_tile(26)
print(grd2)
build_board_checkers()
print("There are ", grd2.count_blank_positions(), " blanks in grid 2") | python | def TEST():
""" tests for this module """
grd = Grid(4,4, [2,4])
grd.new_tile()
grd.new_tile()
print(grd)
print("There are ", grd.count_blank_positions(), " blanks in grid 1\n")
grd2 = Grid(5,5, ['A','B'])
grd2.new_tile(26)
print(grd2)
build_board_checkers()
print("There are ", grd2.count_blank_positions(), " blanks in grid 2") | [
"def",
"TEST",
"(",
")",
":",
"grd",
"=",
"Grid",
"(",
"4",
",",
"4",
",",
"[",
"2",
",",
"4",
"]",
")",
"grd",
".",
"new_tile",
"(",
")",
"grd",
".",
"new_tile",
"(",
")",
"print",
"(",
"grd",
")",
"print",
"(",
"\"There are \"",
",",
"grd",
".",
"count_blank_positions",
"(",
")",
",",
"\" blanks in grid 1\\n\"",
")",
"grd2",
"=",
"Grid",
"(",
"5",
",",
"5",
",",
"[",
"'A'",
",",
"'B'",
"]",
")",
"grd2",
".",
"new_tile",
"(",
"26",
")",
"print",
"(",
"grd2",
")",
"build_board_checkers",
"(",
")",
"print",
"(",
"\"There are \"",
",",
"grd2",
".",
"count_blank_positions",
"(",
")",
",",
"\" blanks in grid 2\"",
")"
] | tests for this module | [
"tests",
"for",
"this",
"module"
] | fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03 | https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/toolbox/game_board_utils.py#L47-L60 | train |
masci/django-appengine-toolkit | appengine_toolkit/storage.py | GoogleCloudStorage.url | def url(self, name):
"""
Ask blobstore api for an url to directly serve the file
"""
key = blobstore.create_gs_key('/gs' + name)
return images.get_serving_url(key) | python | def url(self, name):
"""
Ask blobstore api for an url to directly serve the file
"""
key = blobstore.create_gs_key('/gs' + name)
return images.get_serving_url(key) | [
"def",
"url",
"(",
"self",
",",
"name",
")",
":",
"key",
"=",
"blobstore",
".",
"create_gs_key",
"(",
"'/gs'",
"+",
"name",
")",
"return",
"images",
".",
"get_serving_url",
"(",
"key",
")"
] | Ask blobstore api for an url to directly serve the file | [
"Ask",
"blobstore",
"api",
"for",
"an",
"url",
"to",
"directly",
"serve",
"the",
"file"
] | 9ffe8b05a263889787fb34a3e28ebc66b1f0a1d2 | https://github.com/masci/django-appengine-toolkit/blob/9ffe8b05a263889787fb34a3e28ebc66b1f0a1d2/appengine_toolkit/storage.py#L74-L79 | train |
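The two calls in `GoogleCloudStorage.url` are legacy App Engine APIs, so the sketch below only runs inside an App Engine runtime; the helper name and the example path are assumptions for illustration:

```python
from google.appengine.ext import blobstore
from google.appengine.api import images

def serving_url_for_gcs_object(gcs_path):
    """Turn a Cloud Storage path such as '/my-bucket/photo.png' into a direct serving URL."""
    key = blobstore.create_gs_key('/gs' + gcs_path)   # blobstore key for the GCS object
    return images.get_serving_url(key)                # serving URL handled by App Engine
```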
Nachtfeuer/pipeline | spline/components/stage.py | Stage.process | def process(self, stage):
"""Processing one stage."""
self.logger.info("Processing pipeline stage '%s'", self.title)
output = []
for entry in stage:
key = list(entry.keys())[0]
if key == "env":
self.pipeline.data.env_list[1].update(entry[key])
self.logger.debug("Updating environment at level 1 with %s",
self.pipeline.data.env_list[1])
continue
# if not "env" then it must be "tasks" (schema):
tasks = Tasks(self.pipeline, re.match(r"tasks\(parallel\)", key) is not None)
result = tasks.process(entry[key])
for line in result['output']:
output.append(line)
if not result['success']:
self.event.failed()
return {'success': False, 'output': output}
self.event.succeeded()
return {'success': True, 'output': output} | python | def process(self, stage):
"""Processing one stage."""
self.logger.info("Processing pipeline stage '%s'", self.title)
output = []
for entry in stage:
key = list(entry.keys())[0]
if key == "env":
self.pipeline.data.env_list[1].update(entry[key])
self.logger.debug("Updating environment at level 1 with %s",
self.pipeline.data.env_list[1])
continue
# if not "env" then it must be "tasks" (schema):
tasks = Tasks(self.pipeline, re.match(r"tasks\(parallel\)", key) is not None)
result = tasks.process(entry[key])
for line in result['output']:
output.append(line)
if not result['success']:
self.event.failed()
return {'success': False, 'output': output}
self.event.succeeded()
return {'success': True, 'output': output} | [
"def",
"process",
"(",
"self",
",",
"stage",
")",
":",
"self",
".",
"logger",
".",
"info",
"(",
"\"Processing pipeline stage '%s'\"",
",",
"self",
".",
"title",
")",
"output",
"=",
"[",
"]",
"for",
"entry",
"in",
"stage",
":",
"key",
"=",
"list",
"(",
"entry",
".",
"keys",
"(",
")",
")",
"[",
"0",
"]",
"if",
"key",
"==",
"\"env\"",
":",
"self",
".",
"pipeline",
".",
"data",
".",
"env_list",
"[",
"1",
"]",
".",
"update",
"(",
"entry",
"[",
"key",
"]",
")",
"self",
".",
"logger",
".",
"debug",
"(",
"\"Updating environment at level 1 with %s\"",
",",
"self",
".",
"pipeline",
".",
"data",
".",
"env_list",
"[",
"1",
"]",
")",
"continue",
"tasks",
"=",
"Tasks",
"(",
"self",
".",
"pipeline",
",",
"re",
".",
"match",
"(",
"r\"tasks\\(parallel\\)\"",
",",
"key",
")",
"is",
"not",
"None",
")",
"result",
"=",
"tasks",
".",
"process",
"(",
"entry",
"[",
"key",
"]",
")",
"for",
"line",
"in",
"result",
"[",
"'output'",
"]",
":",
"output",
".",
"append",
"(",
"line",
")",
"if",
"not",
"result",
"[",
"'success'",
"]",
":",
"self",
".",
"event",
".",
"failed",
"(",
")",
"return",
"{",
"'success'",
":",
"False",
",",
"'output'",
":",
"output",
"}",
"self",
".",
"event",
".",
"succeeded",
"(",
")",
"return",
"{",
"'success'",
":",
"True",
",",
"'output'",
":",
"output",
"}"
] | Processing one stage. | [
"Processing",
"one",
"stage",
"."
] | 04ca18c4e95e4349532bb45b768206393e1f2c13 | https://github.com/Nachtfeuer/pipeline/blob/04ca18c4e95e4349532bb45b768206393e1f2c13/spline/components/stage.py#L47-L69 | train |
budacom/trading-bots | trading_bots/contrib/clients.py | MarketClient.trading_fees | def trading_fees(self) -> TradingFees:
"""Fetch trading fees."""
return self._fetch('trading fees', self.market.code)(self._trading_fees)() | python | def trading_fees(self) -> TradingFees:
"""Fetch trading fees."""
return self._fetch('trading fees', self.market.code)(self._trading_fees)() | [
"def",
"trading_fees",
"(",
"self",
")",
"->",
"TradingFees",
":",
"return",
"self",
".",
"_fetch",
"(",
"'trading fees'",
",",
"self",
".",
"market",
".",
"code",
")",
"(",
"self",
".",
"_trading_fees",
")",
"(",
")"
] | Fetch trading fees. | [
"Fetch",
"trading",
"fees",
"."
] | 8cb68bb8d0b5f822108db1cc5dae336e3d3c3452 | https://github.com/budacom/trading-bots/blob/8cb68bb8d0b5f822108db1cc5dae336e3d3c3452/trading_bots/contrib/clients.py#L202-L204 | train |
budacom/trading-bots | trading_bots/contrib/clients.py | MarketClient.fetch_ticker | def fetch_ticker(self) -> Ticker:
"""Fetch the market ticker."""
return self._fetch('ticker', self.market.code)(self._ticker)() | python | def fetch_ticker(self) -> Ticker:
"""Fetch the market ticker."""
return self._fetch('ticker', self.market.code)(self._ticker)() | [
"def",
"fetch_ticker",
"(",
"self",
")",
"->",
"Ticker",
":",
"return",
"self",
".",
"_fetch",
"(",
"'ticker'",
",",
"self",
".",
"market",
".",
"code",
")",
"(",
"self",
".",
"_ticker",
")",
"(",
")"
] | Fetch the market ticker. | [
"Fetch",
"the",
"market",
"ticker",
"."
] | 8cb68bb8d0b5f822108db1cc5dae336e3d3c3452 | https://github.com/budacom/trading-bots/blob/8cb68bb8d0b5f822108db1cc5dae336e3d3c3452/trading_bots/contrib/clients.py#L210-L212 | train |
budacom/trading-bots | trading_bots/contrib/clients.py | MarketClient.fetch_order_book | def fetch_order_book(self) -> OrderBook:
"""Fetch the order book."""
return self._fetch('order book', self.market.code)(self._order_book)() | python | def fetch_order_book(self) -> OrderBook:
"""Fetch the order book."""
return self._fetch('order book', self.market.code)(self._order_book)() | [
"def",
"fetch_order_book",
"(",
"self",
")",
"->",
"OrderBook",
":",
"return",
"self",
".",
"_fetch",
"(",
"'order book'",
",",
"self",
".",
"market",
".",
"code",
")",
"(",
"self",
".",
"_order_book",
")",
"(",
")"
] | Fetch the order book. | [
"Fetch",
"the",
"order",
"book",
"."
] | 8cb68bb8d0b5f822108db1cc5dae336e3d3c3452 | https://github.com/budacom/trading-bots/blob/8cb68bb8d0b5f822108db1cc5dae336e3d3c3452/trading_bots/contrib/clients.py#L232-L234 | train |
budacom/trading-bots | trading_bots/contrib/clients.py | MarketClient.fetch_trades_since | def fetch_trades_since(self, since: int) -> List[Trade]:
"""Fetch trades since given timestamp."""
return self._fetch_since('trades', self.market.code)(self._trades_since)(since) | python | def fetch_trades_since(self, since: int) -> List[Trade]:
"""Fetch trades since given timestamp."""
return self._fetch_since('trades', self.market.code)(self._trades_since)(since) | [
"def",
"fetch_trades_since",
"(",
"self",
",",
"since",
":",
"int",
")",
"->",
"List",
"[",
"Trade",
"]",
":",
"return",
"self",
".",
"_fetch_since",
"(",
"'trades'",
",",
"self",
".",
"market",
".",
"code",
")",
"(",
"self",
".",
"_trades_since",
")",
"(",
"since",
")"
] | Fetch trades since given timestamp. | [
"Fetch",
"trades",
"since",
"given",
"timestamp",
"."
] | 8cb68bb8d0b5f822108db1cc5dae336e3d3c3452 | https://github.com/budacom/trading-bots/blob/8cb68bb8d0b5f822108db1cc5dae336e3d3c3452/trading_bots/contrib/clients.py#L241-L243 | train |
budacom/trading-bots | trading_bots/contrib/clients.py | WalletClient.fetch_deposits | def fetch_deposits(self, limit: int) -> List[Deposit]:
"""Fetch latest deposits, must provide a limit."""
return self._transactions(self._deposits, 'deposits', limit) | python | def fetch_deposits(self, limit: int) -> List[Deposit]:
"""Fetch latest deposits, must provide a limit."""
return self._transactions(self._deposits, 'deposits', limit) | [
"def",
"fetch_deposits",
"(",
"self",
",",
"limit",
":",
"int",
")",
"->",
"List",
"[",
"Deposit",
"]",
":",
"return",
"self",
".",
"_transactions",
"(",
"self",
".",
"_deposits",
",",
"'deposits'",
",",
"limit",
")"
] | Fetch latest deposits, must provide a limit. | [
"Fetch",
"latest",
"deposits",
"must",
"provide",
"a",
"limit",
"."
] | 8cb68bb8d0b5f822108db1cc5dae336e3d3c3452 | https://github.com/budacom/trading-bots/blob/8cb68bb8d0b5f822108db1cc5dae336e3d3c3452/trading_bots/contrib/clients.py#L292-L294 | train |
budacom/trading-bots | trading_bots/contrib/clients.py | WalletClient.fetch_deposits_since | def fetch_deposits_since(self, since: int) -> List[Deposit]:
"""Fetch all deposits since the given timestamp."""
return self._transactions_since(self._deposits_since, 'deposits', since) | python | def fetch_deposits_since(self, since: int) -> List[Deposit]:
"""Fetch all deposits since the given timestamp."""
return self._transactions_since(self._deposits_since, 'deposits', since) | [
"def",
"fetch_deposits_since",
"(",
"self",
",",
"since",
":",
"int",
")",
"->",
"List",
"[",
"Deposit",
"]",
":",
"return",
"self",
".",
"_transactions_since",
"(",
"self",
".",
"_deposits_since",
",",
"'deposits'",
",",
"since",
")"
] | Fetch all deposits since the given timestamp. | [
"Fetch",
"all",
"deposits",
"since",
"the",
"given",
"timestamp",
"."
] | 8cb68bb8d0b5f822108db1cc5dae336e3d3c3452 | https://github.com/budacom/trading-bots/blob/8cb68bb8d0b5f822108db1cc5dae336e3d3c3452/trading_bots/contrib/clients.py#L304-L306 | train |
budacom/trading-bots | trading_bots/contrib/clients.py | WalletClient.fetch_withdrawals | def fetch_withdrawals(self, limit: int) -> List[Withdrawal]:
"""Fetch latest withdrawals, must provide a limit."""
return self._transactions(self._withdrawals, 'withdrawals', limit) | python | def fetch_withdrawals(self, limit: int) -> List[Withdrawal]:
"""Fetch latest withdrawals, must provide a limit."""
return self._transactions(self._withdrawals, 'withdrawals', limit) | [
"def",
"fetch_withdrawals",
"(",
"self",
",",
"limit",
":",
"int",
")",
"->",
"List",
"[",
"Withdrawal",
"]",
":",
"return",
"self",
".",
"_transactions",
"(",
"self",
".",
"_withdrawals",
",",
"'withdrawals'",
",",
"limit",
")"
] | Fetch latest withdrawals, must provide a limit. | [
"Fetch",
"latest",
"withdrawals",
"must",
"provide",
"a",
"limit",
"."
] | 8cb68bb8d0b5f822108db1cc5dae336e3d3c3452 | https://github.com/budacom/trading-bots/blob/8cb68bb8d0b5f822108db1cc5dae336e3d3c3452/trading_bots/contrib/clients.py#L313-L315 | train |
budacom/trading-bots | trading_bots/contrib/clients.py | WalletClient.fetch_withdrawals_since | def fetch_withdrawals_since(self, since: int) -> List[Withdrawal]:
"""Fetch all withdrawals since the given timestamp."""
return self._transactions_since(self._withdrawals_since, 'withdrawals', since) | python | def fetch_withdrawals_since(self, since: int) -> List[Withdrawal]:
"""Fetch all withdrawals since the given timestamp."""
return self._transactions_since(self._withdrawals_since, 'withdrawals', since) | [
"def",
"fetch_withdrawals_since",
"(",
"self",
",",
"since",
":",
"int",
")",
"->",
"List",
"[",
"Withdrawal",
"]",
":",
"return",
"self",
".",
"_transactions_since",
"(",
"self",
".",
"_withdrawals_since",
",",
"'withdrawals'",
",",
"since",
")"
] | Fetch all withdrawals since the given timestamp. | [
"Fetch",
"all",
"withdrawals",
"since",
"the",
"given",
"timestamp",
"."
] | 8cb68bb8d0b5f822108db1cc5dae336e3d3c3452 | https://github.com/budacom/trading-bots/blob/8cb68bb8d0b5f822108db1cc5dae336e3d3c3452/trading_bots/contrib/clients.py#L325-L327 | train |
budacom/trading-bots | trading_bots/contrib/clients.py | WalletClient.request_withdrawal | def request_withdrawal(self, amount: Number, address: str, subtract_fee: bool=False, **params) -> Withdrawal:
"""Request a withdrawal."""
self.log.debug(f'Requesting {self.currency} withdrawal from {self.name} to {address}')
amount = self._parse_money(amount)
if self.dry_run:
withdrawal = Withdrawal.create_default(TxType.WITHDRAWAL, self.currency, amount, address)
self.log.warning(f'DRY RUN: Withdrawal requested on {self.name}: {withdrawal}')
return withdrawal
try:
withdrawal = self._withdraw(amount, address, subtract_fee, **params)
except Exception as e:
msg = f'Failed requesting withdrawal on {self.name}!: amount={amount}, address={address}'
raise self.exception(InvalidWithdrawal, msg, e) from e
self.log.info(f'Withdrawal requested on {self.name}: {withdrawal}')
return withdrawal | python | def request_withdrawal(self, amount: Number, address: str, subtract_fee: bool=False, **params) -> Withdrawal:
"""Request a withdrawal."""
self.log.debug(f'Requesting {self.currency} withdrawal from {self.name} to {address}')
amount = self._parse_money(amount)
if self.dry_run:
withdrawal = Withdrawal.create_default(TxType.WITHDRAWAL, self.currency, amount, address)
self.log.warning(f'DRY RUN: Withdrawal requested on {self.name}: {withdrawal}')
return withdrawal
try:
withdrawal = self._withdraw(amount, address, subtract_fee, **params)
except Exception as e:
msg = f'Failed requesting withdrawal on {self.name}!: amount={amount}, address={address}'
raise self.exception(InvalidWithdrawal, msg, e) from e
self.log.info(f'Withdrawal requested on {self.name}: {withdrawal}')
return withdrawal | [
"def",
"request_withdrawal",
"(",
"self",
",",
"amount",
":",
"Number",
",",
"address",
":",
"str",
",",
"subtract_fee",
":",
"bool",
"=",
"False",
",",
"**",
"params",
")",
"->",
"Withdrawal",
":",
"self",
".",
"log",
".",
"debug",
"(",
"f'Requesting {self.currency} withdrawal from {self.name} to {address}'",
")",
"amount",
"=",
"self",
".",
"_parse_money",
"(",
"amount",
")",
"if",
"self",
".",
"dry_run",
":",
"withdrawal",
"=",
"Withdrawal",
".",
"create_default",
"(",
"TxType",
".",
"WITHDRAWAL",
",",
"self",
".",
"currency",
",",
"amount",
",",
"address",
")",
"self",
".",
"log",
".",
"warning",
"(",
"f'DRY RUN: Withdrawal requested on {self.name}: {withdrawal}'",
")",
"return",
"withdrawal",
"try",
":",
"withdrawal",
"=",
"self",
".",
"_withdraw",
"(",
"amount",
",",
"address",
",",
"subtract_fee",
",",
"**",
"params",
")",
"except",
"Exception",
"as",
"e",
":",
"msg",
"=",
"f'Failed requesting withdrawal on {self.name}!: amount={amount}, address={address}'",
"raise",
"self",
".",
"exception",
"(",
"InvalidWithdrawal",
",",
"msg",
",",
"e",
")",
"from",
"e",
"self",
".",
"log",
".",
"info",
"(",
"f'Withdrawal requested on {self.name}: {withdrawal}'",
")",
"return",
"withdrawal"
] | Request a withdrawal. | [
"Request",
"a",
"withdrawal",
"."
] | 8cb68bb8d0b5f822108db1cc5dae336e3d3c3452 | https://github.com/budacom/trading-bots/blob/8cb68bb8d0b5f822108db1cc5dae336e3d3c3452/trading_bots/contrib/clients.py#L346-L363 | train |
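`request_withdrawal` shows the dry-run guard pattern used throughout these clients: when `dry_run` is set, the method logs and returns a stub instead of touching the exchange. A stripped-down sketch of just that pattern, with every name hypothetical rather than taken from trading-bots:

```python
class DryRunClient:
    """Illustrates the dry-run guard: skip the side effect, log it, return a stub result."""

    def __init__(self, dry_run=False):
        self.dry_run = dry_run

    def request_withdrawal(self, amount, address):
        if self.dry_run:
            print('DRY RUN: would withdraw %s to %s' % (amount, address))
            return {'amount': amount, 'address': address, 'status': 'simulated'}
        # a real client would place the withdrawal with the exchange API here
        raise NotImplementedError('live withdrawals are outside this sketch')

print(DryRunClient(dry_run=True).request_withdrawal(0.5, 'bc1-example-address'))
```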
budacom/trading-bots | trading_bots/contrib/clients.py | TradingClient.fetch_order | def fetch_order(self, order_id: str) -> Order:
"""Fetch an order by ID."""
return self._fetch(f'order id={order_id}', exc=OrderNotFound)(self._order)(order_id) | python | def fetch_order(self, order_id: str) -> Order:
"""Fetch an order by ID."""
return self._fetch(f'order id={order_id}', exc=OrderNotFound)(self._order)(order_id) | [
"def",
"fetch_order",
"(",
"self",
",",
"order_id",
":",
"str",
")",
"->",
"Order",
":",
"return",
"self",
".",
"_fetch",
"(",
"f'order id={order_id}'",
",",
"exc",
"=",
"OrderNotFound",
")",
"(",
"self",
".",
"_order",
")",
"(",
"order_id",
")"
] | Fetch an order by ID. | [
"Fetch",
"an",
"order",
"by",
"ID",
"."
] | 8cb68bb8d0b5f822108db1cc5dae336e3d3c3452 | https://github.com/budacom/trading-bots/blob/8cb68bb8d0b5f822108db1cc5dae336e3d3c3452/trading_bots/contrib/clients.py#L410-L412 | train |
budacom/trading-bots | trading_bots/contrib/clients.py | TradingClient.fetch_open_orders | def fetch_open_orders(self, limit: int) -> List[Order]:
"""Fetch latest open orders, must provide a limit."""
return self._fetch_orders_limit(self._open_orders, limit) | python | def fetch_open_orders(self, limit: int) -> List[Order]:
"""Fetch latest open orders, must provide a limit."""
return self._fetch_orders_limit(self._open_orders, limit) | [
"def",
"fetch_open_orders",
"(",
"self",
",",
"limit",
":",
"int",
")",
"->",
"List",
"[",
"Order",
"]",
":",
"return",
"self",
".",
"_fetch_orders_limit",
"(",
"self",
".",
"_open_orders",
",",
"limit",
")"
] | Fetch latest open orders, must provide a limit. | [
"Fetch",
"latest",
"open",
"orders",
"must",
"provide",
"a",
"limit",
"."
] | 8cb68bb8d0b5f822108db1cc5dae336e3d3c3452 | https://github.com/budacom/trading-bots/blob/8cb68bb8d0b5f822108db1cc5dae336e3d3c3452/trading_bots/contrib/clients.py#L424-L426 | train |
budacom/trading-bots | trading_bots/contrib/clients.py | TradingClient.fetch_closed_orders | def fetch_closed_orders(self, limit: int) -> List[Order]:
"""Fetch latest closed orders, must provide a limit."""
return self._fetch_orders_limit(self._closed_orders, limit) | python | def fetch_closed_orders(self, limit: int) -> List[Order]:
"""Fetch latest closed orders, must provide a limit."""
return self._fetch_orders_limit(self._closed_orders, limit) | [
"def",
"fetch_closed_orders",
"(",
"self",
",",
"limit",
":",
"int",
")",
"->",
"List",
"[",
"Order",
"]",
":",
"return",
"self",
".",
"_fetch_orders_limit",
"(",
"self",
".",
"_closed_orders",
",",
"limit",
")"
] | Fetch latest closed orders, must provide a limit. | [
"Fetch",
"latest",
"closed",
"orders",
"must",
"provide",
"a",
"limit",
"."
] | 8cb68bb8d0b5f822108db1cc5dae336e3d3c3452 | https://github.com/budacom/trading-bots/blob/8cb68bb8d0b5f822108db1cc5dae336e3d3c3452/trading_bots/contrib/clients.py#L436-L438 | train |
budacom/trading-bots | trading_bots/contrib/clients.py | TradingClient.fetch_closed_orders_since | def fetch_closed_orders_since(self, since: int) -> List[Order]:
"""Fetch closed orders since the given timestamp."""
return self._fetch_orders_since(self._closed_orders_since, since) | python | def fetch_closed_orders_since(self, since: int) -> List[Order]:
"""Fetch closed orders since the given timestamp."""
return self._fetch_orders_since(self._closed_orders_since, since) | [
"def",
"fetch_closed_orders_since",
"(",
"self",
",",
"since",
":",
"int",
")",
"->",
"List",
"[",
"Order",
"]",
":",
"return",
"self",
".",
"_fetch_orders_since",
"(",
"self",
".",
"_closed_orders_since",
",",
"since",
")"
] | Fetch closed orders since the given timestamp. | [
"Fetch",
"closed",
"orders",
"since",
"the",
"given",
"timestamp",
"."
] | 8cb68bb8d0b5f822108db1cc5dae336e3d3c3452 | https://github.com/budacom/trading-bots/blob/8cb68bb8d0b5f822108db1cc5dae336e3d3c3452/trading_bots/contrib/clients.py#L448-L450 | train |
budacom/trading-bots | trading_bots/contrib/clients.py | TradingClient.cancel_order | def cancel_order(self, order_id: str) -> str:
"""Cancel an order by ID."""
self.log.debug(f'Canceling order id={order_id} on {self.name}')
if self.dry_run: # Don't cancel if dry run
self.log.warning(f'DRY RUN: Order cancelled on {self.name}: id={order_id}')
return order_id
try: # Cancel order
self._cancel_order(order_id)
except Exception as e:
raise self.exception(OrderNotFound, f'Failed to cancel order: id={order_id}', e) from e
self.log.info(f'Order cancelled on {self.name}: id={order_id}')
return order_id | python | def cancel_order(self, order_id: str) -> str:
"""Cancel an order by ID."""
self.log.debug(f'Canceling order id={order_id} on {self.name}')
if self.dry_run: # Don't cancel if dry run
self.log.warning(f'DRY RUN: Order cancelled on {self.name}: id={order_id}')
return order_id
try: # Cancel order
self._cancel_order(order_id)
except Exception as e:
raise self.exception(OrderNotFound, f'Failed to cancel order: id={order_id}', e) from e
self.log.info(f'Order cancelled on {self.name}: id={order_id}')
return order_id | [
"def",
"cancel_order",
"(",
"self",
",",
"order_id",
":",
"str",
")",
"->",
"str",
":",
"self",
".",
"log",
".",
"debug",
"(",
"f'Canceling order id={order_id} on {self.name}'",
")",
"if",
"self",
".",
"dry_run",
":",
"self",
".",
"log",
".",
"warning",
"(",
"f'DRY RUN: Order cancelled on {self.name}: id={order_id}'",
")",
"return",
"order_id",
"try",
":",
"self",
".",
"_cancel_order",
"(",
"order_id",
")",
"except",
"Exception",
"as",
"e",
":",
"raise",
"self",
".",
"exception",
"(",
"OrderNotFound",
",",
"f'Failed to cancel order: id={order_id}'",
",",
"e",
")",
"from",
"e",
"self",
".",
"log",
".",
"info",
"(",
"f'Order cancelled on {self.name}: id={order_id}'",
")",
"return",
"order_id"
] | Cancel an order by ID. | [
"Cancel",
"an",
"order",
"by",
"ID",
"."
] | 8cb68bb8d0b5f822108db1cc5dae336e3d3c3452 | https://github.com/budacom/trading-bots/blob/8cb68bb8d0b5f822108db1cc5dae336e3d3c3452/trading_bots/contrib/clients.py#L456-L470 | train |
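A minimal usage sketch for cancel_order. The concrete client class, its constructor arguments and the order id are assumptions made for illustration; only the return value, the OrderNotFound failure mode and the dry_run behaviour come from the method above.

# `SomeExchangeTrading` stands for any concrete TradingClient subclass; its
# constructor signature is an assumption and not part of the source above.
client = SomeExchangeTrading(market='BTCUSD', dry_run=False)

try:
    cancelled_id = client.cancel_order('12345')   # returns the same id on success
except OrderNotFound:                             # exception class referenced above; import path not shown
    cancelled_id = None                           # the exchange rejected the cancellation

# With dry_run=True the method only logs a warning and returns the id unchanged.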
budacom/trading-bots | trading_bots/contrib/clients.py | TradingClient.cancel_orders | def cancel_orders(self, order_ids: List[str]) -> List[str]:
"""Cancel multiple orders by a list of IDs."""
orders_to_cancel = order_ids
self.log.debug(f'Canceling orders on {self.name}: ids={orders_to_cancel}')
cancelled_orders = []
if self.dry_run: # Don't cancel if dry run
self.log.warning(f'DRY RUN: Orders cancelled on {self.name}: {orders_to_cancel}')
return orders_to_cancel
try: # Iterate and cancel orders
if self.has_batch_cancel:
self._cancel_orders(orders_to_cancel)
cancelled_orders.append(orders_to_cancel)
orders_to_cancel.clear()
else:
for i, order_id in enumerate(orders_to_cancel):
self._cancel_order(order_id)
cancelled_orders.append(order_id)
orders_to_cancel.pop(i)
except Exception as e:
msg = f'Failed to cancel {len(orders_to_cancel)} orders on {self.name}: ids={orders_to_cancel}'
raise self.exception(OrderNotFound, msg, e) from e
self.log.info(f'Orders cancelled on {self.name}: ids={cancelled_orders}')
return cancelled_orders | python | def cancel_orders(self, order_ids: List[str]) -> List[str]:
"""Cancel multiple orders by a list of IDs."""
orders_to_cancel = order_ids
self.log.debug(f'Canceling orders on {self.name}: ids={orders_to_cancel}')
cancelled_orders = []
if self.dry_run: # Don't cancel if dry run
self.log.warning(f'DRY RUN: Orders cancelled on {self.name}: {orders_to_cancel}')
return orders_to_cancel
try: # Iterate and cancel orders
if self.has_batch_cancel:
self._cancel_orders(orders_to_cancel)
cancelled_orders.append(orders_to_cancel)
orders_to_cancel.clear()
else:
for i, order_id in enumerate(orders_to_cancel):
self._cancel_order(order_id)
cancelled_orders.append(order_id)
orders_to_cancel.pop(i)
except Exception as e:
msg = f'Failed to cancel {len(orders_to_cancel)} orders on {self.name}: ids={orders_to_cancel}'
raise self.exception(OrderNotFound, msg, e) from e
self.log.info(f'Orders cancelled on {self.name}: ids={cancelled_orders}')
return cancelled_orders | [
"def",
"cancel_orders",
"(",
"self",
",",
"order_ids",
":",
"List",
"[",
"str",
"]",
")",
"->",
"List",
"[",
"str",
"]",
":",
"orders_to_cancel",
"=",
"order_ids",
"self",
".",
"log",
".",
"debug",
"(",
"f'Canceling orders on {self.name}: ids={orders_to_cancel}'",
")",
"cancelled_orders",
"=",
"[",
"]",
"if",
"self",
".",
"dry_run",
":",
"self",
".",
"log",
".",
"warning",
"(",
"f'DRY RUN: Orders cancelled on {self.name}: {orders_to_cancel}'",
")",
"return",
"orders_to_cancel",
"try",
":",
"if",
"self",
".",
"has_batch_cancel",
":",
"self",
".",
"_cancel_orders",
"(",
"orders_to_cancel",
")",
"cancelled_orders",
".",
"append",
"(",
"orders_to_cancel",
")",
"orders_to_cancel",
".",
"clear",
"(",
")",
"else",
":",
"for",
"i",
",",
"order_id",
"in",
"enumerate",
"(",
"orders_to_cancel",
")",
":",
"self",
".",
"_cancel_order",
"(",
"order_id",
")",
"cancelled_orders",
".",
"append",
"(",
"order_id",
")",
"orders_to_cancel",
".",
"pop",
"(",
"i",
")",
"except",
"Exception",
"as",
"e",
":",
"msg",
"=",
"f'Failed to cancel {len(orders_to_cancel)} orders on {self.name}: ids={orders_to_cancel}'",
"raise",
"self",
".",
"exception",
"(",
"OrderNotFound",
",",
"msg",
",",
"e",
")",
"from",
"e",
"self",
".",
"log",
".",
"info",
"(",
"f'Orders cancelled on {self.name}: ids={cancelled_orders}'",
")",
"return",
"cancelled_orders"
] | Cancel multiple orders by a list of IDs. | [
"Cancel",
"multiple",
"orders",
"by",
"a",
"list",
"of",
"IDs",
"."
] | 8cb68bb8d0b5f822108db1cc5dae336e3d3c3452 | https://github.com/budacom/trading-bots/blob/8cb68bb8d0b5f822108db1cc5dae336e3d3c3452/trading_bots/contrib/clients.py#L476-L501 | train |
budacom/trading-bots | trading_bots/contrib/clients.py | TradingClient.cancel_all_orders | def cancel_all_orders(self) -> List[str]:
"""Cancel all open orders."""
order_ids = [o.id for o in self.fetch_all_open_orders()]
return self.cancel_orders(order_ids) | python | def cancel_all_orders(self) -> List[str]:
"""Cancel all open orders."""
order_ids = [o.id for o in self.fetch_all_open_orders()]
return self.cancel_orders(order_ids) | [
"def",
"cancel_all_orders",
"(",
"self",
")",
"->",
"List",
"[",
"str",
"]",
":",
"order_ids",
"=",
"[",
"o",
".",
"id",
"for",
"o",
"in",
"self",
".",
"fetch_all_open_orders",
"(",
")",
"]",
"return",
"self",
".",
"cancel_orders",
"(",
"order_ids",
")"
] | Cancel all open orders. | [
"Cancel",
"all",
"open",
"orders",
"."
] | 8cb68bb8d0b5f822108db1cc5dae336e3d3c3452 | https://github.com/budacom/trading-bots/blob/8cb68bb8d0b5f822108db1cc5dae336e3d3c3452/trading_bots/contrib/clients.py#L503-L506 | train |
budacom/trading-bots | trading_bots/contrib/clients.py | TradingClient.min_order_amount | def min_order_amount(self) -> Money:
"""Minimum amount to place an order."""
return self._fetch('minimum order amount', self.market.code)(self._min_order_amount)() | python | def min_order_amount(self) -> Money:
"""Minimum amount to place an order."""
return self._fetch('minimum order amount', self.market.code)(self._min_order_amount)() | [
"def",
"min_order_amount",
"(",
"self",
")",
"->",
"Money",
":",
"return",
"self",
".",
"_fetch",
"(",
"'minimum order amount'",
",",
"self",
".",
"market",
".",
"code",
")",
"(",
"self",
".",
"_min_order_amount",
")",
"(",
")"
] | Minimum amount to place an order. | [
"Minimum",
"amount",
"to",
"place",
"an",
"order",
"."
] | 8cb68bb8d0b5f822108db1cc5dae336e3d3c3452 | https://github.com/budacom/trading-bots/blob/8cb68bb8d0b5f822108db1cc5dae336e3d3c3452/trading_bots/contrib/clients.py#L517-L519 | train |
budacom/trading-bots | trading_bots/contrib/clients.py | TradingClient.place_market_order | def place_market_order(self, side: Side, amount: Number) -> Order:
"""Place a market order."""
return self.place_order(side, OrderType.MARKET, amount) | python | def place_market_order(self, side: Side, amount: Number) -> Order:
"""Place a market order."""
return self.place_order(side, OrderType.MARKET, amount) | [
"def",
"place_market_order",
"(",
"self",
",",
"side",
":",
"Side",
",",
"amount",
":",
"Number",
")",
"->",
"Order",
":",
"return",
"self",
".",
"place_order",
"(",
"side",
",",
"OrderType",
".",
"MARKET",
",",
"amount",
")"
] | Place a market order. | [
"Place",
"a",
"market",
"order",
"."
] | 8cb68bb8d0b5f822108db1cc5dae336e3d3c3452 | https://github.com/budacom/trading-bots/blob/8cb68bb8d0b5f822108db1cc5dae336e3d3c3452/trading_bots/contrib/clients.py#L552-L554 | train |
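A companion sketch for the order-placement side. Side.BUY and OrderType.MARKET are assumed members of the enums referenced in the signatures above; `client` is again a hypothetical concrete TradingClient instance.

from decimal import Decimal

buy = client.place_market_order(Side.BUY, Decimal('0.05'))
# place_market_order simply delegates to the generic helper:
# client.place_order(Side.BUY, OrderType.MARKET, Decimal('0.05'))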
BD2KGenomics/protect | attic/encrypt_files_in_dir_to_s3.py | main | def main():
"""
This is the main module for the script. The script will accept a file, or a directory, and then
encrypt it with a provided key before pushing it to S3 into a specified bucket.
"""
parser = argparse.ArgumentParser(description=main.__doc__, add_help=True)
parser.add_argument('-M', '--master_key', dest='master_key', help='Path to the master key ' +
                        'used for the encryption. Data is transferred without encryption if this ' +
'is not provided.', type=str, required=False, default=None)
parser.add_argument('-B', '--bucket', dest='bucket', help='S3 bucket.', type=str, required=True)
parser.add_argument('-R', '--remote_dir', dest='remote_dir', help='Pseudo directory within ' +
'the bucket to store the file(s). NOTE: Folder structure below ' +
'REMOTE_DIR will be retained.', type=str, required=False, default='')
parser.add_argument('data', help='File(s) or folder(s) to transfer to S3.', type=str, nargs='+')
params = parser.parse_args()
# Input handling
if params.master_key and not os.path.exists(params.master_key):
raise InputParameterError('The master key was not found at ' +
params.master_key)
# If the user doesn't have ~/.boto , it doesn't even make sense to go ahead
if not os.path.exists(os.path.expanduser('~/.boto')):
raise RuntimeError('~/.boto not found')
# Ensure that the remote directory doesn't start with a /
if params.remote_dir.startswith('/'):
raise InputParameterError('The remote dir cannot start with a \'/\'')
# Process each of the input arguments.
for datum in params.data:
datum = os.path.abspath(datum)
if not os.path.exists(datum):
print('ERROR: %s could not be found.' % datum, file=sys.stderr)
continue
write_to_s3(datum, params.master_key, params.bucket, params.remote_dir)
return None | python | def main():
"""
This is the main module for the script. The script will accept a file, or a directory, and then
encrypt it with a provided key before pushing it to S3 into a specified bucket.
"""
parser = argparse.ArgumentParser(description=main.__doc__, add_help=True)
parser.add_argument('-M', '--master_key', dest='master_key', help='Path to the master key ' +
                        'used for the encryption. Data is transferred without encryption if this ' +
'is not provided.', type=str, required=False, default=None)
parser.add_argument('-B', '--bucket', dest='bucket', help='S3 bucket.', type=str, required=True)
parser.add_argument('-R', '--remote_dir', dest='remote_dir', help='Pseudo directory within ' +
'the bucket to store the file(s). NOTE: Folder structure below ' +
'REMOTE_DIR will be retained.', type=str, required=False, default='')
parser.add_argument('data', help='File(s) or folder(s) to transfer to S3.', type=str, nargs='+')
params = parser.parse_args()
# Input handling
if params.master_key and not os.path.exists(params.master_key):
raise InputParameterError('The master key was not found at ' +
params.master_key)
# If the user doesn't have ~/.boto , it doesn't even make sense to go ahead
if not os.path.exists(os.path.expanduser('~/.boto')):
raise RuntimeError('~/.boto not found')
# Ensure that the remote directory doesn't start with a /
if params.remote_dir.startswith('/'):
raise InputParameterError('The remote dir cannot start with a \'/\'')
# Process each of the input arguments.
for datum in params.data:
datum = os.path.abspath(datum)
if not os.path.exists(datum):
print('ERROR: %s could not be found.' % datum, file=sys.stderr)
continue
write_to_s3(datum, params.master_key, params.bucket, params.remote_dir)
return None | [
"def",
"main",
"(",
")",
":",
"parser",
"=",
"argparse",
".",
"ArgumentParser",
"(",
"description",
"=",
"main",
".",
"__doc__",
",",
"add_help",
"=",
"True",
")",
"parser",
".",
"add_argument",
"(",
"'-M'",
",",
"'--master_key'",
",",
"dest",
"=",
"'master_key'",
",",
"help",
"=",
"'Path to the master key '",
"+",
"'used for the encryption. Data is transferred without encryption if this'",
"+",
"'is not provided.'",
",",
"type",
"=",
"str",
",",
"required",
"=",
"False",
",",
"default",
"=",
"None",
")",
"parser",
".",
"add_argument",
"(",
"'-B'",
",",
"'--bucket'",
",",
"dest",
"=",
"'bucket'",
",",
"help",
"=",
"'S3 bucket.'",
",",
"type",
"=",
"str",
",",
"required",
"=",
"True",
")",
"parser",
".",
"add_argument",
"(",
"'-R'",
",",
"'--remote_dir'",
",",
"dest",
"=",
"'remote_dir'",
",",
"help",
"=",
"'Pseudo directory within '",
"+",
"'the bucket to store the file(s). NOTE: Folder structure below '",
"+",
"'REMOTE_DIR will be retained.'",
",",
"type",
"=",
"str",
",",
"required",
"=",
"False",
",",
"default",
"=",
"''",
")",
"parser",
".",
"add_argument",
"(",
"'data'",
",",
"help",
"=",
"'File(s) or folder(s) to transfer to S3.'",
",",
"type",
"=",
"str",
",",
"nargs",
"=",
"'+'",
")",
"params",
"=",
"parser",
".",
"parse_args",
"(",
")",
"if",
"params",
".",
"master_key",
"and",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"params",
".",
"master_key",
")",
":",
"raise",
"InputParameterError",
"(",
"'The master key was not found at '",
"+",
"params",
".",
"master_key",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"os",
".",
"path",
".",
"expanduser",
"(",
"'~/.boto'",
")",
")",
":",
"raise",
"RuntimeError",
"(",
"'~/.boto not found'",
")",
"if",
"params",
".",
"remote_dir",
".",
"startswith",
"(",
"'/'",
")",
":",
"raise",
"InputParameterError",
"(",
"'The remote dir cannot start with a \\'/\\''",
")",
"for",
"datum",
"in",
"params",
".",
"data",
":",
"datum",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"datum",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"datum",
")",
":",
"print",
"(",
"'ERROR: %s could not be found.'",
"%",
"datum",
",",
"file",
"=",
"sys",
".",
"stderr",
")",
"continue",
"write_to_s3",
"(",
"datum",
",",
"params",
".",
"master_key",
",",
"params",
".",
"bucket",
",",
"params",
".",
"remote_dir",
")",
"return",
"None"
] | This is the main module for the script. The script will accept a file, or a directory, and then
encrypt it with a provided key before pushing it to S3 into a specified bucket. | [
"This",
"is",
"the",
"main",
"module",
"for",
"the",
"script",
".",
"The",
"script",
"will",
"accept",
"a",
"file",
"or",
"a",
"directory",
"and",
"then",
"encrypt",
"it",
"with",
"a",
"provided",
"key",
"before",
"pushing",
"it",
"to",
"S3",
"into",
"a",
"specified",
"bucket",
"."
] | 06310682c50dcf8917b912c8e551299ff7ee41ce | https://github.com/BD2KGenomics/protect/blob/06310682c50dcf8917b912c8e551299ff7ee41ce/attic/encrypt_files_in_dir_to_s3.py#L157-L190 | train |
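An invocation sketch grounded in the argparse flags declared in main(); the key path, bucket name and input paths are placeholders.

# Command line (flags exactly as declared by the parser above):
#   python encrypt_files_in_dir_to_s3.py -M /keys/master.key -B my-protect-bucket \
#       -R project/run1 reads/sample1.fastq.gz reads/sample2.fastq.gz
#
# Programmatic equivalent of one loop iteration in main(); write_to_s3 is the
# helper defined elsewhere in this script, and the positional argument order
# mirrors the call made in main().
import os

datum = os.path.abspath('reads/sample1.fastq.gz')
if os.path.exists(datum):
    write_to_s3(datum, '/keys/master.key', 'my-protect-bucket', 'project/run1')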
BD2KGenomics/protect | attic/encrypt_files_in_dir_to_s3.py | BucketInfo._get_bucket_endpoint | def _get_bucket_endpoint(self):
"""
Queries S3 to identify the region hosting the provided bucket.
"""
conn = S3Connection()
bucket = conn.lookup(self.bucket_name)
if not bucket:
# TODO: Make the bucket here?
raise InputParameterError('The provided bucket %s doesn\'t exist' % self.bucket_name)
endpoint = str(bucket.get_location())
return endpoint | python | def _get_bucket_endpoint(self):
"""
Queries S3 to identify the region hosting the provided bucket.
"""
conn = S3Connection()
bucket = conn.lookup(self.bucket_name)
if not bucket:
# TODO: Make the bucket here?
raise InputParameterError('The provided bucket %s doesn\'t exist' % self.bucket_name)
endpoint = str(bucket.get_location())
return endpoint | [
"def",
"_get_bucket_endpoint",
"(",
"self",
")",
":",
"conn",
"=",
"S3Connection",
"(",
")",
"bucket",
"=",
"conn",
".",
"lookup",
"(",
"self",
".",
"bucket_name",
")",
"if",
"not",
"bucket",
":",
"raise",
"InputParameterError",
"(",
"'The provided bucket %s doesn\\'t exist'",
"%",
"self",
".",
"bucket_name",
")",
"endpoint",
"=",
"str",
"(",
"bucket",
".",
"get_location",
"(",
")",
")",
"return",
"endpoint"
] | Queries S3 to identify the region hosting the provided bucket. | [
"Queries",
"S3",
"to",
"identify",
"the",
"region",
"hosting",
"the",
"provided",
"bucket",
"."
] | 06310682c50dcf8917b912c8e551299ff7ee41ce | https://github.com/BD2KGenomics/protect/blob/06310682c50dcf8917b912c8e551299ff7ee41ce/attic/encrypt_files_in_dir_to_s3.py#L82-L92 | train |
BD2KGenomics/protect | src/protect/alignment/rna.py | align_rna | def align_rna(job, fastqs, univ_options, star_options):
"""
A wrapper for the entire rna alignment subgraph.
:param list fastqs: The input fastqs for alignment
:param dict univ_options: Dict of universal options used by almost all tools
:param dict star_options: Options specific to star
:return: Dict containing input bam and the generated index (.bam.bai)
:rtype: dict
"""
star = job.wrapJobFn(run_star, fastqs, univ_options, star_options,
cores=star_options['n'],
memory=PromisedRequirement(lambda x: int(1.85 * x.size),
star_options['index']),
disk=PromisedRequirement(star_disk, fastqs, star_options['index']))
s_and_i = job.wrapJobFn(sort_and_index_star, star.rv(), univ_options,
star_options).encapsulate()
job.addChild(star)
star.addChild(s_and_i)
return s_and_i.rv() | python | def align_rna(job, fastqs, univ_options, star_options):
"""
A wrapper for the entire rna alignment subgraph.
:param list fastqs: The input fastqs for alignment
:param dict univ_options: Dict of universal options used by almost all tools
:param dict star_options: Options specific to star
:return: Dict containing input bam and the generated index (.bam.bai)
:rtype: dict
"""
star = job.wrapJobFn(run_star, fastqs, univ_options, star_options,
cores=star_options['n'],
memory=PromisedRequirement(lambda x: int(1.85 * x.size),
star_options['index']),
disk=PromisedRequirement(star_disk, fastqs, star_options['index']))
s_and_i = job.wrapJobFn(sort_and_index_star, star.rv(), univ_options,
star_options).encapsulate()
job.addChild(star)
star.addChild(s_and_i)
return s_and_i.rv() | [
"def",
"align_rna",
"(",
"job",
",",
"fastqs",
",",
"univ_options",
",",
"star_options",
")",
":",
"star",
"=",
"job",
".",
"wrapJobFn",
"(",
"run_star",
",",
"fastqs",
",",
"univ_options",
",",
"star_options",
",",
"cores",
"=",
"star_options",
"[",
"'n'",
"]",
",",
"memory",
"=",
"PromisedRequirement",
"(",
"lambda",
"x",
":",
"int",
"(",
"1.85",
"*",
"x",
".",
"size",
")",
",",
"star_options",
"[",
"'index'",
"]",
")",
",",
"disk",
"=",
"PromisedRequirement",
"(",
"star_disk",
",",
"fastqs",
",",
"star_options",
"[",
"'index'",
"]",
")",
")",
"s_and_i",
"=",
"job",
".",
"wrapJobFn",
"(",
"sort_and_index_star",
",",
"star",
".",
"rv",
"(",
")",
",",
"univ_options",
",",
"star_options",
")",
".",
"encapsulate",
"(",
")",
"job",
".",
"addChild",
"(",
"star",
")",
"star",
".",
"addChild",
"(",
"s_and_i",
")",
"return",
"s_and_i",
".",
"rv",
"(",
")"
] | A wrapper for the entire rna alignment subgraph.
:param list fastqs: The input fastqs for alignment
:param dict univ_options: Dict of universal options used by almost all tools
:param dict star_options: Options specific to star
:return: Dict containing input bam and the generated index (.bam.bai)
:rtype: dict | [
"A",
"wrapper",
"for",
"the",
"entire",
"rna",
"alignment",
"subgraph",
"."
] | 06310682c50dcf8917b912c8e551299ff7ee41ce | https://github.com/BD2KGenomics/protect/blob/06310682c50dcf8917b912c8e551299ff7ee41ce/src/protect/alignment/rna.py#L39-L58 | train |
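A wiring sketch showing where align_rna sits in a Toil workflow. The contents of the option dictionaries are ProTECT-specific and are only named, not spelled out, here.

from protect.alignment.rna import align_rna

def pipeline(job, fastqs, univ_options, star_options):
    # wrapJobFn passes `job` implicitly as the first argument of align_rna
    rna = job.wrapJobFn(align_rna, fastqs, univ_options, star_options).encapsulate()
    job.addChild(rna)
    # The promise resolves to the dict assembled by sort_and_index_star:
    # {'rna_genome': {...}, 'rna_transcriptome.bam': fsID, 'rnaChimeric.out.junction': fsID}
    return rna.rv()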
BD2KGenomics/protect | src/protect/alignment/rna.py | run_star | def run_star(job, fastqs, univ_options, star_options):
"""
Align a pair of fastqs with STAR.
:param list fastqs: The input fastqs for alignment
:param dict univ_options: Dict of universal options used by almost all tools
:param dict star_options: Options specific to star
:return: Dict containing output genome bam, genome bai, and transcriptome bam
output_files:
|- 'rnaAligned.toTranscriptome.out.bam': fsID
+- 'rnaAligned.out.bam': fsID
+- 'rnaChimeric.out.junction': fsID
:rtype: dict
"""
assert star_options['type'] in ('star', 'starlong')
work_dir = os.getcwd()
input_files = {
'rna_cutadapt_1.fastq': fastqs[0],
'rna_cutadapt_2.fastq': fastqs[1],
'star_index.tar.gz': star_options['index']}
input_files = get_files_from_filestore(job, input_files, work_dir,
docker=False)
# Handle gzipped file
gz = '.gz' if is_gzipfile(input_files['rna_cutadapt_1.fastq']) else ''
if gz:
for read_file in 'rna_cutadapt_1.fastq', 'rna_cutadapt_2.fastq':
os.symlink(read_file, read_file + gz)
input_files[read_file + gz] = input_files[read_file] + gz
# Untar the index
input_files['star_index'] = untargz(input_files['star_index.tar.gz'], work_dir)
# Check to see if user is using a STAR-Fusion index
star_fusion_idx = os.path.join(input_files['star_index'], 'ref_genome.fa.star.idx')
if os.path.exists(star_fusion_idx):
input_files['star_index'] = star_fusion_idx
input_files = {key: docker_path(path, work_dir=work_dir) for key, path in input_files.items()}
# Using recommended STAR-Fusion parameters:
# https://github.com/STAR-Fusion/STAR-Fusion/wiki
parameters = ['--runThreadN', str(star_options['n']),
'--genomeDir', input_files['star_index'],
'--twopassMode', 'Basic',
'--outReadsUnmapped', 'None',
'--chimSegmentMin', '12',
'--chimJunctionOverhangMin', '12',
'--alignSJDBoverhangMin', '10',
'--alignMatesGapMax', '200000',
'--alignIntronMax', '200000',
'--chimSegmentReadGapMax', 'parameter', '3',
'--alignSJstitchMismatchNmax', '5', '-1', '5', '5',
'--outFileNamePrefix', 'rna',
'--readFilesIn',
input_files['rna_cutadapt_1.fastq' + gz],
input_files['rna_cutadapt_2.fastq' + gz],
'--outSAMattributes', 'NH', 'HI', 'AS', 'NM', 'MD',
'--outSAMtype', 'BAM', 'Unsorted',
'--quantMode', 'TranscriptomeSAM']
if gz:
parameters.extend(['--readFilesCommand', 'zcat'])
if star_options['type'] == 'star':
docker_call(tool='star', tool_parameters=parameters, work_dir=work_dir,
dockerhub=univ_options['dockerhub'], tool_version=star_options['version'])
else:
docker_call(tool='starlong', tool_parameters=parameters, work_dir=work_dir,
dockerhub=univ_options['dockerhub'], tool_version=star_options['version'])
output_files = defaultdict()
for output_file in ['rnaAligned.toTranscriptome.out.bam',
'rnaAligned.out.bam',
'rnaChimeric.out.junction']:
output_files[output_file] = job.fileStore.writeGlobalFile('/'.join([work_dir, output_file]))
export_results(job, output_files['rnaAligned.toTranscriptome.out.bam'], 'rna_transcriptome.bam',
univ_options, subfolder='alignments')
export_results(job, output_files['rnaChimeric.out.junction'], 'rna_chimeric.junction',
univ_options, subfolder='mutations/fusions')
job.fileStore.logToMaster('Ran STAR on %s successfully' % univ_options['patient'])
return output_files | python | def run_star(job, fastqs, univ_options, star_options):
"""
Align a pair of fastqs with STAR.
:param list fastqs: The input fastqs for alignment
:param dict univ_options: Dict of universal options used by almost all tools
:param dict star_options: Options specific to star
:return: Dict containing output genome bam, genome bai, and transcriptome bam
output_files:
|- 'rnaAligned.toTranscriptome.out.bam': fsID
+- 'rnaAligned.out.bam': fsID
+- 'rnaChimeric.out.junction': fsID
:rtype: dict
"""
assert star_options['type'] in ('star', 'starlong')
work_dir = os.getcwd()
input_files = {
'rna_cutadapt_1.fastq': fastqs[0],
'rna_cutadapt_2.fastq': fastqs[1],
'star_index.tar.gz': star_options['index']}
input_files = get_files_from_filestore(job, input_files, work_dir,
docker=False)
# Handle gzipped file
gz = '.gz' if is_gzipfile(input_files['rna_cutadapt_1.fastq']) else ''
if gz:
for read_file in 'rna_cutadapt_1.fastq', 'rna_cutadapt_2.fastq':
os.symlink(read_file, read_file + gz)
input_files[read_file + gz] = input_files[read_file] + gz
# Untar the index
input_files['star_index'] = untargz(input_files['star_index.tar.gz'], work_dir)
# Check to see if user is using a STAR-Fusion index
star_fusion_idx = os.path.join(input_files['star_index'], 'ref_genome.fa.star.idx')
if os.path.exists(star_fusion_idx):
input_files['star_index'] = star_fusion_idx
input_files = {key: docker_path(path, work_dir=work_dir) for key, path in input_files.items()}
# Using recommended STAR-Fusion parameters:
# https://github.com/STAR-Fusion/STAR-Fusion/wiki
parameters = ['--runThreadN', str(star_options['n']),
'--genomeDir', input_files['star_index'],
'--twopassMode', 'Basic',
'--outReadsUnmapped', 'None',
'--chimSegmentMin', '12',
'--chimJunctionOverhangMin', '12',
'--alignSJDBoverhangMin', '10',
'--alignMatesGapMax', '200000',
'--alignIntronMax', '200000',
'--chimSegmentReadGapMax', 'parameter', '3',
'--alignSJstitchMismatchNmax', '5', '-1', '5', '5',
'--outFileNamePrefix', 'rna',
'--readFilesIn',
input_files['rna_cutadapt_1.fastq' + gz],
input_files['rna_cutadapt_2.fastq' + gz],
'--outSAMattributes', 'NH', 'HI', 'AS', 'NM', 'MD',
'--outSAMtype', 'BAM', 'Unsorted',
'--quantMode', 'TranscriptomeSAM']
if gz:
parameters.extend(['--readFilesCommand', 'zcat'])
if star_options['type'] == 'star':
docker_call(tool='star', tool_parameters=parameters, work_dir=work_dir,
dockerhub=univ_options['dockerhub'], tool_version=star_options['version'])
else:
docker_call(tool='starlong', tool_parameters=parameters, work_dir=work_dir,
dockerhub=univ_options['dockerhub'], tool_version=star_options['version'])
output_files = defaultdict()
for output_file in ['rnaAligned.toTranscriptome.out.bam',
'rnaAligned.out.bam',
'rnaChimeric.out.junction']:
output_files[output_file] = job.fileStore.writeGlobalFile('/'.join([work_dir, output_file]))
export_results(job, output_files['rnaAligned.toTranscriptome.out.bam'], 'rna_transcriptome.bam',
univ_options, subfolder='alignments')
export_results(job, output_files['rnaChimeric.out.junction'], 'rna_chimeric.junction',
univ_options, subfolder='mutations/fusions')
job.fileStore.logToMaster('Ran STAR on %s successfully' % univ_options['patient'])
return output_files | [
"def",
"run_star",
"(",
"job",
",",
"fastqs",
",",
"univ_options",
",",
"star_options",
")",
":",
"assert",
"star_options",
"[",
"'type'",
"]",
"in",
"(",
"'star'",
",",
"'starlong'",
")",
"work_dir",
"=",
"os",
".",
"getcwd",
"(",
")",
"input_files",
"=",
"{",
"'rna_cutadapt_1.fastq'",
":",
"fastqs",
"[",
"0",
"]",
",",
"'rna_cutadapt_2.fastq'",
":",
"fastqs",
"[",
"1",
"]",
",",
"'star_index.tar.gz'",
":",
"star_options",
"[",
"'index'",
"]",
"}",
"input_files",
"=",
"get_files_from_filestore",
"(",
"job",
",",
"input_files",
",",
"work_dir",
",",
"docker",
"=",
"False",
")",
"gz",
"=",
"'.gz'",
"if",
"is_gzipfile",
"(",
"input_files",
"[",
"'rna_cutadapt_1.fastq'",
"]",
")",
"else",
"''",
"if",
"gz",
":",
"for",
"read_file",
"in",
"'rna_cutadapt_1.fastq'",
",",
"'rna_cutadapt_2.fastq'",
":",
"os",
".",
"symlink",
"(",
"read_file",
",",
"read_file",
"+",
"gz",
")",
"input_files",
"[",
"read_file",
"+",
"gz",
"]",
"=",
"input_files",
"[",
"read_file",
"]",
"+",
"gz",
"input_files",
"[",
"'star_index'",
"]",
"=",
"untargz",
"(",
"input_files",
"[",
"'star_index.tar.gz'",
"]",
",",
"work_dir",
")",
"star_fusion_idx",
"=",
"os",
".",
"path",
".",
"join",
"(",
"input_files",
"[",
"'star_index'",
"]",
",",
"'ref_genome.fa.star.idx'",
")",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"star_fusion_idx",
")",
":",
"input_files",
"[",
"'star_index'",
"]",
"=",
"star_fusion_idx",
"input_files",
"=",
"{",
"key",
":",
"docker_path",
"(",
"path",
",",
"work_dir",
"=",
"work_dir",
")",
"for",
"key",
",",
"path",
"in",
"input_files",
".",
"items",
"(",
")",
"}",
"parameters",
"=",
"[",
"'--runThreadN'",
",",
"str",
"(",
"star_options",
"[",
"'n'",
"]",
")",
",",
"'--genomeDir'",
",",
"input_files",
"[",
"'star_index'",
"]",
",",
"'--twopassMode'",
",",
"'Basic'",
",",
"'--outReadsUnmapped'",
",",
"'None'",
",",
"'--chimSegmentMin'",
",",
"'12'",
",",
"'--chimJunctionOverhangMin'",
",",
"'12'",
",",
"'--alignSJDBoverhangMin'",
",",
"'10'",
",",
"'--alignMatesGapMax'",
",",
"'200000'",
",",
"'--alignIntronMax'",
",",
"'200000'",
",",
"'--chimSegmentReadGapMax'",
",",
"'parameter'",
",",
"'3'",
",",
"'--alignSJstitchMismatchNmax'",
",",
"'5'",
",",
"'-1'",
",",
"'5'",
",",
"'5'",
",",
"'--outFileNamePrefix'",
",",
"'rna'",
",",
"'--readFilesIn'",
",",
"input_files",
"[",
"'rna_cutadapt_1.fastq'",
"+",
"gz",
"]",
",",
"input_files",
"[",
"'rna_cutadapt_2.fastq'",
"+",
"gz",
"]",
",",
"'--outSAMattributes'",
",",
"'NH'",
",",
"'HI'",
",",
"'AS'",
",",
"'NM'",
",",
"'MD'",
",",
"'--outSAMtype'",
",",
"'BAM'",
",",
"'Unsorted'",
",",
"'--quantMode'",
",",
"'TranscriptomeSAM'",
"]",
"if",
"gz",
":",
"parameters",
".",
"extend",
"(",
"[",
"'--readFilesCommand'",
",",
"'zcat'",
"]",
")",
"if",
"star_options",
"[",
"'type'",
"]",
"==",
"'star'",
":",
"docker_call",
"(",
"tool",
"=",
"'star'",
",",
"tool_parameters",
"=",
"parameters",
",",
"work_dir",
"=",
"work_dir",
",",
"dockerhub",
"=",
"univ_options",
"[",
"'dockerhub'",
"]",
",",
"tool_version",
"=",
"star_options",
"[",
"'version'",
"]",
")",
"else",
":",
"docker_call",
"(",
"tool",
"=",
"'starlong'",
",",
"tool_parameters",
"=",
"parameters",
",",
"work_dir",
"=",
"work_dir",
",",
"dockerhub",
"=",
"univ_options",
"[",
"'dockerhub'",
"]",
",",
"tool_version",
"=",
"star_options",
"[",
"'version'",
"]",
")",
"output_files",
"=",
"defaultdict",
"(",
")",
"for",
"output_file",
"in",
"[",
"'rnaAligned.toTranscriptome.out.bam'",
",",
"'rnaAligned.out.bam'",
",",
"'rnaChimeric.out.junction'",
"]",
":",
"output_files",
"[",
"output_file",
"]",
"=",
"job",
".",
"fileStore",
".",
"writeGlobalFile",
"(",
"'/'",
".",
"join",
"(",
"[",
"work_dir",
",",
"output_file",
"]",
")",
")",
"export_results",
"(",
"job",
",",
"output_files",
"[",
"'rnaAligned.toTranscriptome.out.bam'",
"]",
",",
"'rna_transcriptome.bam'",
",",
"univ_options",
",",
"subfolder",
"=",
"'alignments'",
")",
"export_results",
"(",
"job",
",",
"output_files",
"[",
"'rnaChimeric.out.junction'",
"]",
",",
"'rna_chimeric.junction'",
",",
"univ_options",
",",
"subfolder",
"=",
"'mutations/fusions'",
")",
"job",
".",
"fileStore",
".",
"logToMaster",
"(",
"'Ran STAR on %s successfully'",
"%",
"univ_options",
"[",
"'patient'",
"]",
")",
"return",
"output_files"
] | Align a pair of fastqs with STAR.
:param list fastqs: The input fastqs for alignment
:param dict univ_options: Dict of universal options used by almost all tools
:param dict star_options: Options specific to star
:return: Dict containing output genome bam, genome bai, and transcriptome bam
output_files:
|- 'rnaAligned.toTranscriptome.out.bam': fsID
+- 'rnaAligned.out.bam': fsID
+- 'rnaChimeric.out.junction': fsID
:rtype: dict | [
"Align",
"a",
"pair",
"of",
"fastqs",
"with",
"STAR",
"."
] | 06310682c50dcf8917b912c8e551299ff7ee41ce | https://github.com/BD2KGenomics/protect/blob/06310682c50dcf8917b912c8e551299ff7ee41ce/src/protect/alignment/rna.py#L61-L138 | train |
BD2KGenomics/protect | src/protect/alignment/rna.py | sort_and_index_star | def sort_and_index_star(job, star_bams, univ_options, star_options):
"""
A wrapper for sorting and indexing the genomic star bam generated by run_star. It is required
since run_star returns a dict of 2 bams
:param dict star_bams: The bams from run_star
:param dict univ_options: Dict of universal options used by almost all tools
:param dict star_options: Options specific to star
:return: Dict containing input bam and the generated index (.bam.bai)
output_files:
|- 'rna_transcriptome.bam': fsID
+- 'rna_genome':
|- 'rna_sorted.bam': fsID
+- 'rna_sorted.bam.bai': fsID
+- 'rnaChimeric.out.junction': fsID
:rtype: dict
"""
star_options['samtools']['n'] = star_options['n']
sort = job.wrapJobFn(sort_bamfile, star_bams['rnaAligned.out.bam'], 'rna', univ_options,
samtools_options=star_options['samtools'],
disk=PromisedRequirement(sort_disk, star_bams['rnaAligned.out.bam']))
index = job.wrapJobFn(index_bamfile, sort.rv(), 'rna', univ_options,
samtools_options=star_options['samtools'], sample_info='genome_sorted',
disk=PromisedRequirement(index_disk, sort.rv()))
job.addChild(sort)
sort.addChild(index)
return {'rna_genome': index.rv(),
'rna_transcriptome.bam': star_bams['rnaAligned.toTranscriptome.out.bam'],
'rnaChimeric.out.junction': star_bams['rnaChimeric.out.junction']} | python | def sort_and_index_star(job, star_bams, univ_options, star_options):
"""
A wrapper for sorting and indexing the genomic star bam generated by run_star. It is required
since run_star returns a dict of 2 bams
:param dict star_bams: The bams from run_star
:param dict univ_options: Dict of universal options used by almost all tools
:param dict star_options: Options specific to star
:return: Dict containing input bam and the generated index (.bam.bai)
output_files:
|- 'rna_transcriptome.bam': fsID
+- 'rna_genome':
|- 'rna_sorted.bam': fsID
+- 'rna_sorted.bam.bai': fsID
+- 'rnaChimeric.out.junction': fsID
:rtype: dict
"""
star_options['samtools']['n'] = star_options['n']
sort = job.wrapJobFn(sort_bamfile, star_bams['rnaAligned.out.bam'], 'rna', univ_options,
samtools_options=star_options['samtools'],
disk=PromisedRequirement(sort_disk, star_bams['rnaAligned.out.bam']))
index = job.wrapJobFn(index_bamfile, sort.rv(), 'rna', univ_options,
samtools_options=star_options['samtools'], sample_info='genome_sorted',
disk=PromisedRequirement(index_disk, sort.rv()))
job.addChild(sort)
sort.addChild(index)
return {'rna_genome': index.rv(),
'rna_transcriptome.bam': star_bams['rnaAligned.toTranscriptome.out.bam'],
'rnaChimeric.out.junction': star_bams['rnaChimeric.out.junction']} | [
"def",
"sort_and_index_star",
"(",
"job",
",",
"star_bams",
",",
"univ_options",
",",
"star_options",
")",
":",
"star_options",
"[",
"'samtools'",
"]",
"[",
"'n'",
"]",
"=",
"star_options",
"[",
"'n'",
"]",
"sort",
"=",
"job",
".",
"wrapJobFn",
"(",
"sort_bamfile",
",",
"star_bams",
"[",
"'rnaAligned.out.bam'",
"]",
",",
"'rna'",
",",
"univ_options",
",",
"samtools_options",
"=",
"star_options",
"[",
"'samtools'",
"]",
",",
"disk",
"=",
"PromisedRequirement",
"(",
"sort_disk",
",",
"star_bams",
"[",
"'rnaAligned.out.bam'",
"]",
")",
")",
"index",
"=",
"job",
".",
"wrapJobFn",
"(",
"index_bamfile",
",",
"sort",
".",
"rv",
"(",
")",
",",
"'rna'",
",",
"univ_options",
",",
"samtools_options",
"=",
"star_options",
"[",
"'samtools'",
"]",
",",
"sample_info",
"=",
"'genome_sorted'",
",",
"disk",
"=",
"PromisedRequirement",
"(",
"index_disk",
",",
"sort",
".",
"rv",
"(",
")",
")",
")",
"job",
".",
"addChild",
"(",
"sort",
")",
"sort",
".",
"addChild",
"(",
"index",
")",
"return",
"{",
"'rna_genome'",
":",
"index",
".",
"rv",
"(",
")",
",",
"'rna_transcriptome.bam'",
":",
"star_bams",
"[",
"'rnaAligned.toTranscriptome.out.bam'",
"]",
",",
"'rnaChimeric.out.junction'",
":",
"star_bams",
"[",
"'rnaChimeric.out.junction'",
"]",
"}"
] | A wrapper for sorting and indexing the genomic star bam generated by run_star. It is required
since run_star returns a dict of 2 bams
:param dict star_bams: The bams from run_star
:param dict univ_options: Dict of universal options used by almost all tools
:param dict star_options: Options specific to star
:return: Dict containing input bam and the generated index (.bam.bai)
output_files:
|- 'rna_transcriptome.bam': fsID
+- 'rna_genome':
|- 'rna_sorted.bam': fsID
+- 'rna_sorted.bam.bai': fsID
+- 'rnaChimeric.out.junction': fsID
:rtype: dict | [
"A",
"wrapper",
"for",
"sorting",
"and",
"indexing",
"the",
"genomic",
"star",
"bam",
"generated",
"by",
"run_star",
".",
"It",
"is",
"required",
"since",
"run_star",
"returns",
"a",
"dict",
"of",
"2",
"bams"
] | 06310682c50dcf8917b912c8e551299ff7ee41ce | https://github.com/BD2KGenomics/protect/blob/06310682c50dcf8917b912c8e551299ff7ee41ce/src/protect/alignment/rna.py#L141-L169 | train |
drslump/pyshould | pyshould/expectation.py | Expectation.reset | def reset(self):
""" Resets the state of the expression """
self.expr = []
self.matcher = None
self.last_matcher = None
self.description = None | python | def reset(self):
""" Resets the state of the expression """
self.expr = []
self.matcher = None
self.last_matcher = None
self.description = None | [
"def",
"reset",
"(",
"self",
")",
":",
"self",
".",
"expr",
"=",
"[",
"]",
"self",
".",
"matcher",
"=",
"None",
"self",
".",
"last_matcher",
"=",
"None",
"self",
".",
"description",
"=",
"None"
] | Resets the state of the expression | [
"Resets",
"the",
"state",
"of",
"the",
"expression"
] | 7210859d4c84cfbaa64f91b30c2a541aea788ddf | https://github.com/drslump/pyshould/blob/7210859d4c84cfbaa64f91b30c2a541aea788ddf/pyshould/expectation.py#L44-L49 | train |
drslump/pyshould | pyshould/expectation.py | Expectation.clone | def clone(self):
""" Clone this expression """
from copy import copy
clone = copy(self)
clone.expr = copy(self.expr)
clone.factory = False
return clone | python | def clone(self):
""" Clone this expression """
from copy import copy
clone = copy(self)
clone.expr = copy(self.expr)
clone.factory = False
return clone | [
"def",
"clone",
"(",
"self",
")",
":",
"from",
"copy",
"import",
"copy",
"clone",
"=",
"copy",
"(",
"self",
")",
"clone",
".",
"expr",
"=",
"copy",
"(",
"self",
".",
"expr",
")",
"clone",
".",
"factory",
"=",
"False",
"return",
"clone"
] | Clone this expression | [
"Clone",
"this",
"expression"
] | 7210859d4c84cfbaa64f91b30c2a541aea788ddf | https://github.com/drslump/pyshould/blob/7210859d4c84cfbaa64f91b30c2a541aea788ddf/pyshould/expectation.py#L51-L57 | train |
drslump/pyshould | pyshould/expectation.py | Expectation.resolve | def resolve(self, value=None):
""" Resolve the current expression against the supplied value """
# If we still have an uninitialized matcher init it now
if self.matcher:
self._init_matcher()
# Evaluate the current set of matchers forming the expression
matcher = self.evaluate()
try:
value = self._transform(value)
self._assertion(matcher, value)
except AssertionError as ex:
# By re-raising here the exception we reset the traceback
raise ex
finally:
# Reset the state of the object so we can use it again
if self.deferred:
self.reset() | python | def resolve(self, value=None):
""" Resolve the current expression against the supplied value """
# If we still have an uninitialized matcher init it now
if self.matcher:
self._init_matcher()
# Evaluate the current set of matchers forming the expression
matcher = self.evaluate()
try:
value = self._transform(value)
self._assertion(matcher, value)
except AssertionError as ex:
# By re-raising here the exception we reset the traceback
raise ex
finally:
# Reset the state of the object so we can use it again
if self.deferred:
self.reset() | [
"def",
"resolve",
"(",
"self",
",",
"value",
"=",
"None",
")",
":",
"if",
"self",
".",
"matcher",
":",
"self",
".",
"_init_matcher",
"(",
")",
"matcher",
"=",
"self",
".",
"evaluate",
"(",
")",
"try",
":",
"value",
"=",
"self",
".",
"_transform",
"(",
"value",
")",
"self",
".",
"_assertion",
"(",
"matcher",
",",
"value",
")",
"except",
"AssertionError",
"as",
"ex",
":",
"raise",
"ex",
"finally",
":",
"if",
"self",
".",
"deferred",
":",
"self",
".",
"reset",
"(",
")"
] | Resolve the current expression against the supplied value | [
"Resolve",
"the",
"current",
"expression",
"against",
"the",
"supplied",
"value"
] | 7210859d4c84cfbaa64f91b30c2a541aea788ddf | https://github.com/drslump/pyshould/blob/7210859d4c84cfbaa64f91b30c2a541aea788ddf/pyshould/expectation.py#L81-L100 | train |
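A typical end-to-end use of resolve() through the `|` operator. The exact matcher aliases available (eq, be_string, ...) depend on pyshould's dynamic alias registry, so the names below are assumptions for illustration.

from pyshould import should

10 | should.eq(10)        # builds the matcher, then the `|` operator triggers resolve(10)
'foo' | should.be_string  # an uninitialized matcher is initialized inside resolve()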
drslump/pyshould | pyshould/expectation.py | Expectation._assertion | def _assertion(self, matcher, value):
""" Perform the actual assertion for the given matcher and value. Override
this method to apply a special configuration when performing the assertion.
If the assertion fails it should raise an AssertionError.
"""
# To support the syntax `any_of(subject) | should ...` we check if the
# value to check is an Expectation object and if it is we use the descriptor
# protocol to bind the value's assertion logic to this expectation.
if isinstance(value, Expectation):
assertion = value._assertion.__get__(self, Expectation)
assertion(matcher, value.value)
else:
hc.assert_that(value, matcher) | python | def _assertion(self, matcher, value):
""" Perform the actual assertion for the given matcher and value. Override
this method to apply a special configuration when performing the assertion.
If the assertion fails it should raise an AssertionError.
"""
# To support the syntax `any_of(subject) | should ...` we check if the
# value to check is an Expectation object and if it is we use the descriptor
# protocol to bind the value's assertion logic to this expectation.
if isinstance(value, Expectation):
assertion = value._assertion.__get__(self, Expectation)
assertion(matcher, value.value)
else:
hc.assert_that(value, matcher) | [
"def",
"_assertion",
"(",
"self",
",",
"matcher",
",",
"value",
")",
":",
"if",
"isinstance",
"(",
"value",
",",
"Expectation",
")",
":",
"assertion",
"=",
"value",
".",
"_assertion",
".",
"__get__",
"(",
"self",
",",
"Expectation",
")",
"assertion",
"(",
"matcher",
",",
"value",
".",
"value",
")",
"else",
":",
"hc",
".",
"assert_that",
"(",
"value",
",",
"matcher",
")"
] | Perform the actual assertion for the given matcher and value. Override
this method to apply a special configuration when performing the assertion.
If the assertion fails it should raise an AssertionError. | [
"Perform",
"the",
"actual",
"assertion",
"for",
"the",
"given",
"matcher",
"and",
"value",
".",
"Override",
"this",
"method",
"to",
"apply",
"a",
"special",
"configuration",
"when",
"performing",
"the",
"assertion",
".",
"If",
"the",
"assertion",
"fails",
"it",
"should",
"raise",
"an",
"AssertionError",
"."
] | 7210859d4c84cfbaa64f91b30c2a541aea788ddf | https://github.com/drslump/pyshould/blob/7210859d4c84cfbaa64f91b30c2a541aea788ddf/pyshould/expectation.py#L102-L114 | train |
drslump/pyshould | pyshould/expectation.py | Expectation._transform | def _transform(self, value):
""" Applies any defined transformation to the given value
"""
if self.transform:
try:
value = self.transform(value)
except:
import sys
exc_type, exc_obj, exc_tb = sys.exc_info()
raise AssertionError('Error applying transformation <{0}>: {2}: {3}'.format(
self.transform.__name__, value, exc_type.__name__, exc_obj))
return value | python | def _transform(self, value):
""" Applies any defined transformation to the given value
"""
if self.transform:
try:
value = self.transform(value)
except:
import sys
exc_type, exc_obj, exc_tb = sys.exc_info()
raise AssertionError('Error applying transformation <{0}>: {2}: {3}'.format(
self.transform.__name__, value, exc_type.__name__, exc_obj))
return value | [
"def",
"_transform",
"(",
"self",
",",
"value",
")",
":",
"if",
"self",
".",
"transform",
":",
"try",
":",
"value",
"=",
"self",
".",
"transform",
"(",
"value",
")",
"except",
":",
"import",
"sys",
"exc_type",
",",
"exc_obj",
",",
"exc_tb",
"=",
"sys",
".",
"exc_info",
"(",
")",
"raise",
"AssertionError",
"(",
"'Error applying transformation <{0}>: {2}: {3}'",
".",
"format",
"(",
"self",
".",
"transform",
".",
"__name__",
",",
"value",
",",
"exc_type",
".",
"__name__",
",",
"exc_obj",
")",
")",
"return",
"value"
] | Applies any defined transformation to the given value | [
"Applies",
"any",
"defined",
"transformation",
"to",
"the",
"given",
"value"
] | 7210859d4c84cfbaa64f91b30c2a541aea788ddf | https://github.com/drslump/pyshould/blob/7210859d4c84cfbaa64f91b30c2a541aea788ddf/pyshould/expectation.py#L116-L128 | train |
drslump/pyshould | pyshould/expectation.py | Expectation.evaluate | def evaluate(self):
""" Converts the current expression into a single matcher, applying
coordination operators to operands according to their binding rules
"""
# Apply Shunting Yard algorithm to convert the infix expression
# into Reverse Polish Notation. Since we have a very limited
# set of operators and binding rules, the implementation becomes
# really simple. The expression is formed of hamcrest matcher instances
# and operators identifiers (ints).
ops = []
rpn = []
for token in self.expr:
if isinstance(token, int):
while len(ops) and token <= ops[-1]:
rpn.append(ops.pop())
ops.append(token)
else:
rpn.append(token)
# Append the remaining operators
while len(ops):
rpn.append(ops.pop())
# Walk the RPN expression to create AllOf/AnyOf matchers
stack = []
for token in rpn:
if isinstance(token, int):
# Handle the NOT case in a special way since it's unary
if token == OPERATOR.NOT:
stack[-1] = IsNot(stack[-1])
continue
# Our operators always need two operands
if len(stack) < 2:
raise RuntimeError('Unable to build a valid expression. Not enough operands available.')
# Check what kind of matcher we need to create
if token == OPERATOR.OR:
matcher = hc.any_of(*stack[-2:])
else: # AND, BUT
matcher = hc.all_of(*stack[-2:])
stack[-2:] = [matcher]
else:
stack.append(token)
if len(stack) != 1:
raise RuntimeError('Unable to build a valid expression. The RPN stack should have just one item.')
matcher = stack.pop()
# If a description has been given include it in the matcher
if self.description:
matcher = hc.described_as(self.description, matcher)
return matcher | python | def evaluate(self):
""" Converts the current expression into a single matcher, applying
coordination operators to operands according to their binding rules
"""
# Apply Shunting Yard algorithm to convert the infix expression
# into Reverse Polish Notation. Since we have a very limited
# set of operators and binding rules, the implementation becomes
# really simple. The expression is formed of hamcrest matcher instances
# and operators identifiers (ints).
ops = []
rpn = []
for token in self.expr:
if isinstance(token, int):
while len(ops) and token <= ops[-1]:
rpn.append(ops.pop())
ops.append(token)
else:
rpn.append(token)
# Append the remaining operators
while len(ops):
rpn.append(ops.pop())
# Walk the RPN expression to create AllOf/AnyOf matchers
stack = []
for token in rpn:
if isinstance(token, int):
# Handle the NOT case in a special way since it's unary
if token == OPERATOR.NOT:
stack[-1] = IsNot(stack[-1])
continue
# Our operators always need two operands
if len(stack) < 2:
raise RuntimeError('Unable to build a valid expression. Not enough operands available.')
# Check what kind of matcher we need to create
if token == OPERATOR.OR:
matcher = hc.any_of(*stack[-2:])
else: # AND, BUT
matcher = hc.all_of(*stack[-2:])
stack[-2:] = [matcher]
else:
stack.append(token)
if len(stack) != 1:
raise RuntimeError('Unable to build a valid expression. The RPN stack should have just one item.')
matcher = stack.pop()
# If a description has been given include it in the matcher
if self.description:
matcher = hc.described_as(self.description, matcher)
return matcher | [
"def",
"evaluate",
"(",
"self",
")",
":",
"ops",
"=",
"[",
"]",
"rpn",
"=",
"[",
"]",
"for",
"token",
"in",
"self",
".",
"expr",
":",
"if",
"isinstance",
"(",
"token",
",",
"int",
")",
":",
"while",
"len",
"(",
"ops",
")",
"and",
"token",
"<=",
"ops",
"[",
"-",
"1",
"]",
":",
"rpn",
".",
"append",
"(",
"ops",
".",
"pop",
"(",
")",
")",
"ops",
".",
"append",
"(",
"token",
")",
"else",
":",
"rpn",
".",
"append",
"(",
"token",
")",
"while",
"len",
"(",
"ops",
")",
":",
"rpn",
".",
"append",
"(",
"ops",
".",
"pop",
"(",
")",
")",
"stack",
"=",
"[",
"]",
"for",
"token",
"in",
"rpn",
":",
"if",
"isinstance",
"(",
"token",
",",
"int",
")",
":",
"if",
"token",
"==",
"OPERATOR",
".",
"NOT",
":",
"stack",
"[",
"-",
"1",
"]",
"=",
"IsNot",
"(",
"stack",
"[",
"-",
"1",
"]",
")",
"continue",
"if",
"len",
"(",
"stack",
")",
"<",
"2",
":",
"raise",
"RuntimeError",
"(",
"'Unable to build a valid expression. Not enough operands available.'",
")",
"if",
"token",
"==",
"OPERATOR",
".",
"OR",
":",
"matcher",
"=",
"hc",
".",
"any_of",
"(",
"*",
"stack",
"[",
"-",
"2",
":",
"]",
")",
"else",
":",
"matcher",
"=",
"hc",
".",
"all_of",
"(",
"*",
"stack",
"[",
"-",
"2",
":",
"]",
")",
"stack",
"[",
"-",
"2",
":",
"]",
"=",
"[",
"matcher",
"]",
"else",
":",
"stack",
".",
"append",
"(",
"token",
")",
"if",
"len",
"(",
"stack",
")",
"!=",
"1",
":",
"raise",
"RuntimeError",
"(",
"'Unable to build a valid expression. The RPN stack should have just one item.'",
")",
"matcher",
"=",
"stack",
".",
"pop",
"(",
")",
"if",
"self",
".",
"description",
":",
"matcher",
"=",
"hc",
".",
"described_as",
"(",
"self",
".",
"description",
",",
"matcher",
")",
"return",
"matcher"
] | Converts the current expression into a single matcher, applying
coordination operators to operands according to their binding rules | [
"Converts",
"the",
"current",
"expression",
"into",
"a",
"single",
"matcher",
"applying",
"coordination",
"operators",
"to",
"operands",
"according",
"to",
"their",
"binding",
"rules"
] | 7210859d4c84cfbaa64f91b30c2a541aea788ddf | https://github.com/drslump/pyshould/blob/7210859d4c84cfbaa64f91b30c2a541aea788ddf/pyshould/expectation.py#L130-L186 | train |
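A worked example of the conversion evaluate() performs, written with plain hamcrest matchers. The OPERATOR constants are integers ordered by precedence; NOT binding tighter than AND/BUT, which bind tighter than OR, is assumed here.

import hamcrest as hc

# infix expression as it would sit in self.expr:
#   [greater_than(0), AND, less_than(10), OR, equal_to(-1)]
# the shunting-yard pass yields the RPN form:
#   [greater_than(0), less_than(10), AND, equal_to(-1), OR]
# and the RPN walk assembles the final matcher:
matcher = hc.any_of(hc.all_of(hc.greater_than(0), hc.less_than(10)),
                    hc.equal_to(-1))

hc.assert_that(5, matcher)    # passes: (5 > 0 and 5 < 10) or 5 == -1
hc.assert_that(-1, matcher)   # also passes, via the equal_to(-1) branch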
drslump/pyshould | pyshould/expectation.py | Expectation._find_matcher | def _find_matcher(self, alias):
""" Finds a matcher based on the given alias or raises an error if no
matcher could be found.
"""
matcher = lookup(alias)
if not matcher:
msg = 'Matcher "%s" not found' % alias
# Try to find similarly named matchers to help the user
similar = suggest(alias, max=3, cutoff=0.5)
if len(similar) > 1:
last = similar.pop()
msg += '. Perhaps you meant to use %s or %s?' % (', '.join(similar), last)
elif len(similar) > 0:
msg += '. Perhaps you meant to use %s?' % similar.pop()
raise KeyError(msg)
return matcher | python | def _find_matcher(self, alias):
""" Finds a matcher based on the given alias or raises an error if no
matcher could be found.
"""
matcher = lookup(alias)
if not matcher:
msg = 'Matcher "%s" not found' % alias
# Try to find similarly named matchers to help the user
similar = suggest(alias, max=3, cutoff=0.5)
if len(similar) > 1:
last = similar.pop()
msg += '. Perhaps you meant to use %s or %s?' % (', '.join(similar), last)
elif len(similar) > 0:
msg += '. Perhaps you meant to use %s?' % similar.pop()
raise KeyError(msg)
return matcher | [
"def",
"_find_matcher",
"(",
"self",
",",
"alias",
")",
":",
"matcher",
"=",
"lookup",
"(",
"alias",
")",
"if",
"not",
"matcher",
":",
"msg",
"=",
"'Matcher \"%s\" not found'",
"%",
"alias",
"similar",
"=",
"suggest",
"(",
"alias",
",",
"max",
"=",
"3",
",",
"cutoff",
"=",
"0.5",
")",
"if",
"len",
"(",
"similar",
")",
">",
"1",
":",
"last",
"=",
"similar",
".",
"pop",
"(",
")",
"msg",
"+=",
"'. Perhaps you meant to use %s or %s?'",
"%",
"(",
"', '",
".",
"join",
"(",
"similar",
")",
",",
"last",
")",
"elif",
"len",
"(",
"similar",
")",
">",
"0",
":",
"msg",
"+=",
"'. Perhaps you meant to use %s?'",
"%",
"similar",
".",
"pop",
"(",
")",
"raise",
"KeyError",
"(",
"msg",
")",
"return",
"matcher"
] | Finds a matcher based on the given alias or raises an error if no
matcher could be found. | [
"Finds",
"a",
"matcher",
"based",
"on",
"the",
"given",
"alias",
"or",
"raises",
"an",
"error",
"if",
"no",
"matcher",
"could",
"be",
"found",
"."
] | 7210859d4c84cfbaa64f91b30c2a541aea788ddf | https://github.com/drslump/pyshould/blob/7210859d4c84cfbaa64f91b30c2a541aea788ddf/pyshould/expectation.py#L188-L206 | train |
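The suggest() helper used above is not shown in this file; a difflib-based sketch of the kind of ranking it presumably performs over the registered alias names:

import difflib

def suggest_sketch(alias, registry, max=3, cutoff=0.5):
    # rank registered aliases by similarity to the unknown one
    return difflib.get_close_matches(alias, registry, n=max, cutoff=cutoff)

suggest_sketch('be_intger', ['be_integer', 'be_int'])
# -> ['be_integer', 'be_int'], which _find_matcher folds into the KeyError message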
drslump/pyshould | pyshould/expectation.py | Expectation._init_matcher | def _init_matcher(self, *args, **kwargs):
""" Executes the current matcher appending it to the expression """
# If subject-less expectation are provided as arguments convert them
# to plain Hamcrest matchers in order to allow complex compositions
fn = lambda x: x.evaluate() if isinstance(x, Expectation) else x
args = [fn(x) for x in args]
kwargs = dict((k, fn(v)) for k, v in kwargs.items())
matcher = self.matcher(*args, **kwargs)
self.expr.append(matcher)
self.matcher = None
return matcher | python | def _init_matcher(self, *args, **kwargs):
""" Executes the current matcher appending it to the expression """
# If subject-less expectation are provided as arguments convert them
# to plain Hamcrest matchers in order to allow complex compositions
fn = lambda x: x.evaluate() if isinstance(x, Expectation) else x
args = [fn(x) for x in args]
kwargs = dict((k, fn(v)) for k, v in kwargs.items())
matcher = self.matcher(*args, **kwargs)
self.expr.append(matcher)
self.matcher = None
return matcher | [
"def",
"_init_matcher",
"(",
"self",
",",
"*",
"args",
",",
"**",
"kwargs",
")",
":",
"fn",
"=",
"lambda",
"x",
":",
"x",
".",
"evaluate",
"(",
")",
"if",
"isinstance",
"(",
"x",
",",
"Expectation",
")",
"else",
"x",
"args",
"=",
"[",
"fn",
"(",
"x",
")",
"for",
"x",
"in",
"args",
"]",
"kwargs",
"=",
"dict",
"(",
"(",
"k",
",",
"fn",
"(",
"v",
")",
")",
"for",
"k",
",",
"v",
"in",
"kwargs",
".",
"items",
"(",
")",
")",
"matcher",
"=",
"self",
".",
"matcher",
"(",
"*",
"args",
",",
"**",
"kwargs",
")",
"self",
".",
"expr",
".",
"append",
"(",
"matcher",
")",
"self",
".",
"matcher",
"=",
"None",
"return",
"matcher"
] | Executes the current matcher appending it to the expression | [
"Executes",
"the",
"current",
"matcher",
"appending",
"it",
"to",
"the",
"expression"
] | 7210859d4c84cfbaa64f91b30c2a541aea788ddf | https://github.com/drslump/pyshould/blob/7210859d4c84cfbaa64f91b30c2a541aea788ddf/pyshould/expectation.py#L208-L220 | train |
drslump/pyshould | pyshould/expectation.py | Expectation.described_as | def described_as(self, description, *args):
""" Specify a custom message for the matcher """
if len(args):
description = description.format(*args)
self.description = description
return self | python | def described_as(self, description, *args):
""" Specify a custom message for the matcher """
if len(args):
description = description.format(*args)
self.description = description
return self | [
"def",
"described_as",
"(",
"self",
",",
"description",
",",
"*",
"args",
")",
":",
"if",
"len",
"(",
"args",
")",
":",
"description",
"=",
"description",
".",
"format",
"(",
"*",
"args",
")",
"self",
".",
"description",
"=",
"description",
"return",
"self"
] | Specify a custom message for the matcher | [
"Specify",
"a",
"custom",
"message",
"for",
"the",
"matcher"
] | 7210859d4c84cfbaa64f91b30c2a541aea788ddf | https://github.com/drslump/pyshould/blob/7210859d4c84cfbaa64f91b30c2a541aea788ddf/pyshould/expectation.py#L222-L227 | train |
bkg/django-spillway | spillway/carto.py | make_dbsource | def make_dbsource(**kwargs):
"""Returns a mapnik PostGIS or SQLite Datasource."""
if 'spatialite' in connection.settings_dict.get('ENGINE'):
kwargs.setdefault('file', connection.settings_dict['NAME'])
return mapnik.SQLite(wkb_format='spatialite', **kwargs)
names = (('dbname', 'NAME'), ('user', 'USER'),
('password', 'PASSWORD'), ('host', 'HOST'), ('port', 'PORT'))
for mopt, dopt in names:
val = connection.settings_dict.get(dopt)
if val:
kwargs.setdefault(mopt, val)
return mapnik.PostGIS(**kwargs) | python | def make_dbsource(**kwargs):
"""Returns a mapnik PostGIS or SQLite Datasource."""
if 'spatialite' in connection.settings_dict.get('ENGINE'):
kwargs.setdefault('file', connection.settings_dict['NAME'])
return mapnik.SQLite(wkb_format='spatialite', **kwargs)
names = (('dbname', 'NAME'), ('user', 'USER'),
('password', 'PASSWORD'), ('host', 'HOST'), ('port', 'PORT'))
for mopt, dopt in names:
val = connection.settings_dict.get(dopt)
if val:
kwargs.setdefault(mopt, val)
return mapnik.PostGIS(**kwargs) | [
"def",
"make_dbsource",
"(",
"**",
"kwargs",
")",
":",
"if",
"'spatialite'",
"in",
"connection",
".",
"settings_dict",
".",
"get",
"(",
"'ENGINE'",
")",
":",
"kwargs",
".",
"setdefault",
"(",
"'file'",
",",
"connection",
".",
"settings_dict",
"[",
"'NAME'",
"]",
")",
"return",
"mapnik",
".",
"SQLite",
"(",
"wkb_format",
"=",
"'spatialite'",
",",
"**",
"kwargs",
")",
"names",
"=",
"(",
"(",
"'dbname'",
",",
"'NAME'",
")",
",",
"(",
"'user'",
",",
"'USER'",
")",
",",
"(",
"'password'",
",",
"'PASSWORD'",
")",
",",
"(",
"'host'",
",",
"'HOST'",
")",
",",
"(",
"'port'",
",",
"'PORT'",
")",
")",
"for",
"mopt",
",",
"dopt",
"in",
"names",
":",
"val",
"=",
"connection",
".",
"settings_dict",
".",
"get",
"(",
"dopt",
")",
"if",
"val",
":",
"kwargs",
".",
"setdefault",
"(",
"mopt",
",",
"val",
")",
"return",
"mapnik",
".",
"PostGIS",
"(",
"**",
"kwargs",
")"
] | Returns a mapnik PostGIS or SQLite Datasource. | [
"Returns",
"a",
"mapnik",
"PostGIS",
"or",
"SQLite",
"Datasource",
"."
] | c488a62642430b005f1e0d4a19e160d8d5964b67 | https://github.com/bkg/django-spillway/blob/c488a62642430b005f1e0d4a19e160d8d5964b67/spillway/carto.py#L10-L21 | train |
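A usage sketch for make_dbsource. Extra keyword arguments are forwarded untouched to the mapnik datasource, so a table (or sub-select) and geometry column are passed exactly as they would be to mapnik.PostGIS or mapnik.SQLite; the names below are placeholders.

from spillway.carto import make_dbsource

# connection credentials are filled in from Django's database settings
ds = make_dbsource(table='myapp_location', geometry_field='geom')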
bkg/django-spillway | spillway/carto.py | Map.layer | def layer(self, queryset, stylename=None):
"""Returns a map Layer.
Arguments:
queryset -- QuerySet for Layer
Keyword args:
stylename -- str name of style to apply
"""
cls = RasterLayer if hasattr(queryset, 'image') else VectorLayer
layer = cls(queryset, style=stylename)
try:
style = self.map.find_style(layer.stylename)
except KeyError:
self.map.append_style(layer.stylename, layer.style())
layer.styles.append(layer.stylename)
self.map.layers.append(layer._layer)
return layer | python | def layer(self, queryset, stylename=None):
"""Returns a map Layer.
Arguments:
queryset -- QuerySet for Layer
Keyword args:
stylename -- str name of style to apply
"""
cls = RasterLayer if hasattr(queryset, 'image') else VectorLayer
layer = cls(queryset, style=stylename)
try:
style = self.map.find_style(layer.stylename)
except KeyError:
self.map.append_style(layer.stylename, layer.style())
layer.styles.append(layer.stylename)
self.map.layers.append(layer._layer)
return layer | [
"def",
"layer",
"(",
"self",
",",
"queryset",
",",
"stylename",
"=",
"None",
")",
":",
"cls",
"=",
"RasterLayer",
"if",
"hasattr",
"(",
"queryset",
",",
"'image'",
")",
"else",
"VectorLayer",
"layer",
"=",
"cls",
"(",
"queryset",
",",
"style",
"=",
"stylename",
")",
"try",
":",
"style",
"=",
"self",
".",
"map",
".",
"find_style",
"(",
"layer",
".",
"stylename",
")",
"except",
"KeyError",
":",
"self",
".",
"map",
".",
"append_style",
"(",
"layer",
".",
"stylename",
",",
"layer",
".",
"style",
"(",
")",
")",
"layer",
".",
"styles",
".",
"append",
"(",
"layer",
".",
"stylename",
")",
"self",
".",
"map",
".",
"layers",
".",
"append",
"(",
"layer",
".",
"_layer",
")",
"return",
"layer"
] | Returns a map Layer.
Arguments:
queryset -- QuerySet for Layer
Keyword args:
stylename -- str name of style to apply | [
"Returns",
"a",
"map",
"Layer",
"."
] | c488a62642430b005f1e0d4a19e160d8d5964b67 | https://github.com/bkg/django-spillway/blob/c488a62642430b005f1e0d4a19e160d8d5964b67/spillway/carto.py#L56-L72 | train |
bkg/django-spillway | spillway/carto.py | Map.zoom_bbox | def zoom_bbox(self, bbox):
"""Zoom map to geometry extent.
Arguments:
bbox -- OGRGeometry polygon to zoom map extent
"""
try:
bbox.transform(self.map.srs)
except gdal.GDALException:
pass
else:
self.map.zoom_to_box(mapnik.Box2d(*bbox.extent)) | python | def zoom_bbox(self, bbox):
"""Zoom map to geometry extent.
Arguments:
bbox -- OGRGeometry polygon to zoom map extent
"""
try:
bbox.transform(self.map.srs)
except gdal.GDALException:
pass
else:
self.map.zoom_to_box(mapnik.Box2d(*bbox.extent)) | [
"def",
"zoom_bbox",
"(",
"self",
",",
"bbox",
")",
":",
"try",
":",
"bbox",
".",
"transform",
"(",
"self",
".",
"map",
".",
"srs",
")",
"except",
"gdal",
".",
"GDALException",
":",
"pass",
"else",
":",
"self",
".",
"map",
".",
"zoom_to_box",
"(",
"mapnik",
".",
"Box2d",
"(",
"*",
"bbox",
".",
"extent",
")",
")"
] | Zoom map to geometry extent.
Arguments:
bbox -- OGRGeometry polygon to zoom map extent | [
"Zoom",
"map",
"to",
"geometry",
"extent",
"."
] | c488a62642430b005f1e0d4a19e160d8d5964b67 | https://github.com/bkg/django-spillway/blob/c488a62642430b005f1e0d4a19e160d8d5964b67/spillway/carto.py#L79-L90 | train |
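A small sketch of zoom_bbox with a GeoDjango OGRGeometry; the WKT polygon and SRID are illustrative, and default Map construction is assumed as above.

from django.contrib.gis.gdal import OGRGeometry
from spillway.carto import Map

m = Map()  # assumed default construction
bbox = OGRGeometry('POLYGON((-10 -10, -10 10, 10 10, 10 -10, -10 -10))', srs=4326)
m.zoom_bbox(bbox)  # transformed to the map SRS when possible, then zoomed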
bkg/django-spillway | spillway/carto.py | Layer.style | def style(self):
"""Returns a default Style."""
style = mapnik.Style()
rule = mapnik.Rule()
self._symbolizer = self.symbolizer()
rule.symbols.append(self._symbolizer)
style.rules.append(rule)
return style | python | def style(self):
"""Returns a default Style."""
style = mapnik.Style()
rule = mapnik.Rule()
self._symbolizer = self.symbolizer()
rule.symbols.append(self._symbolizer)
style.rules.append(rule)
return style | [
"def",
"style",
"(",
"self",
")",
":",
"style",
"=",
"mapnik",
".",
"Style",
"(",
")",
"rule",
"=",
"mapnik",
".",
"Rule",
"(",
")",
"self",
".",
"_symbolizer",
"=",
"self",
".",
"symbolizer",
"(",
")",
"rule",
".",
"symbols",
".",
"append",
"(",
"self",
".",
"_symbolizer",
")",
"style",
".",
"rules",
".",
"append",
"(",
"rule",
")",
"return",
"style"
] | Returns a default Style. | [
"Returns",
"a",
"default",
"Style",
"."
] | c488a62642430b005f1e0d4a19e160d8d5964b67 | https://github.com/bkg/django-spillway/blob/c488a62642430b005f1e0d4a19e160d8d5964b67/spillway/carto.py#L99-L106 | train |
BD2KGenomics/protect | src/protect/mutation_calling/fusion.py | wrap_fusion | def wrap_fusion(job,
fastqs,
star_output,
univ_options,
star_fusion_options,
fusion_inspector_options):
"""
A wrapper for run_fusion using the results from cutadapt and star as input.
:param tuple fastqs: RNA-Seq FASTQ Filestore IDs
:param dict star_output: Dictionary containing STAR output files
:param dict univ_options: universal arguments used by almost all tools
:param dict star_fusion_options: STAR-Fusion specific parameters
:param dict fusion_inspector_options: FusionInspector specific parameters
:return: Transgene BEDPE file
:rtype: toil.fileStore.FileID
"""
# Give user option to skip fusion calling
if not star_fusion_options['run']:
job.fileStore.logToMaster('Skipping STAR-Fusion on %s' % univ_options['patient'])
return
fusion = job.wrapJobFn(run_fusion, fastqs, star_output['rnaChimeric.out.junction'],
univ_options, star_fusion_options, fusion_inspector_options,
cores=star_fusion_options['n'],
memory=PromisedRequirement(lambda x: int(1.85 * x.size),
star_fusion_options['index']),
disk=PromisedRequirement(fusion_disk,
fastqs,
star_fusion_options['index'])).encapsulate()
job.addChild(fusion)
return fusion.rv() | python | def wrap_fusion(job,
fastqs,
star_output,
univ_options,
star_fusion_options,
fusion_inspector_options):
"""
A wrapper for run_fusion using the results from cutadapt and star as input.
:param tuple fastqs: RNA-Seq FASTQ Filestore IDs
:param dict star_output: Dictionary containing STAR output files
:param dict univ_options: universal arguments used by almost all tools
:param dict star_fusion_options: STAR-Fusion specific parameters
:param dict fusion_inspector_options: FusionInspector specific parameters
:return: Transgene BEDPE file
:rtype: toil.fileStore.FileID
"""
# Give user option to skip fusion calling
if not star_fusion_options['run']:
job.fileStore.logToMaster('Skipping STAR-Fusion on %s' % univ_options['patient'])
return
fusion = job.wrapJobFn(run_fusion, fastqs, star_output['rnaChimeric.out.junction'],
univ_options, star_fusion_options, fusion_inspector_options,
cores=star_fusion_options['n'],
memory=PromisedRequirement(lambda x: int(1.85 * x.size),
star_fusion_options['index']),
disk=PromisedRequirement(fusion_disk,
fastqs,
star_fusion_options['index'])).encapsulate()
job.addChild(fusion)
return fusion.rv() | [
"def",
"wrap_fusion",
"(",
"job",
",",
"fastqs",
",",
"star_output",
",",
"univ_options",
",",
"star_fusion_options",
",",
"fusion_inspector_options",
")",
":",
"if",
"not",
"star_fusion_options",
"[",
"'run'",
"]",
":",
"job",
".",
"fileStore",
".",
"logToMaster",
"(",
"'Skipping STAR-Fusion on %s'",
"%",
"univ_options",
"[",
"'patient'",
"]",
")",
"return",
"fusion",
"=",
"job",
".",
"wrapJobFn",
"(",
"run_fusion",
",",
"fastqs",
",",
"star_output",
"[",
"'rnaChimeric.out.junction'",
"]",
",",
"univ_options",
",",
"star_fusion_options",
",",
"fusion_inspector_options",
",",
"cores",
"=",
"star_fusion_options",
"[",
"'n'",
"]",
",",
"memory",
"=",
"PromisedRequirement",
"(",
"lambda",
"x",
":",
"int",
"(",
"1.85",
"*",
"x",
".",
"size",
")",
",",
"star_fusion_options",
"[",
"'index'",
"]",
")",
",",
"disk",
"=",
"PromisedRequirement",
"(",
"fusion_disk",
",",
"fastqs",
",",
"star_fusion_options",
"[",
"'index'",
"]",
")",
")",
".",
"encapsulate",
"(",
")",
"job",
".",
"addChild",
"(",
"fusion",
")",
"return",
"fusion",
".",
"rv",
"(",
")"
] | A wrapper for run_fusion using the results from cutadapt and star as input.
:param tuple fastqs: RNA-Seq FASTQ Filestore IDs
:param dict star_output: Dictionary containing STAR output files
:param dict univ_options: universal arguments used by almost all tools
:param dict star_fusion_options: STAR-Fusion specific parameters
:param dict fusion_inspector_options: FusionInspector specific parameters
:return: Transgene BEDPE file
:rtype: toil.fileStore.FileID | [
"A",
"wrapper",
"for",
"run_fusion",
"using",
"the",
"results",
"from",
"cutadapt",
"and",
"star",
"as",
"input",
"."
] | 06310682c50dcf8917b912c8e551299ff7ee41ce | https://github.com/BD2KGenomics/protect/blob/06310682c50dcf8917b912c8e551299ff7ee41ce/src/protect/mutation_calling/fusion.py#L38-L70 | train |
BD2KGenomics/protect | src/protect/mutation_calling/fusion.py | parse_star_fusion | def parse_star_fusion(infile):
"""
Parses STAR-Fusion format and returns an Expando object with basic features
    :param file infile: open file handle for a STAR-Fusion prediction file
:return: Fusion prediction attributes
:rtype: bd2k.util.expando.Expando
"""
reader = csv.reader(infile, delimiter='\t')
header = reader.next()
header = {key: index for index, key in enumerate(header)}
features = ['LeftGene', 'LeftLocalBreakpoint', 'LeftBreakpoint',
'RightGene', 'RightLocalBreakpoint', 'RightBreakpoint',
'LargeAnchorSupport', 'JunctionReadCount', 'SpanningFragCount']
for line in reader:
yield Expando(dict((feature, line[header[feature]]) for feature in features)) | python | def parse_star_fusion(infile):
"""
Parses STAR-Fusion format and returns an Expando object with basic features
    :param file infile: open file handle for a STAR-Fusion prediction file
:return: Fusion prediction attributes
:rtype: bd2k.util.expando.Expando
"""
reader = csv.reader(infile, delimiter='\t')
header = reader.next()
header = {key: index for index, key in enumerate(header)}
features = ['LeftGene', 'LeftLocalBreakpoint', 'LeftBreakpoint',
'RightGene', 'RightLocalBreakpoint', 'RightBreakpoint',
'LargeAnchorSupport', 'JunctionReadCount', 'SpanningFragCount']
for line in reader:
yield Expando(dict((feature, line[header[feature]]) for feature in features)) | [
"def",
"parse_star_fusion",
"(",
"infile",
")",
":",
"reader",
"=",
"csv",
".",
"reader",
"(",
"infile",
",",
"delimiter",
"=",
"'\\t'",
")",
"header",
"=",
"reader",
".",
"next",
"(",
")",
"header",
"=",
"{",
"key",
":",
"index",
"for",
"index",
",",
"key",
"in",
"enumerate",
"(",
"header",
")",
"}",
"features",
"=",
"[",
"'LeftGene'",
",",
"'LeftLocalBreakpoint'",
",",
"'LeftBreakpoint'",
",",
"'RightGene'",
",",
"'RightLocalBreakpoint'",
",",
"'RightBreakpoint'",
",",
"'LargeAnchorSupport'",
",",
"'JunctionReadCount'",
",",
"'SpanningFragCount'",
"]",
"for",
"line",
"in",
"reader",
":",
"yield",
"Expando",
"(",
"dict",
"(",
"(",
"feature",
",",
"line",
"[",
"header",
"[",
"feature",
"]",
"]",
")",
"for",
"feature",
"in",
"features",
")",
")"
] | Parses STAR-Fusion format and returns an Expando object with basic features
:param file infile: open file handle for a STAR-Fusion prediction file
:return: Fusion prediction attributes
:rtype: bd2k.util.expando.Expando | [
"Parses",
"STAR",
"-",
"Fusion",
"format",
"and",
"returns",
"an",
"Expando",
"object",
"with",
"basic",
"features"
] | 06310682c50dcf8917b912c8e551299ff7ee41ce | https://github.com/BD2KGenomics/protect/blob/06310682c50dcf8917b912c8e551299ff7ee41ce/src/protect/mutation_calling/fusion.py#L235-L252 | train |
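A minimal sketch of consuming parse_star_fusion. The filename is hypothetical, and the TSV is assumed to carry the column names referenced above (LeftGene, RightGene, LeftBreakpoint, JunctionReadCount, and so on).

from protect.mutation_calling.fusion import parse_star_fusion

# parse_star_fusion expects an open file handle, not a path string.
with open('star_fusion_predictions.tsv') as handle:
    for rec in parse_star_fusion(handle):
        print('%s--%s: %s junction reads' % (rec.LeftGene, rec.RightGene,
                                             rec.JunctionReadCount))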
BD2KGenomics/protect | src/protect/mutation_calling/fusion.py | get_transcripts | def get_transcripts(transcript_file):
"""
Parses FusionInspector transcript file and returns dictionary of sequences
:param str transcript_file: path to transcript FASTA
:return: de novo assembled transcripts
:rtype: dict
"""
with open(transcript_file, 'r') as fa:
transcripts = {}
regex_s = r"(?P<ID>TRINITY.*)\s(?P<fusion>.*--.*):(?P<left_start>\d+)-(?P<right_start>\d+)"
regex = re.compile(regex_s)
while True:
# Usually the transcript is on one line
try:
info = fa.next()
seq = fa.next()
assert info.startswith('>')
m = regex.search(info)
if m:
transcripts[m.group('ID')] = seq.strip()
except StopIteration:
break
except AssertionError:
print("WARNING: Malformed fusion transcript file")
return transcripts | python | def get_transcripts(transcript_file):
"""
Parses FusionInspector transcript file and returns dictionary of sequences
:param str transcript_file: path to transcript FASTA
:return: de novo assembled transcripts
:rtype: dict
"""
with open(transcript_file, 'r') as fa:
transcripts = {}
regex_s = r"(?P<ID>TRINITY.*)\s(?P<fusion>.*--.*):(?P<left_start>\d+)-(?P<right_start>\d+)"
regex = re.compile(regex_s)
while True:
# Usually the transcript is on one line
try:
info = fa.next()
seq = fa.next()
assert info.startswith('>')
m = regex.search(info)
if m:
transcripts[m.group('ID')] = seq.strip()
except StopIteration:
break
except AssertionError:
print("WARNING: Malformed fusion transcript file")
return transcripts | [
"def",
"get_transcripts",
"(",
"transcript_file",
")",
":",
"with",
"open",
"(",
"transcript_file",
",",
"'r'",
")",
"as",
"fa",
":",
"transcripts",
"=",
"{",
"}",
"regex_s",
"=",
"r\"(?P<ID>TRINITY.*)\\s(?P<fusion>.*--.*):(?P<left_start>\\d+)-(?P<right_start>\\d+)\"",
"regex",
"=",
"re",
".",
"compile",
"(",
"regex_s",
")",
"while",
"True",
":",
"try",
":",
"info",
"=",
"fa",
".",
"next",
"(",
")",
"seq",
"=",
"fa",
".",
"next",
"(",
")",
"assert",
"info",
".",
"startswith",
"(",
"'>'",
")",
"m",
"=",
"regex",
".",
"search",
"(",
"info",
")",
"if",
"m",
":",
"transcripts",
"[",
"m",
".",
"group",
"(",
"'ID'",
")",
"]",
"=",
"seq",
".",
"strip",
"(",
")",
"except",
"StopIteration",
":",
"break",
"except",
"AssertionError",
":",
"print",
"(",
"\"WARNING: Malformed fusion transcript file\"",
")",
"return",
"transcripts"
] | Parses FusionInspector transcript file and returns dictionary of sequences
:param str transcript_file: path to transcript FASTA
:return: de novo assembled transcripts
:rtype: dict | [
"Parses",
"FusionInspector",
"transcript",
"file",
"and",
"returns",
"dictionary",
"of",
"sequences"
] | 06310682c50dcf8917b912c8e551299ff7ee41ce | https://github.com/BD2KGenomics/protect/blob/06310682c50dcf8917b912c8e551299ff7ee41ce/src/protect/mutation_calling/fusion.py#L255-L284 | train |
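An illustrative round trip for get_transcripts; the FASTA header mimics the Trinity/FusionInspector naming that the regex above expects, and the sequence is a toy example.

from protect.mutation_calling.fusion import get_transcripts

with open('fusion_transcripts.fa', 'w') as fa:
    fa.write('>TRINITY_GG_1_c0_g1_i1 GENE1--GENE2:100-250\n')
    fa.write('ACGTACGTACGT\n')

print(get_transcripts('fusion_transcripts.fa'))
# -> {'TRINITY_GG_1_c0_g1_i1': 'ACGTACGTACGT'}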
BD2KGenomics/protect | src/protect/mutation_calling/fusion.py | split_fusion_transcript | def split_fusion_transcript(annotation_path, transcripts):
"""
Finds the breakpoint in the fusion transcript and splits the 5' donor from the 3' acceptor
:param str annotation_path: Path to transcript annotation file
:param dict transcripts: Dictionary of fusion transcripts
:return: 5' donor sequences and 3' acceptor sequences
:rtype: tuple
"""
annotation = collections.defaultdict(dict)
forward = 'ACGTN'
reverse = 'TGCAN'
trans = string.maketrans(forward, reverse)
# Pull in assembled transcript annotation
five_pr_splits = collections.defaultdict(dict)
three_pr_splits = collections.defaultdict(dict)
regex = re.compile(r'ID=(?P<ID>.*);Name=(?P<Name>.*);Target=(?P<Target>.*)\s(?P<start>\d+)\s(?P<stop>\d+)')
with open(annotation_path, 'r') as gff:
for line in gff:
print(line)
if line.startswith('#'):
_, eyd, fusion = line.strip().split()
fusion, start_stop = fusion.split(':')
left_break, right_break = start_stop.split('-')
annotation[fusion][eyd] = {}
annotation[fusion][eyd]['left_break'] = left_break
annotation[fusion][eyd]['right_break'] = right_break
else:
line = line.strip().split('\t')
fusion = line[0]
strand = line[6]
block_start = line[3]
block_stop = line[4]
attr = line[8]
m = regex.search(attr)
if m:
transcript_id = m.group('Name')
rb = any([block_start == annotation[fusion][transcript_id]['right_break'],
block_stop == annotation[fusion][transcript_id]['right_break']])
lb = any([block_start == annotation[fusion][transcript_id]['left_break'],
block_stop == annotation[fusion][transcript_id]['left_break']])
if strand == '-' and rb:
transcript_split = int(m.group('stop')) + 1 # Off by one
# Take the reverse complement to orient transcripts from 5' to 3'
five_seq = transcripts[transcript_id][transcript_split:]
five_pr_splits[fusion][transcript_id] = five_seq.translate(trans)[::-1]
three_seq = transcripts[transcript_id][:transcript_split]
three_pr_splits[fusion][transcript_id] = three_seq.translate(trans)[::-1]
elif strand == '+' and lb:
transcript_split = int(m.group('stop'))
s1 = transcripts[transcript_id][:transcript_split]
five_pr_splits[fusion][transcript_id] = s1
s2 = transcripts[transcript_id][transcript_split:]
three_pr_splits[fusion][transcript_id] = s2
return five_pr_splits, three_pr_splits | python | def split_fusion_transcript(annotation_path, transcripts):
"""
Finds the breakpoint in the fusion transcript and splits the 5' donor from the 3' acceptor
:param str annotation_path: Path to transcript annotation file
:param dict transcripts: Dictionary of fusion transcripts
:return: 5' donor sequences and 3' acceptor sequences
:rtype: tuple
"""
annotation = collections.defaultdict(dict)
forward = 'ACGTN'
reverse = 'TGCAN'
trans = string.maketrans(forward, reverse)
# Pull in assembled transcript annotation
five_pr_splits = collections.defaultdict(dict)
three_pr_splits = collections.defaultdict(dict)
regex = re.compile(r'ID=(?P<ID>.*);Name=(?P<Name>.*);Target=(?P<Target>.*)\s(?P<start>\d+)\s(?P<stop>\d+)')
with open(annotation_path, 'r') as gff:
for line in gff:
print(line)
if line.startswith('#'):
_, eyd, fusion = line.strip().split()
fusion, start_stop = fusion.split(':')
left_break, right_break = start_stop.split('-')
annotation[fusion][eyd] = {}
annotation[fusion][eyd]['left_break'] = left_break
annotation[fusion][eyd]['right_break'] = right_break
else:
line = line.strip().split('\t')
fusion = line[0]
strand = line[6]
block_start = line[3]
block_stop = line[4]
attr = line[8]
m = regex.search(attr)
if m:
transcript_id = m.group('Name')
rb = any([block_start == annotation[fusion][transcript_id]['right_break'],
block_stop == annotation[fusion][transcript_id]['right_break']])
lb = any([block_start == annotation[fusion][transcript_id]['left_break'],
block_stop == annotation[fusion][transcript_id]['left_break']])
if strand == '-' and rb:
transcript_split = int(m.group('stop')) + 1 # Off by one
# Take the reverse complement to orient transcripts from 5' to 3'
five_seq = transcripts[transcript_id][transcript_split:]
five_pr_splits[fusion][transcript_id] = five_seq.translate(trans)[::-1]
three_seq = transcripts[transcript_id][:transcript_split]
three_pr_splits[fusion][transcript_id] = three_seq.translate(trans)[::-1]
elif strand == '+' and lb:
transcript_split = int(m.group('stop'))
s1 = transcripts[transcript_id][:transcript_split]
five_pr_splits[fusion][transcript_id] = s1
s2 = transcripts[transcript_id][transcript_split:]
three_pr_splits[fusion][transcript_id] = s2
return five_pr_splits, three_pr_splits | [
"def",
"split_fusion_transcript",
"(",
"annotation_path",
",",
"transcripts",
")",
":",
"annotation",
"=",
"collections",
".",
"defaultdict",
"(",
"dict",
")",
"forward",
"=",
"'ACGTN'",
"reverse",
"=",
"'TGCAN'",
"trans",
"=",
"string",
".",
"maketrans",
"(",
"forward",
",",
"reverse",
")",
"five_pr_splits",
"=",
"collections",
".",
"defaultdict",
"(",
"dict",
")",
"three_pr_splits",
"=",
"collections",
".",
"defaultdict",
"(",
"dict",
")",
"regex",
"=",
"re",
".",
"compile",
"(",
"r'ID=(?P<ID>.*);Name=(?P<Name>.*);Target=(?P<Target>.*)\\s(?P<start>\\d+)\\s(?P<stop>\\d+)'",
")",
"with",
"open",
"(",
"annotation_path",
",",
"'r'",
")",
"as",
"gff",
":",
"for",
"line",
"in",
"gff",
":",
"print",
"(",
"line",
")",
"if",
"line",
".",
"startswith",
"(",
"'#'",
")",
":",
"_",
",",
"eyd",
",",
"fusion",
"=",
"line",
".",
"strip",
"(",
")",
".",
"split",
"(",
")",
"fusion",
",",
"start_stop",
"=",
"fusion",
".",
"split",
"(",
"':'",
")",
"left_break",
",",
"right_break",
"=",
"start_stop",
".",
"split",
"(",
"'-'",
")",
"annotation",
"[",
"fusion",
"]",
"[",
"eyd",
"]",
"=",
"{",
"}",
"annotation",
"[",
"fusion",
"]",
"[",
"eyd",
"]",
"[",
"'left_break'",
"]",
"=",
"left_break",
"annotation",
"[",
"fusion",
"]",
"[",
"eyd",
"]",
"[",
"'right_break'",
"]",
"=",
"right_break",
"else",
":",
"line",
"=",
"line",
".",
"strip",
"(",
")",
".",
"split",
"(",
"'\\t'",
")",
"fusion",
"=",
"line",
"[",
"0",
"]",
"strand",
"=",
"line",
"[",
"6",
"]",
"block_start",
"=",
"line",
"[",
"3",
"]",
"block_stop",
"=",
"line",
"[",
"4",
"]",
"attr",
"=",
"line",
"[",
"8",
"]",
"m",
"=",
"regex",
".",
"search",
"(",
"attr",
")",
"if",
"m",
":",
"transcript_id",
"=",
"m",
".",
"group",
"(",
"'Name'",
")",
"rb",
"=",
"any",
"(",
"[",
"block_start",
"==",
"annotation",
"[",
"fusion",
"]",
"[",
"transcript_id",
"]",
"[",
"'right_break'",
"]",
",",
"block_stop",
"==",
"annotation",
"[",
"fusion",
"]",
"[",
"transcript_id",
"]",
"[",
"'right_break'",
"]",
"]",
")",
"lb",
"=",
"any",
"(",
"[",
"block_start",
"==",
"annotation",
"[",
"fusion",
"]",
"[",
"transcript_id",
"]",
"[",
"'left_break'",
"]",
",",
"block_stop",
"==",
"annotation",
"[",
"fusion",
"]",
"[",
"transcript_id",
"]",
"[",
"'left_break'",
"]",
"]",
")",
"if",
"strand",
"==",
"'-'",
"and",
"rb",
":",
"transcript_split",
"=",
"int",
"(",
"m",
".",
"group",
"(",
"'stop'",
")",
")",
"+",
"1",
"five_seq",
"=",
"transcripts",
"[",
"transcript_id",
"]",
"[",
"transcript_split",
":",
"]",
"five_pr_splits",
"[",
"fusion",
"]",
"[",
"transcript_id",
"]",
"=",
"five_seq",
".",
"translate",
"(",
"trans",
")",
"[",
":",
":",
"-",
"1",
"]",
"three_seq",
"=",
"transcripts",
"[",
"transcript_id",
"]",
"[",
":",
"transcript_split",
"]",
"three_pr_splits",
"[",
"fusion",
"]",
"[",
"transcript_id",
"]",
"=",
"three_seq",
".",
"translate",
"(",
"trans",
")",
"[",
":",
":",
"-",
"1",
"]",
"elif",
"strand",
"==",
"'+'",
"and",
"lb",
":",
"transcript_split",
"=",
"int",
"(",
"m",
".",
"group",
"(",
"'stop'",
")",
")",
"s1",
"=",
"transcripts",
"[",
"transcript_id",
"]",
"[",
":",
"transcript_split",
"]",
"five_pr_splits",
"[",
"fusion",
"]",
"[",
"transcript_id",
"]",
"=",
"s1",
"s2",
"=",
"transcripts",
"[",
"transcript_id",
"]",
"[",
"transcript_split",
":",
"]",
"three_pr_splits",
"[",
"fusion",
"]",
"[",
"transcript_id",
"]",
"=",
"s2",
"return",
"five_pr_splits",
",",
"three_pr_splits"
] | Finds the breakpoint in the fusion transcript and splits the 5' donor from the 3' acceptor
:param str annotation_path: Path to transcript annotation file
:param dict transcripts: Dictionary of fusion transcripts
:return: 5' donor sequences and 3' acceptor sequences
:rtype: tuple | [
"Finds",
"the",
"breakpoint",
"in",
"the",
"fusion",
"transcript",
"and",
"splits",
"the",
"5",
"donor",
"from",
"the",
"3",
"acceptor"
] | 06310682c50dcf8917b912c8e551299ff7ee41ce | https://github.com/BD2KGenomics/protect/blob/06310682c50dcf8917b912c8e551299ff7ee41ce/src/protect/mutation_calling/fusion.py#L287-L352 | train |
BD2KGenomics/protect | src/protect/mutation_calling/fusion.py | get_gene_ids | def get_gene_ids(fusion_bed):
"""
Parses FusionInspector bed file to ascertain the ENSEMBL gene ids
:param str fusion_bed: path to fusion annotation
:return: dict
"""
with open(fusion_bed, 'r') as f:
gene_to_id = {}
regex = re.compile(r'(?P<gene>ENSG\d*)')
for line in f:
line = line.split('\t')
transcript, gene_bit, name = line[3].split(';')
m = regex.search(gene_bit)
if m:
gene_to_id[name] = m.group('gene')
return gene_to_id | python | def get_gene_ids(fusion_bed):
"""
Parses FusionInspector bed file to ascertain the ENSEMBL gene ids
:param str fusion_bed: path to fusion annotation
:return: dict
"""
with open(fusion_bed, 'r') as f:
gene_to_id = {}
regex = re.compile(r'(?P<gene>ENSG\d*)')
for line in f:
line = line.split('\t')
transcript, gene_bit, name = line[3].split(';')
m = regex.search(gene_bit)
if m:
gene_to_id[name] = m.group('gene')
return gene_to_id | [
"def",
"get_gene_ids",
"(",
"fusion_bed",
")",
":",
"with",
"open",
"(",
"fusion_bed",
",",
"'r'",
")",
"as",
"f",
":",
"gene_to_id",
"=",
"{",
"}",
"regex",
"=",
"re",
".",
"compile",
"(",
"r'(?P<gene>ENSG\\d*)'",
")",
"for",
"line",
"in",
"f",
":",
"line",
"=",
"line",
".",
"split",
"(",
"'\\t'",
")",
"transcript",
",",
"gene_bit",
",",
"name",
"=",
"line",
"[",
"3",
"]",
".",
"split",
"(",
"';'",
")",
"m",
"=",
"regex",
".",
"search",
"(",
"gene_bit",
")",
"if",
"m",
":",
"gene_to_id",
"[",
"name",
"]",
"=",
"m",
".",
"group",
"(",
"'gene'",
")",
"return",
"gene_to_id"
] | Parses FusionInspector bed file to ascertain the ENSEMBL gene ids
:param str fusion_bed: path to fusion annotation
:return: dict | [
"Parses",
"FusionInspector",
"bed",
"file",
"to",
"ascertain",
"the",
"ENSEMBL",
"gene",
"ids"
] | 06310682c50dcf8917b912c8e551299ff7ee41ce | https://github.com/BD2KGenomics/protect/blob/06310682c50dcf8917b912c8e551299ff7ee41ce/src/protect/mutation_calling/fusion.py#L355-L371 | train |
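A toy example for get_gene_ids; the BED line below only mimics the 'transcript;gene;hugo' layout of column 4 that the parser relies on, and all identifiers are made up.

from protect.mutation_calling.fusion import get_gene_ids

with open('fusion.bed', 'w') as bed:
    fields = ['GENE1--GENE2', '100', '200',
              'ENST00000123456.1;ENSG00000123456.2;GENE1', '0', '+']
    bed.write('\t'.join(fields) + '\n')

print(get_gene_ids('fusion.bed'))
# -> {'GENE1': 'ENSG00000123456'}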
BD2KGenomics/protect | src/protect/mutation_calling/fusion.py | reformat_star_fusion_output | def reformat_star_fusion_output(job,
fusion_annot,
fusion_file,
transcript_file,
transcript_gff_file,
univ_options):
"""
Writes STAR-Fusion results in Transgene BEDPE format
:param toil.fileStore.FileID fusion_annot: Fusion annotation
:param toil.fileStore.FileID fusion_file: STAR-fusion prediction file
:param toil.fileStore.FileID transcript_file: Fusion transcript FASTA file
:param toil.fileStore.FileID transcript_gff_file: Fusion transcript GFF file
:param dict univ_options: universal arguments used by almost all tools
:return: Transgene BEDPE file
:rtype: toil.fileStore.FileID
"""
input_files = {'results.tsv': fusion_file,
'fusion.bed': fusion_annot}
if transcript_file and transcript_gff_file:
input_files['transcripts.fa'] = transcript_file
input_files['transcripts.gff'] = transcript_gff_file
work_dir = job.fileStore.getLocalTempDir()
input_files = get_files_from_filestore(job, input_files, work_dir, docker=False)
# Pull in assembled transcript file
hugo_to_gene_ids = get_gene_ids(input_files['fusion.bed'])
if transcript_file and transcript_gff_file:
transcripts = get_transcripts(input_files['transcripts.fa'])
five_pr_splits, three_pr_splits = split_fusion_transcript(input_files['transcripts.gff'],
transcripts)
else:
five_pr_splits = collections.defaultdict(dict)
three_pr_splits = collections.defaultdict(dict)
# Pull in assembled transcript annotation
# Header for BEDPE file
header = ['# chr1', 'start1', 'end1',
'chr2', 'start2', 'end2',
'name', 'score',
'strand1', 'strand2',
'junctionSeq1', 'junctionSeq2',
'hugo1', 'hugo2']
output_path = os.path.join(work_dir, 'fusion_results.bedpe')
with open(input_files['results.tsv'], 'r') as in_f, open(output_path, 'w') as out_f:
writer = csv.writer(out_f, delimiter='\t')
writer.writerow(header)
for record in parse_star_fusion(in_f):
left_chr, left_break, left_strand = record.LeftBreakpoint.split(':')
right_chr, right_break, right_strand = record.RightBreakpoint.split(':')
fusion = ''.join([record.LeftGene, '--', record.RightGene])
name = '-'.join([hugo_to_gene_ids[record.LeftGene], hugo_to_gene_ids[record.RightGene]])
score = 'Junction:%s-Spanning:%s' % (record.JunctionReadCount, record.SpanningFragCount)
# Add empty sequences in case Trinity doesn't output one
if len(five_pr_splits[fusion].keys()) == 0:
five_pr_splits[fusion]['N/A'] = '.'
if len(three_pr_splits[fusion].keys()) == 0:
three_pr_splits[fusion]['N/A'] = '.'
for transcript_id in five_pr_splits[fusion].keys():
five_prime_seq = five_pr_splits[fusion][transcript_id]
three_prime_seq = three_pr_splits[fusion][transcript_id]
writer.writerow([left_chr,
'.', # Donor start position is not necessary
left_break,
right_chr,
right_break,
'.', # Acceptor end position is not necessary
name,
score,
left_strand,
right_strand,
five_prime_seq,
three_prime_seq,
record.LeftGene,
record.RightGene])
bedpe_id = job.fileStore.writeGlobalFile(output_path)
export_results(job, bedpe_id, 'fusion.bedpe', univ_options, subfolder='mutations/fusions')
job.fileStore.logToMaster('Reformatted STAR-Fusion output for %s successfully'
% univ_options['patient'])
return bedpe_id | python | def reformat_star_fusion_output(job,
fusion_annot,
fusion_file,
transcript_file,
transcript_gff_file,
univ_options):
"""
Writes STAR-Fusion results in Transgene BEDPE format
:param toil.fileStore.FileID fusion_annot: Fusion annotation
:param toil.fileStore.FileID fusion_file: STAR-fusion prediction file
:param toil.fileStore.FileID transcript_file: Fusion transcript FASTA file
:param toil.fileStore.FileID transcript_gff_file: Fusion transcript GFF file
:param dict univ_options: universal arguments used by almost all tools
:return: Transgene BEDPE file
:rtype: toil.fileStore.FileID
"""
input_files = {'results.tsv': fusion_file,
'fusion.bed': fusion_annot}
if transcript_file and transcript_gff_file:
input_files['transcripts.fa'] = transcript_file
input_files['transcripts.gff'] = transcript_gff_file
work_dir = job.fileStore.getLocalTempDir()
input_files = get_files_from_filestore(job, input_files, work_dir, docker=False)
# Pull in assembled transcript file
hugo_to_gene_ids = get_gene_ids(input_files['fusion.bed'])
if transcript_file and transcript_gff_file:
transcripts = get_transcripts(input_files['transcripts.fa'])
five_pr_splits, three_pr_splits = split_fusion_transcript(input_files['transcripts.gff'],
transcripts)
else:
five_pr_splits = collections.defaultdict(dict)
three_pr_splits = collections.defaultdict(dict)
# Pull in assembled transcript annotation
# Header for BEDPE file
header = ['# chr1', 'start1', 'end1',
'chr2', 'start2', 'end2',
'name', 'score',
'strand1', 'strand2',
'junctionSeq1', 'junctionSeq2',
'hugo1', 'hugo2']
output_path = os.path.join(work_dir, 'fusion_results.bedpe')
with open(input_files['results.tsv'], 'r') as in_f, open(output_path, 'w') as out_f:
writer = csv.writer(out_f, delimiter='\t')
writer.writerow(header)
for record in parse_star_fusion(in_f):
left_chr, left_break, left_strand = record.LeftBreakpoint.split(':')
right_chr, right_break, right_strand = record.RightBreakpoint.split(':')
fusion = ''.join([record.LeftGene, '--', record.RightGene])
name = '-'.join([hugo_to_gene_ids[record.LeftGene], hugo_to_gene_ids[record.RightGene]])
score = 'Junction:%s-Spanning:%s' % (record.JunctionReadCount, record.SpanningFragCount)
# Add empty sequences in case Trinity doesn't output one
if len(five_pr_splits[fusion].keys()) == 0:
five_pr_splits[fusion]['N/A'] = '.'
if len(three_pr_splits[fusion].keys()) == 0:
three_pr_splits[fusion]['N/A'] = '.'
for transcript_id in five_pr_splits[fusion].keys():
five_prime_seq = five_pr_splits[fusion][transcript_id]
three_prime_seq = three_pr_splits[fusion][transcript_id]
writer.writerow([left_chr,
'.', # Donor start position is not necessary
left_break,
right_chr,
right_break,
'.', # Acceptor end position is not necessary
name,
score,
left_strand,
right_strand,
five_prime_seq,
three_prime_seq,
record.LeftGene,
record.RightGene])
bedpe_id = job.fileStore.writeGlobalFile(output_path)
export_results(job, bedpe_id, 'fusion.bedpe', univ_options, subfolder='mutations/fusions')
job.fileStore.logToMaster('Reformatted STAR-Fusion output for %s successfully'
% univ_options['patient'])
return bedpe_id | [
"def",
"reformat_star_fusion_output",
"(",
"job",
",",
"fusion_annot",
",",
"fusion_file",
",",
"transcript_file",
",",
"transcript_gff_file",
",",
"univ_options",
")",
":",
"input_files",
"=",
"{",
"'results.tsv'",
":",
"fusion_file",
",",
"'fusion.bed'",
":",
"fusion_annot",
"}",
"if",
"transcript_file",
"and",
"transcript_gff_file",
":",
"input_files",
"[",
"'transcripts.fa'",
"]",
"=",
"transcript_file",
"input_files",
"[",
"'transcripts.gff'",
"]",
"=",
"transcript_gff_file",
"work_dir",
"=",
"job",
".",
"fileStore",
".",
"getLocalTempDir",
"(",
")",
"input_files",
"=",
"get_files_from_filestore",
"(",
"job",
",",
"input_files",
",",
"work_dir",
",",
"docker",
"=",
"False",
")",
"hugo_to_gene_ids",
"=",
"get_gene_ids",
"(",
"input_files",
"[",
"'fusion.bed'",
"]",
")",
"if",
"transcript_file",
"and",
"transcript_gff_file",
":",
"transcripts",
"=",
"get_transcripts",
"(",
"input_files",
"[",
"'transcripts.fa'",
"]",
")",
"five_pr_splits",
",",
"three_pr_splits",
"=",
"split_fusion_transcript",
"(",
"input_files",
"[",
"'transcripts.gff'",
"]",
",",
"transcripts",
")",
"else",
":",
"five_pr_splits",
"=",
"collections",
".",
"defaultdict",
"(",
"dict",
")",
"three_pr_splits",
"=",
"collections",
".",
"defaultdict",
"(",
"dict",
")",
"header",
"=",
"[",
"'# chr1'",
",",
"'start1'",
",",
"'end1'",
",",
"'chr2'",
",",
"'start2'",
",",
"'end2'",
",",
"'name'",
",",
"'score'",
",",
"'strand1'",
",",
"'strand2'",
",",
"'junctionSeq1'",
",",
"'junctionSeq2'",
",",
"'hugo1'",
",",
"'hugo2'",
"]",
"output_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"work_dir",
",",
"'fusion_results.bedpe'",
")",
"with",
"open",
"(",
"input_files",
"[",
"'results.tsv'",
"]",
",",
"'r'",
")",
"as",
"in_f",
",",
"open",
"(",
"output_path",
",",
"'w'",
")",
"as",
"out_f",
":",
"writer",
"=",
"csv",
".",
"writer",
"(",
"out_f",
",",
"delimiter",
"=",
"'\\t'",
")",
"writer",
".",
"writerow",
"(",
"header",
")",
"for",
"record",
"in",
"parse_star_fusion",
"(",
"in_f",
")",
":",
"left_chr",
",",
"left_break",
",",
"left_strand",
"=",
"record",
".",
"LeftBreakpoint",
".",
"split",
"(",
"':'",
")",
"right_chr",
",",
"right_break",
",",
"right_strand",
"=",
"record",
".",
"RightBreakpoint",
".",
"split",
"(",
"':'",
")",
"fusion",
"=",
"''",
".",
"join",
"(",
"[",
"record",
".",
"LeftGene",
",",
"'--'",
",",
"record",
".",
"RightGene",
"]",
")",
"name",
"=",
"'-'",
".",
"join",
"(",
"[",
"hugo_to_gene_ids",
"[",
"record",
".",
"LeftGene",
"]",
",",
"hugo_to_gene_ids",
"[",
"record",
".",
"RightGene",
"]",
"]",
")",
"score",
"=",
"'Junction:%s-Spanning:%s'",
"%",
"(",
"record",
".",
"JunctionReadCount",
",",
"record",
".",
"SpanningFragCount",
")",
"if",
"len",
"(",
"five_pr_splits",
"[",
"fusion",
"]",
".",
"keys",
"(",
")",
")",
"==",
"0",
":",
"five_pr_splits",
"[",
"fusion",
"]",
"[",
"'N/A'",
"]",
"=",
"'.'",
"if",
"len",
"(",
"three_pr_splits",
"[",
"fusion",
"]",
".",
"keys",
"(",
")",
")",
"==",
"0",
":",
"three_pr_splits",
"[",
"fusion",
"]",
"[",
"'N/A'",
"]",
"=",
"'.'",
"for",
"transcript_id",
"in",
"five_pr_splits",
"[",
"fusion",
"]",
".",
"keys",
"(",
")",
":",
"five_prime_seq",
"=",
"five_pr_splits",
"[",
"fusion",
"]",
"[",
"transcript_id",
"]",
"three_prime_seq",
"=",
"three_pr_splits",
"[",
"fusion",
"]",
"[",
"transcript_id",
"]",
"writer",
".",
"writerow",
"(",
"[",
"left_chr",
",",
"'.'",
",",
"left_break",
",",
"right_chr",
",",
"right_break",
",",
"'.'",
",",
"name",
",",
"score",
",",
"left_strand",
",",
"right_strand",
",",
"five_prime_seq",
",",
"three_prime_seq",
",",
"record",
".",
"LeftGene",
",",
"record",
".",
"RightGene",
"]",
")",
"bedpe_id",
"=",
"job",
".",
"fileStore",
".",
"writeGlobalFile",
"(",
"output_path",
")",
"export_results",
"(",
"job",
",",
"bedpe_id",
",",
"'fusion.bedpe'",
",",
"univ_options",
",",
"subfolder",
"=",
"'mutations/fusions'",
")",
"job",
".",
"fileStore",
".",
"logToMaster",
"(",
"'Reformatted STAR-Fusion output for %s successfully'",
"%",
"univ_options",
"[",
"'patient'",
"]",
")",
"return",
"bedpe_id"
] | Writes STAR-Fusion results in Transgene BEDPE format
:param toil.fileStore.FileID fusion_annot: Fusion annotation
:param toil.fileStore.FileID fusion_file: STAR-fusion prediction file
:param toil.fileStore.FileID transcript_file: Fusion transcript FASTA file
:param toil.fileStore.FileID transcript_gff_file: Fusion transcript GFF file
:param dict univ_options: universal arguments used by almost all tools
:return: Transgene BEDPE file
:rtype: toil.fileStore.FileID | [
"Writes",
"STAR",
"-",
"Fusion",
"results",
"in",
"Transgene",
"BEDPE",
"format"
] | 06310682c50dcf8917b912c8e551299ff7ee41ce | https://github.com/BD2KGenomics/protect/blob/06310682c50dcf8917b912c8e551299ff7ee41ce/src/protect/mutation_calling/fusion.py#L374-L467 | train |
BD2KGenomics/protect | src/protect/pipeline/ProTECT.py | _ensure_patient_group_is_ok | def _ensure_patient_group_is_ok(patient_object, patient_name=None):
"""
    Ensure that the provided entries for the patient groups are formatted properly.
:param set|dict patient_object: The values passed to the samples patient group
:param str patient_name: Optional name for the set
    :raises ParameterError: If a required entry doesn't exist
"""
from protect.addons.common import TCGAToGTEx
assert isinstance(patient_object, (set, dict)), '%s,%s' % (patient_object, patient_name)
# set(dict) = set of keys of the dict
test_set = set(patient_object)
if 'tumor_type' not in patient_object:
raise ParameterError(('The patient entry for sample %s ' % patient_name) +
'does not contain a Tumor type.')
elif patient_object['tumor_type'] not in TCGAToGTEx:
raise ParameterError(('The patient entry for sample %s ' % patient_name) +
                             'contains an invalid Tumor type. Please use one of the '
'valid TCGA tumor types.')
if {'tumor_dna_fastq_1', 'normal_dna_fastq_1', 'tumor_rna_fastq_1'}.issubset(test_set):
# Best case scenario, we get all fastqs
pass
else:
# We have less than 3 fastqs so we have to have a haplotype.
if 'hla_haplotype_files' not in test_set:
raise ParameterError(('The patient entry for sample %s ' % patient_name) +
'does not contain a hla_haplotype_files entry.\nCannot haplotype '
'patient if all the input sequence files are not fastqs.')
# Either we have a fastq and/or bam for the tumor and normal, or we need to be given a vcf
if (({re.search('tumor_dna_((bam)|(fastq_1)).*', x) for x in test_set} == {None} or
{re.search('normal_dna_((bam)|(fastq_1)).*', x) for x in test_set} == {None}) and
('mutation_vcf' not in test_set and 'fusion_bedpe' not in test_set)):
raise ParameterError(('The patient entry for sample %s ' % patient_name) +
'does not contain a mutation_vcf or fusion_bedpe entry. If both '
'tumor and normal DNA sequences (fastqs or bam) are not provided, '
'a pre-computed vcf and/or bedpe must be provided.')
# We have to be given a tumor rna fastq or bam unless we are processing ONLY fusions
if {re.search('tumor_rna_((bam)|(fastq_1)).*', x) for x in test_set} == {None}:
if 'mutation_vcf' not in test_set and 'fusion_bedpe' in test_set:
# The only case where it is ok to not have the genome mapped rna.
pass
else:
raise ParameterError(('The patient entry for sample %s ' % patient_name) +
'does not contain a tumor rna sequence data entry. We require '
'either tumor_rna_fastq_1 or tumor_rna_bam.')
# If we are given an RNA bam then it needs to have a corresponding transcriptome bam unless
# we have also been provided expression values.
if 'tumor_rna_bam' in test_set and 'tumor_rna_transcriptome_bam' not in test_set:
if 'expression_files' not in test_set:
raise ParameterError(('The patient entry for sample %s ' % patient_name +
'was provided a tumor rna bam with sequences mapped to the '
'genome but was not provided a matching rna bam for the '
'transcriptome or a tar containing expression values. '
                                  'We require either a matching transcriptome bam to estimate '
'expression, or the precomputed expression values.')) | python | def _ensure_patient_group_is_ok(patient_object, patient_name=None):
"""
    Ensure that the provided entries for the patient groups are formatted properly.
:param set|dict patient_object: The values passed to the samples patient group
:param str patient_name: Optional name for the set
    :raises ParameterError: If a required entry doesn't exist
"""
from protect.addons.common import TCGAToGTEx
assert isinstance(patient_object, (set, dict)), '%s,%s' % (patient_object, patient_name)
# set(dict) = set of keys of the dict
test_set = set(patient_object)
if 'tumor_type' not in patient_object:
raise ParameterError(('The patient entry for sample %s ' % patient_name) +
'does not contain a Tumor type.')
elif patient_object['tumor_type'] not in TCGAToGTEx:
raise ParameterError(('The patient entry for sample %s ' % patient_name) +
                             'contains an invalid Tumor type. Please use one of the '
'valid TCGA tumor types.')
if {'tumor_dna_fastq_1', 'normal_dna_fastq_1', 'tumor_rna_fastq_1'}.issubset(test_set):
# Best case scenario, we get all fastqs
pass
else:
# We have less than 3 fastqs so we have to have a haplotype.
if 'hla_haplotype_files' not in test_set:
raise ParameterError(('The patient entry for sample %s ' % patient_name) +
'does not contain a hla_haplotype_files entry.\nCannot haplotype '
'patient if all the input sequence files are not fastqs.')
# Either we have a fastq and/or bam for the tumor and normal, or we need to be given a vcf
if (({re.search('tumor_dna_((bam)|(fastq_1)).*', x) for x in test_set} == {None} or
{re.search('normal_dna_((bam)|(fastq_1)).*', x) for x in test_set} == {None}) and
('mutation_vcf' not in test_set and 'fusion_bedpe' not in test_set)):
raise ParameterError(('The patient entry for sample %s ' % patient_name) +
'does not contain a mutation_vcf or fusion_bedpe entry. If both '
'tumor and normal DNA sequences (fastqs or bam) are not provided, '
'a pre-computed vcf and/or bedpe must be provided.')
# We have to be given a tumor rna fastq or bam unless we are processing ONLY fusions
if {re.search('tumor_rna_((bam)|(fastq_1)).*', x) for x in test_set} == {None}:
if 'mutation_vcf' not in test_set and 'fusion_bedpe' in test_set:
# The only case where it is ok to not have the genome mapped rna.
pass
else:
raise ParameterError(('The patient entry for sample %s ' % patient_name) +
'does not contain a tumor rna sequence data entry. We require '
'either tumor_rna_fastq_1 or tumor_rna_bam.')
# If we are given an RNA bam then it needs to have a corresponding transcriptome bam unless
# we have also been provided expression values.
if 'tumor_rna_bam' in test_set and 'tumor_rna_transcriptome_bam' not in test_set:
if 'expression_files' not in test_set:
raise ParameterError(('The patient entry for sample %s ' % patient_name +
'was provided a tumor rna bam with sequences mapped to the '
'genome but was not provided a matching rna bam for the '
'transcriptome or a tar containing expression values. '
'We require either a matching transcriptome bam to estimate'
'expression, or the precomputed expression values.')) | [
"def",
"_ensure_patient_group_is_ok",
"(",
"patient_object",
",",
"patient_name",
"=",
"None",
")",
":",
"from",
"protect",
".",
"addons",
".",
"common",
"import",
"TCGAToGTEx",
"assert",
"isinstance",
"(",
"patient_object",
",",
"(",
"set",
",",
"dict",
")",
")",
",",
"'%s,%s'",
"%",
"(",
"patient_object",
",",
"patient_name",
")",
"test_set",
"=",
"set",
"(",
"patient_object",
")",
"if",
"'tumor_type'",
"not",
"in",
"patient_object",
":",
"raise",
"ParameterError",
"(",
"(",
"'The patient entry for sample %s '",
"%",
"patient_name",
")",
"+",
"'does not contain a Tumor type.'",
")",
"elif",
"patient_object",
"[",
"'tumor_type'",
"]",
"not",
"in",
"TCGAToGTEx",
":",
"raise",
"ParameterError",
"(",
"(",
"'The patient entry for sample %s '",
"%",
"patient_name",
")",
"+",
"'does contains an invalid Tumor type. Please use one of the '",
"'valid TCGA tumor types.'",
")",
"if",
"{",
"'tumor_dna_fastq_1'",
",",
"'normal_dna_fastq_1'",
",",
"'tumor_rna_fastq_1'",
"}",
".",
"issubset",
"(",
"test_set",
")",
":",
"pass",
"else",
":",
"if",
"'hla_haplotype_files'",
"not",
"in",
"test_set",
":",
"raise",
"ParameterError",
"(",
"(",
"'The patient entry for sample %s '",
"%",
"patient_name",
")",
"+",
"'does not contain a hla_haplotype_files entry.\\nCannot haplotype '",
"'patient if all the input sequence files are not fastqs.'",
")",
"if",
"(",
"(",
"{",
"re",
".",
"search",
"(",
"'tumor_dna_((bam)|(fastq_1)).*'",
",",
"x",
")",
"for",
"x",
"in",
"test_set",
"}",
"==",
"{",
"None",
"}",
"or",
"{",
"re",
".",
"search",
"(",
"'normal_dna_((bam)|(fastq_1)).*'",
",",
"x",
")",
"for",
"x",
"in",
"test_set",
"}",
"==",
"{",
"None",
"}",
")",
"and",
"(",
"'mutation_vcf'",
"not",
"in",
"test_set",
"and",
"'fusion_bedpe'",
"not",
"in",
"test_set",
")",
")",
":",
"raise",
"ParameterError",
"(",
"(",
"'The patient entry for sample %s '",
"%",
"patient_name",
")",
"+",
"'does not contain a mutation_vcf or fusion_bedpe entry. If both '",
"'tumor and normal DNA sequences (fastqs or bam) are not provided, '",
"'a pre-computed vcf and/or bedpe must be provided.'",
")",
"if",
"{",
"re",
".",
"search",
"(",
"'tumor_rna_((bam)|(fastq_1)).*'",
",",
"x",
")",
"for",
"x",
"in",
"test_set",
"}",
"==",
"{",
"None",
"}",
":",
"if",
"'mutation_vcf'",
"not",
"in",
"test_set",
"and",
"'fusion_bedpe'",
"in",
"test_set",
":",
"pass",
"else",
":",
"raise",
"ParameterError",
"(",
"(",
"'The patient entry for sample %s '",
"%",
"patient_name",
")",
"+",
"'does not contain a tumor rna sequence data entry. We require '",
"'either tumor_rna_fastq_1 or tumor_rna_bam.'",
")",
"if",
"'tumor_rna_bam'",
"in",
"test_set",
"and",
"'tumor_rna_transcriptome_bam'",
"not",
"in",
"test_set",
":",
"if",
"'expression_files'",
"not",
"in",
"test_set",
":",
"raise",
"ParameterError",
"(",
"(",
"'The patient entry for sample %s '",
"%",
"patient_name",
"+",
"'was provided a tumor rna bam with sequences mapped to the '",
"'genome but was not provided a matching rna bam for the '",
"'transcriptome or a tar containing expression values. '",
"'We require either a matching transcriptome bam to estimate'",
"'expression, or the precomputed expression values.'",
")",
")"
] | Ensure that the provided entries for the patient groups are formatted properly.
:param set|dict patient_object: The values passed to the samples patient group
:param str patient_name: Optional name for the set
:raises ParameterError: If a required entry doesn't exist | [
"Ensure",
"that",
"the",
"provided",
"entries",
"for",
"the",
"patient",
"groups",
"is",
"formatted",
"properly",
"."
] | 06310682c50dcf8917b912c8e551299ff7ee41ce | https://github.com/BD2KGenomics/protect/blob/06310682c50dcf8917b912c8e551299ff7ee41ce/src/protect/pipeline/ProTECT.py#L94-L148 | train |
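A sketch of a patient entry that should pass validation; the paths are hypothetical, and 'STAD' is assumed to be present in the TCGAToGTEx mapping shipped with ProTECT.

from protect.pipeline.ProTECT import _ensure_patient_group_is_ok

patient = {'tumor_type': 'STAD',
           'tumor_dna_fastq_1': '/data/pt1/tumor_dna_1.fq.gz',
           'normal_dna_fastq_1': '/data/pt1/normal_dna_1.fq.gz',
           'tumor_rna_fastq_1': '/data/pt1/tumor_rna_1.fq.gz'}

# Raises ParameterError if a required entry is missing or inconsistent.
_ensure_patient_group_is_ok(patient, patient_name='pt1')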
BD2KGenomics/protect | src/protect/pipeline/ProTECT.py | _add_default_entries | def _add_default_entries(input_dict, defaults_dict):
"""
Add the entries in defaults dict into input_dict if they don't exist in input_dict
This is based on the accepted answer at
http://stackoverflow.com/questions/3232943/update-value-of-a-nested-dictionary-of-varying-depth
:param dict input_dict: The dict to be updated
:param dict defaults_dict: Dict containing the defaults for entries in input_dict
:return: updated dict
:rtype: dict
"""
for key, value in defaults_dict.iteritems():
if key == 'patients':
print('Cannot default `patients`.')
continue
if isinstance(value, dict):
if key not in input_dict or input_dict[key] is None:
            # User didn't specify anything for the tool, but the entry was still in there so we
# just copy over the whole defaults dict
input_dict[key] = value
else:
r = _add_default_entries(input_dict.get(key, {}), value)
input_dict[key] = r
else:
# Only write if not in input_dict
if key not in input_dict or input_dict[key] is None:
# Either the user didn't have the entry, or had it without a value
input_dict[key] = value
return input_dict | python | def _add_default_entries(input_dict, defaults_dict):
"""
Add the entries in defaults dict into input_dict if they don't exist in input_dict
This is based on the accepted answer at
http://stackoverflow.com/questions/3232943/update-value-of-a-nested-dictionary-of-varying-depth
:param dict input_dict: The dict to be updated
:param dict defaults_dict: Dict containing the defaults for entries in input_dict
:return: updated dict
:rtype: dict
"""
for key, value in defaults_dict.iteritems():
if key == 'patients':
print('Cannot default `patients`.')
continue
if isinstance(value, dict):
if key not in input_dict or input_dict[key] is None:
            # User didn't specify anything for the tool, but the entry was still in there so we
# just copy over the whole defaults dict
input_dict[key] = value
else:
r = _add_default_entries(input_dict.get(key, {}), value)
input_dict[key] = r
else:
# Only write if not in input_dict
if key not in input_dict or input_dict[key] is None:
# Either the user didn't have the entry, or had it without a value
input_dict[key] = value
return input_dict | [
"def",
"_add_default_entries",
"(",
"input_dict",
",",
"defaults_dict",
")",
":",
"for",
"key",
",",
"value",
"in",
"defaults_dict",
".",
"iteritems",
"(",
")",
":",
"if",
"key",
"==",
"'patients'",
":",
"print",
"(",
"'Cannot default `patients`.'",
")",
"continue",
"if",
"isinstance",
"(",
"value",
",",
"dict",
")",
":",
"if",
"key",
"not",
"in",
"input_dict",
"or",
"input_dict",
"[",
"key",
"]",
"is",
"None",
":",
"input_dict",
"[",
"key",
"]",
"=",
"value",
"else",
":",
"r",
"=",
"_add_default_entries",
"(",
"input_dict",
".",
"get",
"(",
"key",
",",
"{",
"}",
")",
",",
"value",
")",
"input_dict",
"[",
"key",
"]",
"=",
"r",
"else",
":",
"if",
"key",
"not",
"in",
"input_dict",
"or",
"input_dict",
"[",
"key",
"]",
"is",
"None",
":",
"input_dict",
"[",
"key",
"]",
"=",
"value",
"return",
"input_dict"
] | Add the entries in defaults dict into input_dict if they don't exist in input_dict
This is based on the accepted answer at
http://stackoverflow.com/questions/3232943/update-value-of-a-nested-dictionary-of-varying-depth
:param dict input_dict: The dict to be updated
:param dict defaults_dict: Dict containing the defaults for entries in input_dict
:return: updated dict
:rtype: dict | [
"Add",
"the",
"entries",
"in",
"defaults",
"dict",
"into",
"input_dict",
"if",
"they",
"don",
"t",
"exist",
"in",
"input_dict"
] | 06310682c50dcf8917b912c8e551299ff7ee41ce | https://github.com/BD2KGenomics/protect/blob/06310682c50dcf8917b912c8e551299ff7ee41ce/src/protect/pipeline/ProTECT.py#L151-L180 | train |
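A behaviour sketch for _add_default_entries on two small nested dicts (the tool names and values are made up); it fills only missing or None entries and never defaults the 'patients' key.

from protect.pipeline.ProTECT import _add_default_entries

user = {'star': {'type': 'star', 'index': None}, 'mutect': None}
defaults = {'star': {'type': 'starlong', 'index': 's3://bucket/star_index.tar.gz'},
            'mutect': {'java_Xmx': '5G'}}

merged = _add_default_entries(user, defaults)
# merged == {'star': {'type': 'star', 'index': 's3://bucket/star_index.tar.gz'},
#            'mutect': {'java_Xmx': '5G'}}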
BD2KGenomics/protect | src/protect/pipeline/ProTECT.py | _process_group | def _process_group(input_group, required_group, groupname, append_subgroups=None):
"""
Process one group from the input yaml. Ensure it has the required entries. If there is a
subgroup that should be processed and then appended to the rest of the subgroups in that group,
handle it accordingly.
:param dict input_group: The dict of values of the input group
:param dict required_group: The dict of required values for the input group
:param str groupname: The name of the group being processed
    :param list append_subgroups: list of subgroups to append to each other subgroup in this group
:return: processed dict of entries for the group
:rtype: dict
"""
if append_subgroups is None:
append_subgroups = []
tool_options = {}
for key in input_group:
_ensure_set_contains(input_group[key], required_group.get(key, {}), groupname + '::' + key)
if key in append_subgroups:
continue
else:
tool_options[key] = input_group[key]
for key in input_group:
if key in append_subgroups:
continue
else:
for yek in append_subgroups:
tool_options[key].update(input_group[yek])
return tool_options | python | def _process_group(input_group, required_group, groupname, append_subgroups=None):
"""
Process one group from the input yaml. Ensure it has the required entries. If there is a
subgroup that should be processed and then appended to the rest of the subgroups in that group,
handle it accordingly.
:param dict input_group: The dict of values of the input group
:param dict required_group: The dict of required values for the input group
:param str groupname: The name of the group being processed
    :param list append_subgroups: list of subgroups to append to each other subgroup in this group
:return: processed dict of entries for the group
:rtype: dict
"""
if append_subgroups is None:
append_subgroups = []
tool_options = {}
for key in input_group:
_ensure_set_contains(input_group[key], required_group.get(key, {}), groupname + '::' + key)
if key in append_subgroups:
continue
else:
tool_options[key] = input_group[key]
for key in input_group:
if key in append_subgroups:
continue
else:
for yek in append_subgroups:
tool_options[key].update(input_group[yek])
return tool_options | [
"def",
"_process_group",
"(",
"input_group",
",",
"required_group",
",",
"groupname",
",",
"append_subgroups",
"=",
"None",
")",
":",
"if",
"append_subgroups",
"is",
"None",
":",
"append_subgroups",
"=",
"[",
"]",
"tool_options",
"=",
"{",
"}",
"for",
"key",
"in",
"input_group",
":",
"_ensure_set_contains",
"(",
"input_group",
"[",
"key",
"]",
",",
"required_group",
".",
"get",
"(",
"key",
",",
"{",
"}",
")",
",",
"groupname",
"+",
"'::'",
"+",
"key",
")",
"if",
"key",
"in",
"append_subgroups",
":",
"continue",
"else",
":",
"tool_options",
"[",
"key",
"]",
"=",
"input_group",
"[",
"key",
"]",
"for",
"key",
"in",
"input_group",
":",
"if",
"key",
"in",
"append_subgroups",
":",
"continue",
"else",
":",
"for",
"yek",
"in",
"append_subgroups",
":",
"tool_options",
"[",
"key",
"]",
".",
"update",
"(",
"input_group",
"[",
"yek",
"]",
")",
"return",
"tool_options"
] | Process one group from the input yaml. Ensure it has the required entries. If there is a
subgroup that should be processed and then appended to the rest of the subgroups in that group,
handle it accordingly.
:param dict input_group: The dict of values of the input group
:param dict required_group: The dict of required values for the input group
:param str groupname: The name of the group being processed
:param list append_subgroups: list of subgroups to append to each other subgroup in this group
:return: processed dict of entries for the group
:rtype: dict | [
"Process",
"one",
"group",
"from",
"the",
"input",
"yaml",
".",
"Ensure",
"it",
"has",
"the",
"required",
"entries",
".",
"If",
"there",
"is",
"a",
"subgroup",
"that",
"should",
"be",
"processed",
"and",
"then",
"appended",
"to",
"the",
"rest",
"of",
"the",
"subgroups",
"in",
"that",
"group",
"handle",
"it",
"accordingly",
"."
] | 06310682c50dcf8917b912c8e551299ff7ee41ce | https://github.com/BD2KGenomics/protect/blob/06310682c50dcf8917b912c8e551299ff7ee41ce/src/protect/pipeline/ProTECT.py#L183-L211 | train |
BD2KGenomics/protect | src/protect/pipeline/ProTECT.py | get_fastq_2 | def get_fastq_2(job, patient_id, sample_type, fastq_1):
"""
For a path to a fastq_1 file, return a fastq_2 file with the same prefix and naming scheme.
:param str patient_id: The patient_id
:param str sample_type: The sample type of the file
:param str fastq_1: The path to the fastq_1 file
:return: The path to the fastq_2 file
:rtype: str
"""
prefix, extn = fastq_1, 'temp'
final_extn = ''
while extn:
prefix, extn = os.path.splitext(prefix)
final_extn = extn + final_extn
if prefix.endswith('1'):
prefix = prefix[:-1]
job.fileStore.logToMaster('"%s" prefix for "%s" determined to be %s'
% (sample_type, patient_id, prefix))
break
else:
raise ParameterError('Could not determine prefix from provided fastq (%s). Is it '
'of the form <fastq_prefix>1.[fq/fastq][.gz]?' % fastq_1)
if final_extn not in ['.fastq', '.fastq.gz', '.fq', '.fq.gz']:
raise ParameterError('If a _2 fastq path is not specified, only .fastq, .fq or '
'their gzipped extensions are accepted. Could not process '
'%s:%s.' % (patient_id, sample_type + '_fastq_1'))
return ''.join([prefix, '2', final_extn]) | python | def get_fastq_2(job, patient_id, sample_type, fastq_1):
"""
For a path to a fastq_1 file, return a fastq_2 file with the same prefix and naming scheme.
:param str patient_id: The patient_id
:param str sample_type: The sample type of the file
:param str fastq_1: The path to the fastq_1 file
:return: The path to the fastq_2 file
:rtype: str
"""
prefix, extn = fastq_1, 'temp'
final_extn = ''
while extn:
prefix, extn = os.path.splitext(prefix)
final_extn = extn + final_extn
if prefix.endswith('1'):
prefix = prefix[:-1]
job.fileStore.logToMaster('"%s" prefix for "%s" determined to be %s'
% (sample_type, patient_id, prefix))
break
else:
raise ParameterError('Could not determine prefix from provided fastq (%s). Is it '
'of the form <fastq_prefix>1.[fq/fastq][.gz]?' % fastq_1)
if final_extn not in ['.fastq', '.fastq.gz', '.fq', '.fq.gz']:
raise ParameterError('If a _2 fastq path is not specified, only .fastq, .fq or '
'their gzipped extensions are accepted. Could not process '
'%s:%s.' % (patient_id, sample_type + '_fastq_1'))
return ''.join([prefix, '2', final_extn]) | [
"def",
"get_fastq_2",
"(",
"job",
",",
"patient_id",
",",
"sample_type",
",",
"fastq_1",
")",
":",
"prefix",
",",
"extn",
"=",
"fastq_1",
",",
"'temp'",
"final_extn",
"=",
"''",
"while",
"extn",
":",
"prefix",
",",
"extn",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"prefix",
")",
"final_extn",
"=",
"extn",
"+",
"final_extn",
"if",
"prefix",
".",
"endswith",
"(",
"'1'",
")",
":",
"prefix",
"=",
"prefix",
"[",
":",
"-",
"1",
"]",
"job",
".",
"fileStore",
".",
"logToMaster",
"(",
"'\"%s\" prefix for \"%s\" determined to be %s'",
"%",
"(",
"sample_type",
",",
"patient_id",
",",
"prefix",
")",
")",
"break",
"else",
":",
"raise",
"ParameterError",
"(",
"'Could not determine prefix from provided fastq (%s). Is it '",
"'of the form <fastq_prefix>1.[fq/fastq][.gz]?'",
"%",
"fastq_1",
")",
"if",
"final_extn",
"not",
"in",
"[",
"'.fastq'",
",",
"'.fastq.gz'",
",",
"'.fq'",
",",
"'.fq.gz'",
"]",
":",
"raise",
"ParameterError",
"(",
"'If and _2 fastq path is not specified, only .fastq, .fq or '",
"'their gzippped extensions are accepted. Could not process '",
"'%s:%s.'",
"%",
"(",
"patient_id",
",",
"sample_type",
"+",
"'_fastq_1'",
")",
")",
"return",
"''",
".",
"join",
"(",
"[",
"prefix",
",",
"'2'",
",",
"final_extn",
"]",
")"
] | For a path to a fastq_1 file, return a fastq_2 file with the same prefix and naming scheme.
:param str patient_id: The patient_id
:param str sample_type: The sample type of the file
:param str fastq_1: The path to the fastq_1 file
:return: The path to the fastq_2 file
:rtype: str | [
"For",
"a",
"path",
"to",
"a",
"fastq_1",
"file",
"return",
"a",
"fastq_2",
"file",
"with",
"the",
"same",
"prefix",
"and",
"naming",
"scheme",
"."
] | 06310682c50dcf8917b912c8e551299ff7ee41ce | https://github.com/BD2KGenomics/protect/blob/06310682c50dcf8917b912c8e551299ff7ee41ce/src/protect/pipeline/ProTECT.py#L214-L241 | train |
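A behaviour sketch for get_fastq_2; since only job.fileStore.logToMaster is used, a tiny stand-in object replaces the Toil job here, and the fastq path is hypothetical.

from protect.pipeline.ProTECT import get_fastq_2

class _FakeFileStore(object):
    def logToMaster(self, msg):
        print(msg)

class _FakeJob(object):
    fileStore = _FakeFileStore()

path = get_fastq_2(_FakeJob(), 'pt1', 'tumor_dna', '/data/pt1_tumor_dna_1.fq.gz')
# path == '/data/pt1_tumor_dna_2.fq.gz'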
BD2KGenomics/protect | src/protect/pipeline/ProTECT.py | parse_config_file | def parse_config_file(job, config_file, max_cores=None):
"""
Parse the config file and spawn a ProTECT job for every input sample.
:param str config_file: Path to the input config file
:param int max_cores: The maximum cores to use for any single high-compute job.
"""
sample_set, univ_options, processed_tool_inputs = _parse_config_file(job, config_file,
max_cores)
# Start a job for each sample in the sample set
for patient_id in sample_set.keys():
job.addFollowOnJobFn(launch_protect, sample_set[patient_id], univ_options,
processed_tool_inputs)
return None | python | def parse_config_file(job, config_file, max_cores=None):
"""
Parse the config file and spawn a ProTECT job for every input sample.
:param str config_file: Path to the input config file
:param int max_cores: The maximum cores to use for any single high-compute job.
"""
sample_set, univ_options, processed_tool_inputs = _parse_config_file(job, config_file,
max_cores)
# Start a job for each sample in the sample set
for patient_id in sample_set.keys():
job.addFollowOnJobFn(launch_protect, sample_set[patient_id], univ_options,
processed_tool_inputs)
return None | [
"def",
"parse_config_file",
"(",
"job",
",",
"config_file",
",",
"max_cores",
"=",
"None",
")",
":",
"sample_set",
",",
"univ_options",
",",
"processed_tool_inputs",
"=",
"_parse_config_file",
"(",
"job",
",",
"config_file",
",",
"max_cores",
")",
"for",
"patient_id",
"in",
"sample_set",
".",
"keys",
"(",
")",
":",
"job",
".",
"addFollowOnJobFn",
"(",
"launch_protect",
",",
"sample_set",
"[",
"patient_id",
"]",
",",
"univ_options",
",",
"processed_tool_inputs",
")",
"return",
"None"
] | Parse the config file and spawn a ProTECT job for every input sample.
:param str config_file: Path to the input config file
:param int max_cores: The maximum cores to use for any single high-compute job. | [
"Parse",
"the",
"config",
"file",
"and",
"spawn",
"a",
"ProTECT",
"job",
"for",
"every",
"input",
"sample",
"."
] | 06310682c50dcf8917b912c8e551299ff7ee41ce | https://github.com/BD2KGenomics/protect/blob/06310682c50dcf8917b912c8e551299ff7ee41ce/src/protect/pipeline/ProTECT.py#L436-L449 | train |
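A hypothetical launch sketch: parse_config_file is intended to be the root Toil job, exactly as main() wires it up further down; the config path and jobstore location below are made up.

```python
from toil.job import Job

options = Job.Runner.getDefaultOptions('./protect_jobstore')  # local file jobstore
root = Job.wrapJobFn(parse_config_file, '/path/to/ProTECT_config.yaml', max_cores=16)
Job.Runner.startToil(root, options)
```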
BD2KGenomics/protect | src/protect/pipeline/ProTECT.py | get_all_tool_inputs | def get_all_tool_inputs(job, tools, outer_key='', mutation_caller_list=None):
"""
Iterate through all the tool options and download required files from their remote locations.
:param dict tools: A dict of dicts of all tools, and their options
:param str outer_key: If this is being called recursively, what was the outer dict called?
:param list mutation_caller_list: A list of mutation caller keys to append the indexes to.
:return: The fully resolved tool dictionary
:rtype: dict
"""
for tool in tools:
for option in tools[tool]:
if isinstance(tools[tool][option], dict):
tools[tool][option] = get_all_tool_inputs(
job, {option: tools[tool][option]},
outer_key=':'.join([outer_key, tool]).lstrip(':'))[option]
else:
# If a file is of the type file, vcf, tar or fasta, it needs to be downloaded from
# S3 if reqd, then written to job store.
if option.split('_')[-1] in ['file', 'vcf', 'index', 'fasta', 'fai', 'idx', 'dict',
'tbi', 'beds', 'gtf', 'config']:
tools[tool][option] = job.addChildJobFn(
get_pipeline_inputs, ':'.join([outer_key, tool, option]).lstrip(':'),
tools[tool][option]).rv()
elif option == 'version':
tools[tool][option] = str(tools[tool][option])
if mutation_caller_list is not None:
# Guaranteed to occur only in the outermost loop
indexes = tools.pop('indexes')
indexes['chromosomes'] = parse_chromosome_string(job, indexes['chromosomes'])
for mutation_caller in mutation_caller_list:
if mutation_caller == 'indexes':
continue
tools[mutation_caller].update(indexes)
return tools | python | def get_all_tool_inputs(job, tools, outer_key='', mutation_caller_list=None):
"""
Iterate through all the tool options and download required files from their remote locations.
:param dict tools: A dict of dicts of all tools, and their options
:param str outer_key: If this is being called recursively, what was the outer dict called?
:param list mutation_caller_list: A list of mutation caller keys to append the indexes to.
:return: The fully resolved tool dictionary
:rtype: dict
"""
for tool in tools:
for option in tools[tool]:
if isinstance(tools[tool][option], dict):
tools[tool][option] = get_all_tool_inputs(
job, {option: tools[tool][option]},
outer_key=':'.join([outer_key, tool]).lstrip(':'))[option]
else:
# If a file is of the type file, vcf, tar or fasta, it needs to be downloaded from
# S3 if reqd, then written to job store.
if option.split('_')[-1] in ['file', 'vcf', 'index', 'fasta', 'fai', 'idx', 'dict',
'tbi', 'beds', 'gtf', 'config']:
tools[tool][option] = job.addChildJobFn(
get_pipeline_inputs, ':'.join([outer_key, tool, option]).lstrip(':'),
tools[tool][option]).rv()
elif option == 'version':
tools[tool][option] = str(tools[tool][option])
if mutation_caller_list is not None:
# Guaranteed to occur only in the outermost loop
indexes = tools.pop('indexes')
indexes['chromosomes'] = parse_chromosome_string(job, indexes['chromosomes'])
for mutation_caller in mutation_caller_list:
if mutation_caller == 'indexes':
continue
tools[mutation_caller].update(indexes)
return tools | [
"def",
"get_all_tool_inputs",
"(",
"job",
",",
"tools",
",",
"outer_key",
"=",
"''",
",",
"mutation_caller_list",
"=",
"None",
")",
":",
"for",
"tool",
"in",
"tools",
":",
"for",
"option",
"in",
"tools",
"[",
"tool",
"]",
":",
"if",
"isinstance",
"(",
"tools",
"[",
"tool",
"]",
"[",
"option",
"]",
",",
"dict",
")",
":",
"tools",
"[",
"tool",
"]",
"[",
"option",
"]",
"=",
"get_all_tool_inputs",
"(",
"job",
",",
"{",
"option",
":",
"tools",
"[",
"tool",
"]",
"[",
"option",
"]",
"}",
",",
"outer_key",
"=",
"':'",
".",
"join",
"(",
"[",
"outer_key",
",",
"tool",
"]",
")",
".",
"lstrip",
"(",
"':'",
")",
")",
"[",
"option",
"]",
"else",
":",
"if",
"option",
".",
"split",
"(",
"'_'",
")",
"[",
"-",
"1",
"]",
"in",
"[",
"'file'",
",",
"'vcf'",
",",
"'index'",
",",
"'fasta'",
",",
"'fai'",
",",
"'idx'",
",",
"'dict'",
",",
"'tbi'",
",",
"'beds'",
",",
"'gtf'",
",",
"'config'",
"]",
":",
"tools",
"[",
"tool",
"]",
"[",
"option",
"]",
"=",
"job",
".",
"addChildJobFn",
"(",
"get_pipeline_inputs",
",",
"':'",
".",
"join",
"(",
"[",
"outer_key",
",",
"tool",
",",
"option",
"]",
")",
".",
"lstrip",
"(",
"':'",
")",
",",
"tools",
"[",
"tool",
"]",
"[",
"option",
"]",
")",
".",
"rv",
"(",
")",
"elif",
"option",
"==",
"'version'",
":",
"tools",
"[",
"tool",
"]",
"[",
"option",
"]",
"=",
"str",
"(",
"tools",
"[",
"tool",
"]",
"[",
"option",
"]",
")",
"if",
"mutation_caller_list",
"is",
"not",
"None",
":",
"indexes",
"=",
"tools",
".",
"pop",
"(",
"'indexes'",
")",
"indexes",
"[",
"'chromosomes'",
"]",
"=",
"parse_chromosome_string",
"(",
"job",
",",
"indexes",
"[",
"'chromosomes'",
"]",
")",
"for",
"mutation_caller",
"in",
"mutation_caller_list",
":",
"if",
"mutation_caller",
"==",
"'indexes'",
":",
"continue",
"tools",
"[",
"mutation_caller",
"]",
".",
"update",
"(",
"indexes",
")",
"return",
"tools"
] | Iterate through all the tool options and download required files from their remote locations.
:param dict tools: A dict of dicts of all tools, and their options
:param str outer_key: If this is being called recursively, what was the outer dict called?
:param list mutation_caller_list: A list of mutation caller keys to append the indexes to.
:return: The fully resolved tool dictionary
:rtype: dict | [
"Iterate",
"through",
"all",
"the",
"tool",
"options",
"and",
"download",
"required",
"files",
"from",
"their",
"remote",
"locations",
"."
] | 06310682c50dcf8917b912c8e551299ff7ee41ce | https://github.com/BD2KGenomics/protect/blob/06310682c50dcf8917b912c8e551299ff7ee41ce/src/protect/pipeline/ProTECT.py#L740-L774 | train |
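A toy, Toil-free illustration of the suffix test that decides which options name remote files to fetch (the tool dict below is invented):

```python
FILE_SUFFIXES = {'file', 'vcf', 'index', 'fasta', 'fai', 'idx', 'dict',
                 'tbi', 'beds', 'gtf', 'config'}

def names_a_file(option):
    # Same rule as get_all_tool_inputs: dispatch on the last underscore-separated piece.
    return option.split('_')[-1] in FILE_SUFFIXES

tools = {'bwa': {'index': 'S3://bucket/bwa_index.tar.gz', 'n': 4, 'version': '0.7.9a'}}
for tool, opts in tools.items():
    for option, value in opts.items():
        action = 'download' if names_a_file(option) else 'leave as-is'
        print('%s:%s -> %s' % (tool, option, action))
```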
BD2KGenomics/protect | src/protect/pipeline/ProTECT.py | get_pipeline_inputs | def get_pipeline_inputs(job, input_flag, input_file, encryption_key=None, per_file_encryption=False,
gdc_download_token=None):
"""
Get the input file from s3 or disk and write to file store.
:param str input_flag: The name of the flag
:param str input_file: The value passed in the config file
:param str encryption_key: Path to the encryption key if encrypted with sse-c
:param bool per_file_encryption: If encrypted, was the file encrypted using the per-file method?
:param str gdc_download_token: The download token to obtain files from the GDC
:return: fsID for the file
:rtype: toil.fileStore.FileID
"""
work_dir = os.getcwd()
job.fileStore.logToMaster('Obtaining file (%s) to the file job store' % input_flag)
if input_file.startswith(('http', 'https', 'ftp')):
input_file = get_file_from_url(job, input_file, encryption_key=encryption_key,
per_file_encryption=per_file_encryption,
write_to_jobstore=True)
elif input_file.startswith(('S3', 's3')):
input_file = get_file_from_s3(job, input_file, encryption_key=encryption_key,
per_file_encryption=per_file_encryption,
write_to_jobstore=True)
elif input_file.startswith(('GDC', 'gdc')):
input_file = get_file_from_gdc(job, input_file, gdc_download_token=gdc_download_token,
write_to_jobstore=True)
else:
assert os.path.exists(input_file), 'Bogus Input : ' + input_file
input_file = job.fileStore.writeGlobalFile(input_file)
return input_file | python | def get_pipeline_inputs(job, input_flag, input_file, encryption_key=None, per_file_encryption=False,
gdc_download_token=None):
"""
Get the input file from s3 or disk and write to file store.
:param str input_flag: The name of the flag
:param str input_file: The value passed in the config file
:param str encryption_key: Path to the encryption key if encrypted with sse-c
:param bool per_file_encryption: If encrypted, was the file encrypted using the per-file method?
:param str gdc_download_token: The download token to obtain files from the GDC
:return: fsID for the file
:rtype: toil.fileStore.FileID
"""
work_dir = os.getcwd()
job.fileStore.logToMaster('Obtaining file (%s) to the file job store' % input_flag)
if input_file.startswith(('http', 'https', 'ftp')):
input_file = get_file_from_url(job, input_file, encryption_key=encryption_key,
per_file_encryption=per_file_encryption,
write_to_jobstore=True)
elif input_file.startswith(('S3', 's3')):
input_file = get_file_from_s3(job, input_file, encryption_key=encryption_key,
per_file_encryption=per_file_encryption,
write_to_jobstore=True)
elif input_file.startswith(('GDC', 'gdc')):
input_file = get_file_from_gdc(job, input_file, gdc_download_token=gdc_download_token,
write_to_jobstore=True)
else:
assert os.path.exists(input_file), 'Bogus Input : ' + input_file
input_file = job.fileStore.writeGlobalFile(input_file)
return input_file | [
"def",
"get_pipeline_inputs",
"(",
"job",
",",
"input_flag",
",",
"input_file",
",",
"encryption_key",
"=",
"None",
",",
"per_file_encryption",
"=",
"False",
",",
"gdc_download_token",
"=",
"None",
")",
":",
"work_dir",
"=",
"os",
".",
"getcwd",
"(",
")",
"job",
".",
"fileStore",
".",
"logToMaster",
"(",
"'Obtaining file (%s) to the file job store'",
"%",
"input_flag",
")",
"if",
"input_file",
".",
"startswith",
"(",
"(",
"'http'",
",",
"'https'",
",",
"'ftp'",
")",
")",
":",
"input_file",
"=",
"get_file_from_url",
"(",
"job",
",",
"input_file",
",",
"encryption_key",
"=",
"encryption_key",
",",
"per_file_encryption",
"=",
"per_file_encryption",
",",
"write_to_jobstore",
"=",
"True",
")",
"elif",
"input_file",
".",
"startswith",
"(",
"(",
"'S3'",
",",
"'s3'",
")",
")",
":",
"input_file",
"=",
"get_file_from_s3",
"(",
"job",
",",
"input_file",
",",
"encryption_key",
"=",
"encryption_key",
",",
"per_file_encryption",
"=",
"per_file_encryption",
",",
"write_to_jobstore",
"=",
"True",
")",
"elif",
"input_file",
".",
"startswith",
"(",
"(",
"'GDC'",
",",
"'gdc'",
")",
")",
":",
"input_file",
"=",
"get_file_from_gdc",
"(",
"job",
",",
"input_file",
",",
"gdc_download_token",
"=",
"gdc_download_token",
",",
"write_to_jobstore",
"=",
"True",
")",
"else",
":",
"assert",
"os",
".",
"path",
".",
"exists",
"(",
"input_file",
")",
",",
"'Bogus Input : '",
"+",
"input_file",
"input_file",
"=",
"job",
".",
"fileStore",
".",
"writeGlobalFile",
"(",
"input_file",
")",
"return",
"input_file"
] | Get the input file from s3 or disk and write to file store.
:param str input_flag: The name of the flag
:param str input_file: The value passed in the config file
:param str encryption_key: Path to the encryption key if encrypted with sse-c
:param bool per_file_encryption: If encrypted, was the file encrypted using the per-file method?
:param str gdc_download_token: The download token to obtain files from the GDC
:return: fsID for the file
:rtype: toil.fileStore.FileID | [
"Get",
"the",
"input",
"file",
"from",
"s3",
"or",
"disk",
"and",
"write",
"to",
"file",
"store",
"."
] | 06310682c50dcf8917b912c8e551299ff7ee41ce | https://github.com/BD2KGenomics/protect/blob/06310682c50dcf8917b912c8e551299ff7ee41ce/src/protect/pipeline/ProTECT.py#L777-L806 | train |
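A simplified sketch of the source dispatch only (no Toil, no real downloads); the example paths are invented:

```python
def classify_source(path):
    # Mirrors the prefix checks in get_pipeline_inputs.
    if path.startswith(('http', 'https', 'ftp')):
        return 'url'
    elif path.startswith(('S3', 's3')):
        return 's3'
    elif path.startswith(('GDC', 'gdc')):
        return 'gdc'
    return 'local file'

for p in ('s3://bucket/dbsnp.vcf.gz', 'gdc://abcd-1234-ef56', '/data/hg19.fa'):
    print(p, '->', classify_source(p))
```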
BD2KGenomics/protect | src/protect/pipeline/ProTECT.py | prepare_samples | def prepare_samples(job, patient_dict, univ_options):
"""
Obtain the input files for the patient and write them to the file store.
:param dict patient_dict: The input fastq dict
patient_dict:
|- 'tumor_dna_fastq_[12]' OR 'tumor_dna_bam': str
|- 'tumor_rna_fastq_[12]' OR 'tumor_rna_bam': str
|- 'normal_dna_fastq_[12]' OR 'normal_dna_bam': str
|- 'mutation_vcf': str
|- 'hla_haplotype_files': str
+- 'patient_id': str
:param dict univ_options: Dict of universal options used by almost all tools
:return: Updated fastq dict
output_dict:
|- 'tumor_dna_fastq_[12]' OR 'tumor_dna_bam': fsID
|- 'tumor_rna_fastq_[12]' OR 'tumor_rna_bam': fsID
|- 'normal_dna_fastq_[12]' OR 'normal_dna_bam': fsID
|- 'mutation_vcf': fsID
|- 'hla_haplotype_files': fsId
+- 'patient_id': str
:rtype: dict
"""
job.fileStore.logToMaster('Downloading Inputs for %s' % univ_options['patient'])
# For each sample type, check if the prefix is an S3 link or a regular file
# Download S3 files.
output_dict = {}
for input_file in patient_dict:
if not input_file.endswith(('bam', 'bai', '_1', '_2', 'files', 'vcf', 'bedpe')):
output_dict[input_file] = patient_dict[input_file]
continue
output_dict[input_file] = get_pipeline_inputs(
job, ':'.join([univ_options['patient'], input_file]), patient_dict[input_file],
encryption_key=(univ_options['sse_key'] if patient_dict['ssec_encrypted'] else None),
per_file_encryption=univ_options['sse_key_is_master'],
gdc_download_token=univ_options['gdc_download_token'])
return output_dict | python | def prepare_samples(job, patient_dict, univ_options):
"""
Obtain the input files for the patient and write them to the file store.
:param dict patient_dict: The input fastq dict
patient_dict:
|- 'tumor_dna_fastq_[12]' OR 'tumor_dna_bam': str
|- 'tumor_rna_fastq_[12]' OR 'tumor_rna_bam': str
|- 'normal_dna_fastq_[12]' OR 'normal_dna_bam': str
|- 'mutation_vcf': str
|- 'hla_haplotype_files': str
+- 'patient_id': str
:param dict univ_options: Dict of universal options used by almost all tools
:return: Updated fastq dict
output_dict:
|- 'tumor_dna_fastq_[12]' OR 'tumor_dna_bam': fsID
|- 'tumor_rna_fastq_[12]' OR 'tumor_rna_bam': fsID
|- 'normal_dna_fastq_[12]' OR 'normal_dna_bam': fsID
|- 'mutation_vcf': fsID
|- 'hla_haplotype_files': fsId
+- 'patient_id': str
:rtype: dict
"""
job.fileStore.logToMaster('Downloading Inputs for %s' % univ_options['patient'])
# For each sample type, check if the prefix is an S3 link or a regular file
# Download S3 files.
output_dict = {}
for input_file in patient_dict:
if not input_file.endswith(('bam', 'bai', '_1', '_2', 'files', 'vcf', 'bedpe')):
output_dict[input_file] = patient_dict[input_file]
continue
output_dict[input_file] = get_pipeline_inputs(
job, ':'.join([univ_options['patient'], input_file]), patient_dict[input_file],
encryption_key=(univ_options['sse_key'] if patient_dict['ssec_encrypted'] else None),
per_file_encryption=univ_options['sse_key_is_master'],
gdc_download_token=univ_options['gdc_download_token'])
return output_dict | [
"def",
"prepare_samples",
"(",
"job",
",",
"patient_dict",
",",
"univ_options",
")",
":",
"job",
".",
"fileStore",
".",
"logToMaster",
"(",
"'Downloading Inputs for %s'",
"%",
"univ_options",
"[",
"'patient'",
"]",
")",
"output_dict",
"=",
"{",
"}",
"for",
"input_file",
"in",
"patient_dict",
":",
"if",
"not",
"input_file",
".",
"endswith",
"(",
"(",
"'bam'",
",",
"'bai'",
",",
"'_1'",
",",
"'_2'",
",",
"'files'",
",",
"'vcf'",
",",
"'bedpe'",
")",
")",
":",
"output_dict",
"[",
"input_file",
"]",
"=",
"patient_dict",
"[",
"input_file",
"]",
"continue",
"output_dict",
"[",
"input_file",
"]",
"=",
"get_pipeline_inputs",
"(",
"job",
",",
"':'",
".",
"join",
"(",
"[",
"univ_options",
"[",
"'patient'",
"]",
",",
"input_file",
"]",
")",
",",
"patient_dict",
"[",
"input_file",
"]",
",",
"encryption_key",
"=",
"(",
"univ_options",
"[",
"'sse_key'",
"]",
"if",
"patient_dict",
"[",
"'ssec_encrypted'",
"]",
"else",
"None",
")",
",",
"per_file_encryption",
"=",
"univ_options",
"[",
"'sse_key_is_master'",
"]",
",",
"gdc_download_token",
"=",
"univ_options",
"[",
"'gdc_download_token'",
"]",
")",
"return",
"output_dict"
] | Obtain the input files for the patient and write them to the file store.
:param dict patient_dict: The input fastq dict
patient_dict:
|- 'tumor_dna_fastq_[12]' OR 'tumor_dna_bam': str
|- 'tumor_rna_fastq_[12]' OR 'tumor_rna_bam': str
|- 'normal_dna_fastq_[12]' OR 'normal_dna_bam': str
|- 'mutation_vcf': str
|- 'hla_haplotype_files': str
+- 'patient_id': str
:param dict univ_options: Dict of universal options used by almost all tools
:return: Updated fastq dict
output_dict:
|- 'tumor_dna_fastq_[12]' OR 'tumor_dna_bam': fsID
|- 'tumor_rna_fastq_[12]' OR 'tumor_rna_bam': fsID
|- 'normal_dna_fastq_[12]' OR 'normal_dna_bam': fsID
|- 'mutation_vcf': fsID
|- 'hla_haplotype_files': fsId
+- 'patient_id': str
:rtype: dict | [
"Obtain",
"the",
"input",
"files",
"for",
"the",
"patient",
"and",
"write",
"them",
"to",
"the",
"file",
"store",
"."
] | 06310682c50dcf8917b912c8e551299ff7ee41ce | https://github.com/BD2KGenomics/protect/blob/06310682c50dcf8917b912c8e551299ff7ee41ce/src/protect/pipeline/ProTECT.py#L809-L845 | train |
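A toy run of the key filter above: only entries whose keys end in a recognised suffix are treated as files to download (the patient dict is invented):

```python
patient_dict = {'patient_id': 'TEST',
                'ssec_encrypted': False,
                'tumor_dna_fastq_1': 'S3://bucket/tumor_dna_1.fq.gz',
                'hla_haplotype_files': '/data/hla_calls.tar.gz'}

suffixes = ('bam', 'bai', '_1', '_2', 'files', 'vcf', 'bedpe')
to_fetch = {k: v for k, v in patient_dict.items() if k.endswith(suffixes)}
print(sorted(to_fetch))  # ['hla_haplotype_files', 'tumor_dna_fastq_1']
```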
BD2KGenomics/protect | src/protect/pipeline/ProTECT.py | get_patient_bams | def get_patient_bams(job, patient_dict, sample_type, univ_options, bwa_options, mutect_options):
"""
Convenience function to return the bam and its index in the correct format for a sample type.
:param dict patient_dict: dict of patient info
:param str sample_type: 'tumor_rna', 'tumor_dna', 'normal_dna'
:param dict univ_options: Dict of universal options used by almost all tools
:param dict bwa_options: Options specific to bwa
:param dict mutect_options: Options specific to mutect
:return: formatted dict of bam and bai
:rtype: dict
"""
output_dict = {}
if 'dna' in sample_type:
sample_info = 'fix_pg_sorted'
prefix = sample_type + '_' + sample_info
else:
sample_info = 'genome_sorted'
prefix = 'rna_' + sample_info
if sample_type + '_bam' in patient_dict['gdc_inputs']:
output_dict[prefix + '.bam'] = patient_dict[sample_type + '_bam'][0]
output_dict[prefix + '.bam.bai'] = patient_dict[sample_type + '_bam'][1]
elif sample_type + '_bai' in patient_dict:
output_dict[prefix + '.bam'] = patient_dict[sample_type + '_bam']
output_dict[prefix + '.bam.bai'] = patient_dict[sample_type + '_bai']
else:
from protect.alignment.dna import index_bamfile, index_disk
output_job = job.wrapJobFn(index_bamfile, patient_dict[sample_type + '_bam'],
'rna' if sample_type == 'tumor_rna' else sample_type,
univ_options, bwa_options['samtools'],
sample_info=sample_info, export=False,
disk=PromisedRequirement(index_disk,
patient_dict[sample_type + '_bam']))
job.addChild(output_job)
output_dict = output_job.rv()
if sample_type == 'tumor_rna':
if 'tumor_rna_transcriptome_bam' not in patient_dict:
patient_dict['tumor_rna_transcriptome_bam'] = None
return{'rna_genome': output_dict,
'rna_transcriptome.bam': patient_dict['tumor_rna_transcriptome_bam']}
else:
return output_dict | python | def get_patient_bams(job, patient_dict, sample_type, univ_options, bwa_options, mutect_options):
"""
Convenience function to return the bam and its index in the correct format for a sample type.
:param dict patient_dict: dict of patient info
:param str sample_type: 'tumor_rna', 'tumor_dna', 'normal_dna'
:param dict univ_options: Dict of universal options used by almost all tools
:param dict bwa_options: Options specific to bwa
:param dict mutect_options: Options specific to mutect
:return: formatted dict of bam and bai
:rtype: dict
"""
output_dict = {}
if 'dna' in sample_type:
sample_info = 'fix_pg_sorted'
prefix = sample_type + '_' + sample_info
else:
sample_info = 'genome_sorted'
prefix = 'rna_' + sample_info
if sample_type + '_bam' in patient_dict['gdc_inputs']:
output_dict[prefix + '.bam'] = patient_dict[sample_type + '_bam'][0]
output_dict[prefix + '.bam.bai'] = patient_dict[sample_type + '_bam'][1]
elif sample_type + '_bai' in patient_dict:
output_dict[prefix + '.bam'] = patient_dict[sample_type + '_bam']
output_dict[prefix + '.bam.bai'] = patient_dict[sample_type + '_bai']
else:
from protect.alignment.dna import index_bamfile, index_disk
output_job = job.wrapJobFn(index_bamfile, patient_dict[sample_type + '_bam'],
'rna' if sample_type == 'tumor_rna' else sample_type,
univ_options, bwa_options['samtools'],
sample_info=sample_info, export=False,
disk=PromisedRequirement(index_disk,
patient_dict[sample_type + '_bam']))
job.addChild(output_job)
output_dict = output_job.rv()
if sample_type == 'tumor_rna':
if 'tumor_rna_transcriptome_bam' not in patient_dict:
patient_dict['tumor_rna_transcriptome_bam'] = None
return{'rna_genome': output_dict,
'rna_transcriptome.bam': patient_dict['tumor_rna_transcriptome_bam']}
else:
return output_dict | [
"def",
"get_patient_bams",
"(",
"job",
",",
"patient_dict",
",",
"sample_type",
",",
"univ_options",
",",
"bwa_options",
",",
"mutect_options",
")",
":",
"output_dict",
"=",
"{",
"}",
"if",
"'dna'",
"in",
"sample_type",
":",
"sample_info",
"=",
"'fix_pg_sorted'",
"prefix",
"=",
"sample_type",
"+",
"'_'",
"+",
"sample_info",
"else",
":",
"sample_info",
"=",
"'genome_sorted'",
"prefix",
"=",
"'rna_'",
"+",
"sample_info",
"if",
"sample_type",
"+",
"'_bam'",
"in",
"patient_dict",
"[",
"'gdc_inputs'",
"]",
":",
"output_dict",
"[",
"prefix",
"+",
"'.bam'",
"]",
"=",
"patient_dict",
"[",
"sample_type",
"+",
"'_bam'",
"]",
"[",
"0",
"]",
"output_dict",
"[",
"prefix",
"+",
"'.bam.bai'",
"]",
"=",
"patient_dict",
"[",
"sample_type",
"+",
"'_bam'",
"]",
"[",
"1",
"]",
"elif",
"sample_type",
"+",
"'_bai'",
"in",
"patient_dict",
":",
"output_dict",
"[",
"prefix",
"+",
"'.bam'",
"]",
"=",
"patient_dict",
"[",
"sample_type",
"+",
"'_bam'",
"]",
"output_dict",
"[",
"prefix",
"+",
"'.bam.bai'",
"]",
"=",
"patient_dict",
"[",
"sample_type",
"+",
"'_bai'",
"]",
"else",
":",
"from",
"protect",
".",
"alignment",
".",
"dna",
"import",
"index_bamfile",
",",
"index_disk",
"output_job",
"=",
"job",
".",
"wrapJobFn",
"(",
"index_bamfile",
",",
"patient_dict",
"[",
"sample_type",
"+",
"'_bam'",
"]",
",",
"'rna'",
"if",
"sample_type",
"==",
"'tumor_rna'",
"else",
"sample_type",
",",
"univ_options",
",",
"bwa_options",
"[",
"'samtools'",
"]",
",",
"sample_info",
"=",
"sample_info",
",",
"export",
"=",
"False",
",",
"disk",
"=",
"PromisedRequirement",
"(",
"index_disk",
",",
"patient_dict",
"[",
"sample_type",
"+",
"'_bam'",
"]",
")",
")",
"job",
".",
"addChild",
"(",
"output_job",
")",
"output_dict",
"=",
"output_job",
".",
"rv",
"(",
")",
"if",
"sample_type",
"==",
"'tumor_rna'",
":",
"if",
"'tumor_rna_transcriptome_bam'",
"not",
"in",
"patient_dict",
":",
"patient_dict",
"[",
"'tumor_rna_transcriptome_bam'",
"]",
"=",
"None",
"return",
"{",
"'rna_genome'",
":",
"output_dict",
",",
"'rna_transcriptome.bam'",
":",
"patient_dict",
"[",
"'tumor_rna_transcriptome_bam'",
"]",
"}",
"else",
":",
"return",
"output_dict"
] | Convenience function to return the bam and its index in the correct format for a sample type.
:param dict patient_dict: dict of patient info
:param str sample_type: 'tumor_rna', 'tumor_dna', 'normal_dna'
:param dict univ_options: Dict of universal options used by almost all tools
:param dict bwa_options: Options specific to bwa
:param dict mutect_options: Options specific to mutect
:return: formatted dict of bam and bai
:rtype: dict | [
"Convenience",
"function",
"to",
"return",
"the",
"bam",
"and",
"its",
"index",
"in",
"the",
"correct",
"format",
"for",
"a",
"sample",
"type",
"."
] | 06310682c50dcf8917b912c8e551299ff7ee41ce | https://github.com/BD2KGenomics/protect/blob/06310682c50dcf8917b912c8e551299ff7ee41ce/src/protect/pipeline/ProTECT.py#L860-L901 | train |
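A small reproduction of just the naming convention used for the returned keys (no file handling or indexing):

```python
def bam_key_prefix(sample_type):
    # DNA bams keep their sample type; the tumor RNA genome bam is always 'rna_genome_sorted'.
    if 'dna' in sample_type:
        return sample_type + '_fix_pg_sorted'
    return 'rna_genome_sorted'

for st in ('tumor_dna', 'normal_dna', 'tumor_rna'):
    print(st, '->', bam_key_prefix(st) + '.bam')
```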
BD2KGenomics/protect | src/protect/pipeline/ProTECT.py | get_patient_vcf | def get_patient_vcf(job, patient_dict):
"""
Convenience function to get the vcf from the patient dict
:param dict patient_dict: dict of patient info
:return: The vcf
:rtype: toil.fileStore.FileID
"""
temp = job.fileStore.readGlobalFile(patient_dict['mutation_vcf'],
os.path.join(os.getcwd(), 'temp.gz'))
if is_gzipfile(temp):
outfile = job.fileStore.writeGlobalFile(gunzip(temp))
job.fileStore.deleteGlobalFile(patient_dict['mutation_vcf'])
else:
outfile = patient_dict['mutation_vcf']
return outfile | python | def get_patient_vcf(job, patient_dict):
"""
Convenience function to get the vcf from the patient dict
:param dict patient_dict: dict of patient info
:return: The vcf
:rtype: toil.fileStore.FileID
"""
temp = job.fileStore.readGlobalFile(patient_dict['mutation_vcf'],
os.path.join(os.getcwd(), 'temp.gz'))
if is_gzipfile(temp):
outfile = job.fileStore.writeGlobalFile(gunzip(temp))
job.fileStore.deleteGlobalFile(patient_dict['mutation_vcf'])
else:
outfile = patient_dict['mutation_vcf']
return outfile | [
"def",
"get_patient_vcf",
"(",
"job",
",",
"patient_dict",
")",
":",
"temp",
"=",
"job",
".",
"fileStore",
".",
"readGlobalFile",
"(",
"patient_dict",
"[",
"'mutation_vcf'",
"]",
",",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"getcwd",
"(",
")",
",",
"'temp.gz'",
")",
")",
"if",
"is_gzipfile",
"(",
"temp",
")",
":",
"outfile",
"=",
"job",
".",
"fileStore",
".",
"writeGlobalFile",
"(",
"gunzip",
"(",
"temp",
")",
")",
"job",
".",
"fileStore",
".",
"deleteGlobalFile",
"(",
"patient_dict",
"[",
"'mutation_vcf'",
"]",
")",
"else",
":",
"outfile",
"=",
"patient_dict",
"[",
"'mutation_vcf'",
"]",
"return",
"outfile"
] | Convenience function to get the vcf from the patient dict
:param dict patient_dict: dict of patient info
:return: The vcf
:rtype: toil.fileStore.FileID | [
"Convenience",
"function",
"to",
"get",
"the",
"vcf",
"from",
"the",
"patient",
"dict"
] | 06310682c50dcf8917b912c8e551299ff7ee41ce | https://github.com/BD2KGenomics/protect/blob/06310682c50dcf8917b912c8e551299ff7ee41ce/src/protect/pipeline/ProTECT.py#L904-L919 | train |
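is_gzipfile and gunzip are ProTECT helpers that are not shown in this record; a rough standard-library equivalent of the same sniff-then-decompress step might look like this:

```python
import gzip
import shutil

def maybe_gunzip(path):
    # Read the first two bytes and compare against the gzip magic number.
    with open(path, 'rb') as fh:
        gzipped = fh.read(2) == b'\x1f\x8b'
    if not gzipped:
        return path
    out_path = path[:-3] if path.endswith('.gz') else path + '.unzipped'
    with gzip.open(path, 'rb') as src, open(out_path, 'wb') as dst:
        shutil.copyfileobj(src, dst)
    return out_path
```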
BD2KGenomics/protect | src/protect/pipeline/ProTECT.py | get_patient_mhc_haplotype | def get_patient_mhc_haplotype(job, patient_dict):
"""
Convenience function to get the mhc haplotype from the patient dict
:param dict patient_dict: dict of patient info
:return: The MHCI and MHCII haplotypes
:rtype: toil.fileStore.FileID
"""
haplotype_archive = job.fileStore.readGlobalFile(patient_dict['hla_haplotype_files'])
haplotype_archive = untargz(haplotype_archive, os.getcwd())
output_dict = {}
for filename in 'mhci_alleles.list', 'mhcii_alleles.list':
output_dict[filename] = job.fileStore.writeGlobalFile(os.path.join(haplotype_archive,
filename))
return output_dict | python | def get_patient_mhc_haplotype(job, patient_dict):
"""
Convenience function to get the mhc haplotype from the patient dict
:param dict patient_dict: dict of patient info
:return: The MHCI and MHCII haplotypes
:rtype: toil.fileStore.FileID
"""
haplotype_archive = job.fileStore.readGlobalFile(patient_dict['hla_haplotype_files'])
haplotype_archive = untargz(haplotype_archive, os.getcwd())
output_dict = {}
for filename in 'mhci_alleles.list', 'mhcii_alleles.list':
output_dict[filename] = job.fileStore.writeGlobalFile(os.path.join(haplotype_archive,
filename))
return output_dict | [
"def",
"get_patient_mhc_haplotype",
"(",
"job",
",",
"patient_dict",
")",
":",
"haplotype_archive",
"=",
"job",
".",
"fileStore",
".",
"readGlobalFile",
"(",
"patient_dict",
"[",
"'hla_haplotype_files'",
"]",
")",
"haplotype_archive",
"=",
"untargz",
"(",
"haplotype_archive",
",",
"os",
".",
"getcwd",
"(",
")",
")",
"output_dict",
"=",
"{",
"}",
"for",
"filename",
"in",
"'mhci_alleles.list'",
",",
"'mhcii_alleles.list'",
":",
"output_dict",
"[",
"filename",
"]",
"=",
"job",
".",
"fileStore",
".",
"writeGlobalFile",
"(",
"os",
".",
"path",
".",
"join",
"(",
"haplotype_archive",
",",
"filename",
")",
")",
"return",
"output_dict"
] | Convenience function to get the mhc haplotype from the patient dict
:param dict patient_dict: dict of patient info
:return: The MHCI and MHCII haplotypes
:rtype: toil.fileStore.FileID | [
"Convenience",
"function",
"to",
"get",
"the",
"mhc",
"haplotype",
"from",
"the",
"patient",
"dict"
] | 06310682c50dcf8917b912c8e551299ff7ee41ce | https://github.com/BD2KGenomics/protect/blob/06310682c50dcf8917b912c8e551299ff7ee41ce/src/protect/pipeline/ProTECT.py#L940-L954 | train |
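untargz is another ProTECT helper (it extracts a .tar.gz and returns the extracted directory); a standard-library sketch of the same unpack-then-read pattern, assuming the archive holds a single top-level folder containing the two allele lists named in the loop above:

```python
import os
import tarfile

def read_haplotype_lists(archive_path, workdir='.'):
    with tarfile.open(archive_path, 'r:gz') as tar:
        top_dir = tar.getnames()[0].split('/')[0]  # top-level folder inside the tar
        tar.extractall(workdir)
    alleles = {}
    for name in ('mhci_alleles.list', 'mhcii_alleles.list'):
        with open(os.path.join(workdir, top_dir, name)) as fh:
            alleles[name] = fh.read().splitlines()
    return alleles
```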
BD2KGenomics/protect | src/protect/pipeline/ProTECT.py | get_patient_expression | def get_patient_expression(job, patient_dict):
"""
Convenience function to get the expression from the patient dict
:param dict patient_dict: dict of patient info
:return: The gene and isoform expression
:rtype: toil.fileStore.FileID
"""
expression_archive = job.fileStore.readGlobalFile(patient_dict['expression_files'])
expression_archive = untargz(expression_archive, os.getcwd())
output_dict = {}
for filename in 'rsem.genes.results', 'rsem.isoforms.results':
output_dict[filename] = job.fileStore.writeGlobalFile(os.path.join(expression_archive,
filename))
return output_dict | python | def get_patient_expression(job, patient_dict):
"""
Convenience function to get the expression from the patient dict
:param dict patient_dict: dict of patient info
:return: The gene and isoform expression
:rtype: toil.fileStore.FileID
"""
expression_archive = job.fileStore.readGlobalFile(patient_dict['expression_files'])
expression_archive = untargz(expression_archive, os.getcwd())
output_dict = {}
for filename in 'rsem.genes.results', 'rsem.isoforms.results':
output_dict[filename] = job.fileStore.writeGlobalFile(os.path.join(expression_archive,
filename))
return output_dict | [
"def",
"get_patient_expression",
"(",
"job",
",",
"patient_dict",
")",
":",
"expression_archive",
"=",
"job",
".",
"fileStore",
".",
"readGlobalFile",
"(",
"patient_dict",
"[",
"'expression_files'",
"]",
")",
"expression_archive",
"=",
"untargz",
"(",
"expression_archive",
",",
"os",
".",
"getcwd",
"(",
")",
")",
"output_dict",
"=",
"{",
"}",
"for",
"filename",
"in",
"'rsem.genes.results'",
",",
"'rsem.isoforms.results'",
":",
"output_dict",
"[",
"filename",
"]",
"=",
"job",
".",
"fileStore",
".",
"writeGlobalFile",
"(",
"os",
".",
"path",
".",
"join",
"(",
"expression_archive",
",",
"filename",
")",
")",
"return",
"output_dict"
] | Convenience function to get the expression from the patient dict
:param dict patient_dict: dict of patient info
:return: The gene and isoform expression
:rtype: toil.fileStore.FileID | [
"Convenience",
"function",
"to",
"get",
"the",
"expression",
"from",
"the",
"patient",
"dict"
] | 06310682c50dcf8917b912c8e551299ff7ee41ce | https://github.com/BD2KGenomics/protect/blob/06310682c50dcf8917b912c8e551299ff7ee41ce/src/protect/pipeline/ProTECT.py#L957-L971 | train |
BD2KGenomics/protect | src/protect/pipeline/ProTECT.py | generate_config_file | def generate_config_file():
"""
Generate a config file for a ProTECT run on hg19.
:return: None
"""
shutil.copy(os.path.join(os.path.dirname(__file__), 'input_parameters.yaml'),
os.path.join(os.getcwd(), 'ProTECT_config.yaml')) | python | def generate_config_file():
"""
Generate a config file for a ProTECT run on hg19.
:return: None
"""
shutil.copy(os.path.join(os.path.dirname(__file__), 'input_parameters.yaml'),
os.path.join(os.getcwd(), 'ProTECT_config.yaml')) | [
"def",
"generate_config_file",
"(",
")",
":",
"shutil",
".",
"copy",
"(",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"__file__",
")",
",",
"'input_parameters.yaml'",
")",
",",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"getcwd",
"(",
")",
",",
"'ProTECT_config.yaml'",
")",
")"
] | Generate a config file for a ProTECT run on hg19.
:return: None | [
"Generate",
"a",
"config",
"file",
"for",
"a",
"ProTECT",
"run",
"on",
"hg19",
"."
] | 06310682c50dcf8917b912c8e551299ff7ee41ce | https://github.com/BD2KGenomics/protect/blob/06310682c50dcf8917b912c8e551299ff7ee41ce/src/protect/pipeline/ProTECT.py#L974-L981 | train |
BD2KGenomics/protect | src/protect/pipeline/ProTECT.py | main | def main():
"""
This is the main function for ProTECT.
"""
parser = argparse.ArgumentParser(prog='ProTECT',
description='Prediction of T-Cell Epitopes for Cancer Therapy',
epilog='Contact Arjun Rao ([email protected]) if you encounter '
'any problems while running ProTECT')
inputs = parser.add_mutually_exclusive_group(required=True)
inputs.add_argument('--config_file', dest='config_file', help='Config file to be used in the '
'run.', type=str, default=None)
inputs.add_argument('--generate_config', dest='generate_config', help='Generate a config file '
'in the current directory that is pre-filled with references and flags for '
'an hg19 run.', action='store_true', default=False)
parser.add_argument('--max-cores-per-job', dest='max_cores', help='Maximum cores to use per '
'job. Aligners and Haplotypers ask for cores dependent on the machine that '
'the launchpad gets assigned to -- In a heterogeneous cluster, this can '
'lead to problems. This value should be set to the number of cpus on the '
'smallest node in a cluster.',
type=int, required=False, default=None)
# We parse the args once to see if the user has asked for a config file to be generated. In
# this case, we don't need a jobstore. To handle the case where Toil arguments are passed to
# ProTECT, we parse known args, and if the user specified config_file instead of generate_config
# we re-parse the arguments with the added Toil parser.
params, others = parser.parse_known_args()
if params.generate_config:
generate_config_file()
else:
Job.Runner.addToilOptions(parser)
params = parser.parse_args()
params.config_file = os.path.abspath(params.config_file)
if params.maxCores:
if not params.max_cores:
params.max_cores = int(params.maxCores)
else:
if params.max_cores > int(params.maxCores):
print("The value provided to max-cores-per-job (%s) was greater than that "
"provided to maxCores (%s). Setting max-cores-per-job = maxCores." %
(params.max_cores, params.maxCores), file=sys.stderr)
params.max_cores = int(params.maxCores)
start = Job.wrapJobFn(parse_config_file, params.config_file, params.max_cores)
Job.Runner.startToil(start, params)
return None | python | def main():
"""
This is the main function for ProTECT.
"""
parser = argparse.ArgumentParser(prog='ProTECT',
description='Prediction of T-Cell Epitopes for Cancer Therapy',
epilog='Contact Arjun Rao ([email protected]) if you encounter '
'any problems while running ProTECT')
inputs = parser.add_mutually_exclusive_group(required=True)
inputs.add_argument('--config_file', dest='config_file', help='Config file to be used in the '
'run.', type=str, default=None)
inputs.add_argument('--generate_config', dest='generate_config', help='Generate a config file '
'in the current directory that is pre-filled with references and flags for '
'an hg19 run.', action='store_true', default=False)
parser.add_argument('--max-cores-per-job', dest='max_cores', help='Maximum cores to use per '
'job. Aligners and Haplotypers ask for cores dependent on the machine that '
'the launchpad gets assigned to -- In a heterogeneous cluster, this can '
'lead to problems. This value should be set to the number of cpus on the '
'smallest node in a cluster.',
type=int, required=False, default=None)
# We parse the args once to see if the user has asked for a config file to be generated. In
# this case, we don't need a jobstore. To handle the case where Toil arguments are passed to
# ProTECT, we parse known args, and if the user specified config_file instead of generate_config
# we re-parse the arguments with the added Toil parser.
params, others = parser.parse_known_args()
if params.generate_config:
generate_config_file()
else:
Job.Runner.addToilOptions(parser)
params = parser.parse_args()
params.config_file = os.path.abspath(params.config_file)
if params.maxCores:
if not params.max_cores:
params.max_cores = int(params.maxCores)
else:
if params.max_cores > int(params.maxCores):
print("The value provided to max-cores-per-job (%s) was greater than that "
"provided to maxCores (%s). Setting max-cores-per-job = maxCores." %
(params.max_cores, params.maxCores), file=sys.stderr)
params.max_cores = int(params.maxCores)
start = Job.wrapJobFn(parse_config_file, params.config_file, params.max_cores)
Job.Runner.startToil(start, params)
return None | [
"def",
"main",
"(",
")",
":",
"parser",
"=",
"argparse",
".",
"ArgumentParser",
"(",
"prog",
"=",
"'ProTECT'",
",",
"description",
"=",
"'Prediction of T-Cell Epitopes for Cancer Therapy'",
",",
"epilog",
"=",
"'Contact Arjun Rao ([email protected]) if you encounter '",
"'any problems while running ProTECT'",
")",
"inputs",
"=",
"parser",
".",
"add_mutually_exclusive_group",
"(",
"required",
"=",
"True",
")",
"inputs",
".",
"add_argument",
"(",
"'--config_file'",
",",
"dest",
"=",
"'config_file'",
",",
"help",
"=",
"'Config file to be used in the '",
"'run.'",
",",
"type",
"=",
"str",
",",
"default",
"=",
"None",
")",
"inputs",
".",
"add_argument",
"(",
"'--generate_config'",
",",
"dest",
"=",
"'generate_config'",
",",
"help",
"=",
"'Generate a config file '",
"'in the current directory that is pre-filled with references and flags for '",
"'an hg19 run.'",
",",
"action",
"=",
"'store_true'",
",",
"default",
"=",
"False",
")",
"parser",
".",
"add_argument",
"(",
"'--max-cores-per-job'",
",",
"dest",
"=",
"'max_cores'",
",",
"help",
"=",
"'Maximum cores to use per '",
"'job. Aligners and Haplotypers ask for cores dependent on the machine that '",
"'the launchpad gets assigned to -- In a heterogeneous cluster, this can '",
"'lead to problems. This value should be set to the number of cpus on the '",
"'smallest node in a cluster.'",
",",
"type",
"=",
"int",
",",
"required",
"=",
"False",
",",
"default",
"=",
"None",
")",
"params",
",",
"others",
"=",
"parser",
".",
"parse_known_args",
"(",
")",
"if",
"params",
".",
"generate_config",
":",
"generate_config_file",
"(",
")",
"else",
":",
"Job",
".",
"Runner",
".",
"addToilOptions",
"(",
"parser",
")",
"params",
"=",
"parser",
".",
"parse_args",
"(",
")",
"params",
".",
"config_file",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"params",
".",
"config_file",
")",
"if",
"params",
".",
"maxCores",
":",
"if",
"not",
"params",
".",
"max_cores",
":",
"params",
".",
"max_cores",
"=",
"int",
"(",
"params",
".",
"maxCores",
")",
"else",
":",
"if",
"params",
".",
"max_cores",
">",
"int",
"(",
"params",
".",
"maxCores",
")",
":",
"print",
"(",
"\"The value provided to max-cores-per-job (%s) was greater than that \"",
"\"provided to maxCores (%s). Setting max-cores-per-job = maxCores.\"",
"%",
"(",
"params",
".",
"max_cores",
",",
"params",
".",
"maxCores",
")",
",",
"file",
"=",
"sys",
".",
"stderr",
")",
"params",
".",
"max_cores",
"=",
"int",
"(",
"params",
".",
"maxCores",
")",
"start",
"=",
"Job",
".",
"wrapJobFn",
"(",
"parse_config_file",
",",
"params",
".",
"config_file",
",",
"params",
".",
"max_cores",
")",
"Job",
".",
"Runner",
".",
"startToil",
"(",
"start",
",",
"params",
")",
"return",
"None"
] | This is the main function for ProTECT. | [
"This",
"is",
"the",
"main",
"function",
"for",
"ProTECT",
"."
] | 06310682c50dcf8917b912c8e551299ff7ee41ce | https://github.com/BD2KGenomics/protect/blob/06310682c50dcf8917b912c8e551299ff7ee41ce/src/protect/pipeline/ProTECT.py#L984-L1026 | train |
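A hypothetical end-to-end invocation, written programmatically for illustration; the config path, core count and jobstore location are made up, and the trailing positional argument is the jobstore that Toil's own option parser (added by Job.Runner.addToilOptions) expects. Running with --generate_config instead writes the template ProTECT_config.yaml into the current directory, as generate_config_file above shows.

```python
import sys

# Equivalent to the command line:
#   ProTECT --config_file ProTECT_config.yaml --max-cores-per-job 16 ./protect_jobstore
sys.argv = ['ProTECT', '--config_file', 'ProTECT_config.yaml',
            '--max-cores-per-job', '16', './protect_jobstore']
main()
```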
jinglemansweep/lcdproc | lcdproc/server.py | Server.poll | def poll(self):
"""
Poll
Check for a non-response string generated by LCDd and return any string read.
LCDd generates strings for key presses, menu events & screen visibility changes.
"""
if select.select([self.tn], [], [], 0) == ([self.tn], [], []):
response = urllib.unquote(self.tn.read_until(b"\n").decode())
if self.debug: print "Telnet Poll: %s" % (response[:-1])
# TODO Keep track of which screen is displayed
return response
else:
return None | python | def poll(self):
"""
Poll
Check for a non-response string generated by LCDd and return any string read.
LCDd generates strings for key presses, menu events & screen visibility changes.
"""
if select.select([self.tn], [], [], 0) == ([self.tn], [], []):
response = urllib.unquote(self.tn.read_until(b"\n").decode())
if self.debug: print "Telnet Poll: %s" % (response[:-1])
# TODO Keep track of which screen is displayed
return response
else:
return None | [
"def",
"poll",
"(",
"self",
")",
":",
"if",
"select",
".",
"select",
"(",
"[",
"self",
".",
"tn",
"]",
",",
"[",
"]",
",",
"[",
"]",
",",
"0",
")",
"==",
"(",
"[",
"self",
".",
"tn",
"]",
",",
"[",
"]",
",",
"[",
"]",
")",
":",
"response",
"=",
"urllib",
".",
"unquote",
"(",
"self",
".",
"tn",
".",
"read_until",
"(",
"b\"\\n\"",
")",
".",
"decode",
"(",
")",
")",
"if",
"self",
".",
"debug",
":",
"print",
"\"Telnet Poll: %s\"",
"%",
"(",
"response",
"[",
":",
"-",
"1",
"]",
")",
"return",
"response",
"else",
":",
"return",
"None"
] | Poll
Check for a non-response string generated by LCDd and return any string read.
LCDd generates strings for key presses, menu events & screen visibility changes. | [
"Poll",
"Check",
"for",
"a",
"non",
"-",
"response",
"string",
"generated",
"by",
"LCDd",
"and",
"return",
"any",
"string",
"read",
".",
"LCDd",
"generates",
"strings",
"for",
"key",
"presses",
"menu",
"events",
"&",
"screen",
"visibility",
"changes",
"."
] | 973628fc326177c9deaf3f2e1a435159eb565ae0 | https://github.com/jinglemansweep/lcdproc/blob/973628fc326177c9deaf3f2e1a435159eb565ae0/lcdproc/server.py#L61-L74 | train |
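A hypothetical event loop built on poll(); `server` is assumed to be an already-initialised lcdproc Server with its screens set up, and handle() is only a stand-in for whatever the application does with key-press or menu events:

```python
import time

def handle(event):
    # Placeholder reaction to an LCDd event string.
    print('LCDd event: %s' % event.strip())

while True:
    event = server.poll()       # returns None when LCDd has nothing queued
    if event is not None:
        handle(event)
    time.sleep(0.1)
```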
APSL/django-kaio | kaio/management/commands/generate_ini.py | module_to_dict | def module_to_dict(module, omittable=lambda k: k.startswith('_')):
"""
Converts a module namespace to a Python dictionary. Used by get_settings_diff.
"""
return dict([(k, repr(v)) for k, v in module.__dict__.items() if not omittable(k)]) | python | def module_to_dict(module, omittable=lambda k: k.startswith('_')):
"""
Converts a module namespace to a Python dictionary. Used by get_settings_diff.
"""
return dict([(k, repr(v)) for k, v in module.__dict__.items() if not omittable(k)]) | [
"def",
"module_to_dict",
"(",
"module",
",",
"omittable",
"=",
"lambda",
"k",
":",
"k",
".",
"startswith",
"(",
"'_'",
")",
")",
":",
"return",
"dict",
"(",
"[",
"(",
"k",
",",
"repr",
"(",
"v",
")",
")",
"for",
"k",
",",
"v",
"in",
"module",
".",
"__dict__",
".",
"items",
"(",
")",
"if",
"not",
"omittable",
"(",
"k",
")",
"]",
")"
] | Converts a module namespace to a Python dictionary. Used by get_settings_diff. | [
"Converts",
"a",
"module",
"namespace",
"to",
"a",
"Python",
"dictionary",
".",
"Used",
"by",
"get_settings_diff",
"."
] | b74b109bcfba31d973723bc419e2c95d190b80b7 | https://github.com/APSL/django-kaio/blob/b74b109bcfba31d973723bc419e2c95d190b80b7/kaio/management/commands/generate_ini.py#L15-L19 | train |
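Example call: snapshot a module's public names as repr strings (the math module is used purely for illustration):

```python
import math

snapshot = module_to_dict(math)   # keys starting with '_' are omitted
print(snapshot['pi'])             # prints 3.141592653589793
```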
BD2KGenomics/protect | src/protect/mutation_annotation/snpeff.py | run_snpeff | def run_snpeff(job, merged_mutation_file, univ_options, snpeff_options):
"""
Run snpeff on an input vcf.
:param toil.fileStore.FileID merged_mutation_file: fsID for input vcf
:param dict univ_options: Dict of universal options used by almost all tools
:param dict snpeff_options: Options specific to snpeff
:return: fsID for the snpeffed vcf
:rtype: toil.fileStore.FileID
"""
work_dir = os.getcwd()
input_files = {
'merged_mutations.vcf': merged_mutation_file,
'snpeff_index.tar.gz': snpeff_options['index']}
input_files = get_files_from_filestore(job, input_files, work_dir, docker=False)
input_files['snpeff_index'] = untargz(input_files['snpeff_index.tar.gz'], work_dir)
input_files = {key: docker_path(path) for key, path in input_files.items()}
parameters = ['eff',
'-dataDir', input_files['snpeff_index'],
'-c', '/'.join([input_files['snpeff_index'],
'snpEff_' + univ_options['ref'] + '_gencode.config']),
'-no-intergenic',
'-no-downstream',
'-no-upstream',
# '-canon',
'-noStats',
univ_options['ref'] + '_gencode',
input_files['merged_mutations.vcf']]
xmx = snpeff_options['java_Xmx'] if snpeff_options['java_Xmx'] else univ_options['java_Xmx']
with open('/'.join([work_dir, 'mutations.vcf']), 'w') as snpeff_file:
docker_call(tool='snpeff', tool_parameters=parameters, work_dir=work_dir,
dockerhub=univ_options['dockerhub'], java_xmx=xmx, outfile=snpeff_file,
tool_version=snpeff_options['version'])
output_file = job.fileStore.writeGlobalFile(snpeff_file.name)
export_results(job, output_file, snpeff_file.name, univ_options, subfolder='mutations/snpeffed')
job.fileStore.logToMaster('Ran snpeff on %s successfully' % univ_options['patient'])
return output_file | python | def run_snpeff(job, merged_mutation_file, univ_options, snpeff_options):
"""
Run snpeff on an input vcf.
:param toil.fileStore.FileID merged_mutation_file: fsID for input vcf
:param dict univ_options: Dict of universal options used by almost all tools
:param dict snpeff_options: Options specific to snpeff
:return: fsID for the snpeffed vcf
:rtype: toil.fileStore.FileID
"""
work_dir = os.getcwd()
input_files = {
'merged_mutations.vcf': merged_mutation_file,
'snpeff_index.tar.gz': snpeff_options['index']}
input_files = get_files_from_filestore(job, input_files, work_dir, docker=False)
input_files['snpeff_index'] = untargz(input_files['snpeff_index.tar.gz'], work_dir)
input_files = {key: docker_path(path) for key, path in input_files.items()}
parameters = ['eff',
'-dataDir', input_files['snpeff_index'],
'-c', '/'.join([input_files['snpeff_index'],
'snpEff_' + univ_options['ref'] + '_gencode.config']),
'-no-intergenic',
'-no-downstream',
'-no-upstream',
# '-canon',
'-noStats',
univ_options['ref'] + '_gencode',
input_files['merged_mutations.vcf']]
xmx = snpeff_options['java_Xmx'] if snpeff_options['java_Xmx'] else univ_options['java_Xmx']
with open('/'.join([work_dir, 'mutations.vcf']), 'w') as snpeff_file:
docker_call(tool='snpeff', tool_parameters=parameters, work_dir=work_dir,
dockerhub=univ_options['dockerhub'], java_xmx=xmx, outfile=snpeff_file,
tool_version=snpeff_options['version'])
output_file = job.fileStore.writeGlobalFile(snpeff_file.name)
export_results(job, output_file, snpeff_file.name, univ_options, subfolder='mutations/snpeffed')
job.fileStore.logToMaster('Ran snpeff on %s successfully' % univ_options['patient'])
return output_file | [
"def",
"run_snpeff",
"(",
"job",
",",
"merged_mutation_file",
",",
"univ_options",
",",
"snpeff_options",
")",
":",
"work_dir",
"=",
"os",
".",
"getcwd",
"(",
")",
"input_files",
"=",
"{",
"'merged_mutations.vcf'",
":",
"merged_mutation_file",
",",
"'snpeff_index.tar.gz'",
":",
"snpeff_options",
"[",
"'index'",
"]",
"}",
"input_files",
"=",
"get_files_from_filestore",
"(",
"job",
",",
"input_files",
",",
"work_dir",
",",
"docker",
"=",
"False",
")",
"input_files",
"[",
"'snpeff_index'",
"]",
"=",
"untargz",
"(",
"input_files",
"[",
"'snpeff_index.tar.gz'",
"]",
",",
"work_dir",
")",
"input_files",
"=",
"{",
"key",
":",
"docker_path",
"(",
"path",
")",
"for",
"key",
",",
"path",
"in",
"input_files",
".",
"items",
"(",
")",
"}",
"parameters",
"=",
"[",
"'eff'",
",",
"'-dataDir'",
",",
"input_files",
"[",
"'snpeff_index'",
"]",
",",
"'-c'",
",",
"'/'",
".",
"join",
"(",
"[",
"input_files",
"[",
"'snpeff_index'",
"]",
",",
"'snpEff_'",
"+",
"univ_options",
"[",
"'ref'",
"]",
"+",
"'_gencode.config'",
"]",
")",
",",
"'-no-intergenic'",
",",
"'-no-downstream'",
",",
"'-no-upstream'",
",",
"'-noStats'",
",",
"univ_options",
"[",
"'ref'",
"]",
"+",
"'_gencode'",
",",
"input_files",
"[",
"'merged_mutations.vcf'",
"]",
"]",
"xmx",
"=",
"snpeff_options",
"[",
"'java_Xmx'",
"]",
"if",
"snpeff_options",
"[",
"'java_Xmx'",
"]",
"else",
"univ_options",
"[",
"'java_Xmx'",
"]",
"with",
"open",
"(",
"'/'",
".",
"join",
"(",
"[",
"work_dir",
",",
"'mutations.vcf'",
"]",
")",
",",
"'w'",
")",
"as",
"snpeff_file",
":",
"docker_call",
"(",
"tool",
"=",
"'snpeff'",
",",
"tool_parameters",
"=",
"parameters",
",",
"work_dir",
"=",
"work_dir",
",",
"dockerhub",
"=",
"univ_options",
"[",
"'dockerhub'",
"]",
",",
"java_xmx",
"=",
"xmx",
",",
"outfile",
"=",
"snpeff_file",
",",
"tool_version",
"=",
"snpeff_options",
"[",
"'version'",
"]",
")",
"output_file",
"=",
"job",
".",
"fileStore",
".",
"writeGlobalFile",
"(",
"snpeff_file",
".",
"name",
")",
"export_results",
"(",
"job",
",",
"output_file",
",",
"snpeff_file",
".",
"name",
",",
"univ_options",
",",
"subfolder",
"=",
"'mutations/snpeffed'",
")",
"job",
".",
"fileStore",
".",
"logToMaster",
"(",
"'Ran snpeff on %s successfully'",
"%",
"univ_options",
"[",
"'patient'",
"]",
")",
"return",
"output_file"
] | Run snpeff on an input vcf.
:param toil.fileStore.FileID merged_mutation_file: fsID for input vcf
:param dict univ_options: Dict of universal options used by almost all tools
:param dict snpeff_options: Options specific to snpeff
:return: fsID for the snpeffed vcf
:rtype: toil.fileStore.FileID | [
"Run",
"snpeff",
"on",
"an",
"input",
"vcf",
"."
] | 06310682c50dcf8917b912c8e551299ff7ee41ce | https://github.com/BD2KGenomics/protect/blob/06310682c50dcf8917b912c8e551299ff7ee41ce/src/protect/mutation_annotation/snpeff.py#L32-L69 | train |
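A hypothetical wiring of run_snpeff as a Toil child job. Only the option keys the function itself reads are shown (index, version and java_Xmx for snpeff; ref, dockerhub, patient and java_Xmx for the universal options, although export_results may need more); every value and fsID name below is invented.

```python
univ_options = {'ref': 'hg19', 'dockerhub': 'some_dockerhub_user',
                'patient': 'TEST', 'java_Xmx': '20G'}
snpeff_options = {'index': snpeff_index_fsid,   # hypothetical fsID of the snpEff index tarball
                  'version': '3.6', 'java_Xmx': None}

snpeff_job = job.wrapJobFn(run_snpeff, merged_vcf_fsid, univ_options, snpeff_options)
job.addChild(snpeff_job)
snpeffed_vcf_fsid = snpeff_job.rv()
```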
paypal/baler | baler/baler.py | paths_in_directory | def paths_in_directory(input_directory):
"""
Generate a list of all files in input_directory, each as a list containing path components.
"""
paths = []
for base_path, directories, filenames in os.walk(input_directory):
relative_path = os.path.relpath(base_path, input_directory)
path_components = relative_path.split(os.sep)
if path_components[0] == ".":
path_components = path_components[1:]
if path_components and path_components[0].startswith("."):
# hidden dir
continue
path_components = filter(bool, path_components) # remove empty components
for filename in filenames:
if filename.startswith("."):
# hidden file
continue
paths.append(path_components + [filename])
return paths | python | def paths_in_directory(input_directory):
"""
Generate a list of all files in input_directory, each as a list containing path components.
"""
paths = []
for base_path, directories, filenames in os.walk(input_directory):
relative_path = os.path.relpath(base_path, input_directory)
path_components = relative_path.split(os.sep)
if path_components[0] == ".":
path_components = path_components[1:]
if path_components and path_components[0].startswith("."):
# hidden dir
continue
path_components = filter(bool, path_components) # remove empty components
for filename in filenames:
if filename.startswith("."):
# hidden file
continue
paths.append(path_components + [filename])
return paths | [
"def",
"paths_in_directory",
"(",
"input_directory",
")",
":",
"paths",
"=",
"[",
"]",
"for",
"base_path",
",",
"directories",
",",
"filenames",
"in",
"os",
".",
"walk",
"(",
"input_directory",
")",
":",
"relative_path",
"=",
"os",
".",
"path",
".",
"relpath",
"(",
"base_path",
",",
"input_directory",
")",
"path_components",
"=",
"relative_path",
".",
"split",
"(",
"os",
".",
"sep",
")",
"if",
"path_components",
"[",
"0",
"]",
"==",
"\".\"",
":",
"path_components",
"=",
"path_components",
"[",
"1",
":",
"]",
"if",
"path_components",
"and",
"path_components",
"[",
"0",
"]",
".",
"startswith",
"(",
"\".\"",
")",
":",
"continue",
"path_components",
"=",
"filter",
"(",
"bool",
",",
"path_components",
")",
"for",
"filename",
"in",
"filenames",
":",
"if",
"filename",
".",
"startswith",
"(",
"\".\"",
")",
":",
"continue",
"paths",
".",
"append",
"(",
"path_components",
"+",
"[",
"filename",
"]",
")",
"return",
"paths"
] | Generate a list of all files in input_directory, each as a list containing path components. | [
"Generate",
"a",
"list",
"of",
"all",
"files",
"in",
"input_directory",
"each",
"as",
"a",
"list",
"containing",
"path",
"components",
"."
] | db4f09dd2c7729b2df5268c87ad3b4cb43396abf | https://github.com/paypal/baler/blob/db4f09dd2c7729b2df5268c87ad3b4cb43396abf/baler/baler.py#L22-L41 | train |
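Example usage ('assets' is a made-up directory). Note the function as written targets Python 2: under Python 3 the filter() call returns an iterator, so the `path_components + [filename]` concatenation would raise a TypeError.

```python
import os

for components in paths_in_directory('assets'):
    print(os.path.join(*components))   # e.g. images/logo@2x.png
```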
BD2KGenomics/protect | src/protect/addons/assess_car_t_validity.py | run_car_t_validity_assessment | def run_car_t_validity_assessment(job, rsem_files, univ_options, reports_options):
"""
A wrapper for assess_car_t_validity.
:param dict rsem_files: Results from running rsem
:param dict univ_options: Dict of universal options used by almost all tools
:param dict reports_options: Options specific to reporting modules
:return: The results of running assess_car_t_validity
:rtype: toil.fileStore.FileID
"""
return job.addChildJobFn(assess_car_t_validity, rsem_files['rsem.genes.results'],
univ_options, reports_options).rv() | python | def run_car_t_validity_assessment(job, rsem_files, univ_options, reports_options):
"""
A wrapper for assess_car_t_validity.
:param dict rsem_files: Results from running rsem
:param dict univ_options: Dict of universal options used by almost all tools
:param dict reports_options: Options specific to reporting modules
:return: The results of running assess_car_t_validity
:rtype: toil.fileStore.FileID
"""
return job.addChildJobFn(assess_car_t_validity, rsem_files['rsem.genes.results'],
univ_options, reports_options).rv() | [
"def",
"run_car_t_validity_assessment",
"(",
"job",
",",
"rsem_files",
",",
"univ_options",
",",
"reports_options",
")",
":",
"return",
"job",
".",
"addChildJobFn",
"(",
"assess_car_t_validity",
",",
"rsem_files",
"[",
"'rsem.genes.results'",
"]",
",",
"univ_options",
",",
"reports_options",
")",
".",
"rv",
"(",
")"
] | A wrapper for assess_car_t_validity.
:param dict rsem_files: Results from running rsem
:param dict univ_options: Dict of universal options used by almost all tools
:param dict reports_options: Options specific to reporting modules
:return: The results of running assess_car_t_validity
:rtype: toil.fileStore.FileID | [
"A",
"wrapper",
"for",
"assess_car_t_validity",
"."
] | 06310682c50dcf8917b912c8e551299ff7ee41ce | https://github.com/BD2KGenomics/protect/blob/06310682c50dcf8917b912c8e551299ff7ee41ce/src/protect/addons/assess_car_t_validity.py#L25-L36 | train |
BD2KGenomics/protect | src/protect/alignment/dna.py | align_dna | def align_dna(job, fastqs, sample_type, univ_options, bwa_options):
"""
A wrapper for the entire dna alignment subgraph.
:param list fastqs: The input fastqs for alignment
:param str sample_type: Description of the sample to inject into the filename
:param dict univ_options: Dict of universal options used by almost all tools
:param dict bwa_options: Options specific to bwa
:return: Dict containing output bam and bai
output_files:
|- '<sample_type>_fix_pg_sorted.bam': fsID
+- '<sample_type>_fix_pg_sorted.bam.bai': fsID
:rtype: dict
"""
# The mkdup and regroup steps use picard that allots heap space using the Xmx key in the
# univ_options dictionary. This should reflect in the job allotment. Since we want all these
# jobs to occur on the same node, we need to give them all the same memory requirements.
bwa = job.wrapJobFn(run_bwa, fastqs, sample_type, univ_options, bwa_options,
disk=PromisedRequirement(bwa_disk, fastqs, bwa_options['index']),
memory=univ_options['java_Xmx'],
cores=bwa_options['n'])
sam2bam = job.wrapJobFn(bam_conversion, bwa.rv(), sample_type, univ_options,
bwa_options['samtools'],
disk=PromisedRequirement(sam2bam_disk, bwa.rv()),
memory=univ_options['java_Xmx'])
# reheader takes the same disk as sam2bam so we can serialize this on the same worker.
reheader = job.wrapJobFn(fix_bam_header, sam2bam.rv(), sample_type, univ_options,
bwa_options['samtools'],
disk=PromisedRequirement(sam2bam_disk, bwa.rv()),
memory=univ_options['java_Xmx'])
regroup = job.wrapJobFn(add_readgroups, reheader.rv(), sample_type, univ_options,
bwa_options['picard'],
disk=PromisedRequirement(regroup_disk, reheader.rv()),
memory=univ_options['java_Xmx'])
mkdup = job.wrapJobFn(mark_duplicates, regroup.rv(), sample_type, univ_options,
bwa_options['picard'],
disk=PromisedRequirement(mkdup_disk, regroup.rv()),
memory=univ_options['java_Xmx'])
index = job.wrapJobFn(index_bamfile, mkdup.rv(), sample_type, univ_options,
bwa_options['samtools'], sample_info='fix_pg_sorted',
disk=PromisedRequirement(index_disk, mkdup.rv()),
memory=univ_options['java_Xmx'])
job.addChild(bwa)
bwa.addChild(sam2bam)
sam2bam.addChild(reheader)
reheader.addChild(regroup)
regroup.addChild(mkdup)
mkdup.addChild(index)
return index.rv() | python | def align_dna(job, fastqs, sample_type, univ_options, bwa_options):
"""
A wrapper for the entire dna alignment subgraph.
:param list fastqs: The input fastqs for alignment
:param str sample_type: Description of the sample to inject into the filename
:param dict univ_options: Dict of universal options used by almost all tools
:param dict bwa_options: Options specific to bwa
:return: Dict containing output bam and bai
output_files:
|- '<sample_type>_fix_pg_sorted.bam': fsID
+- '<sample_type>_fix_pg_sorted.bam.bai': fsID
:rtype: dict
"""
# The mkdup and regroup steps use picard that allots heap space using the Xmx key in the
# univ_options dictionary. This should reflect in the job allotment. Since we want all these
# jobs to occur on the same node, we need to give them all the same memory requirements.
bwa = job.wrapJobFn(run_bwa, fastqs, sample_type, univ_options, bwa_options,
disk=PromisedRequirement(bwa_disk, fastqs, bwa_options['index']),
memory=univ_options['java_Xmx'],
cores=bwa_options['n'])
sam2bam = job.wrapJobFn(bam_conversion, bwa.rv(), sample_type, univ_options,
bwa_options['samtools'],
disk=PromisedRequirement(sam2bam_disk, bwa.rv()),
memory=univ_options['java_Xmx'])
# reheader takes the same disk as sam2bam so we can serialize this on the same worker.
reheader = job.wrapJobFn(fix_bam_header, sam2bam.rv(), sample_type, univ_options,
bwa_options['samtools'],
disk=PromisedRequirement(sam2bam_disk, bwa.rv()),
memory=univ_options['java_Xmx'])
regroup = job.wrapJobFn(add_readgroups, reheader.rv(), sample_type, univ_options,
bwa_options['picard'],
disk=PromisedRequirement(regroup_disk, reheader.rv()),
memory=univ_options['java_Xmx'])
mkdup = job.wrapJobFn(mark_duplicates, regroup.rv(), sample_type, univ_options,
bwa_options['picard'],
disk=PromisedRequirement(mkdup_disk, regroup.rv()),
memory=univ_options['java_Xmx'])
index = job.wrapJobFn(index_bamfile, mkdup.rv(), sample_type, univ_options,
bwa_options['samtools'], sample_info='fix_pg_sorted',
disk=PromisedRequirement(index_disk, mkdup.rv()),
memory=univ_options['java_Xmx'])
job.addChild(bwa)
bwa.addChild(sam2bam)
sam2bam.addChild(reheader)
reheader.addChild(regroup)
regroup.addChild(mkdup)
mkdup.addChild(index)
return index.rv() | [
"def",
"align_dna",
"(",
"job",
",",
"fastqs",
",",
"sample_type",
",",
"univ_options",
",",
"bwa_options",
")",
":",
"bwa",
"=",
"job",
".",
"wrapJobFn",
"(",
"run_bwa",
",",
"fastqs",
",",
"sample_type",
",",
"univ_options",
",",
"bwa_options",
",",
"disk",
"=",
"PromisedRequirement",
"(",
"bwa_disk",
",",
"fastqs",
",",
"bwa_options",
"[",
"'index'",
"]",
")",
",",
"memory",
"=",
"univ_options",
"[",
"'java_Xmx'",
"]",
",",
"cores",
"=",
"bwa_options",
"[",
"'n'",
"]",
")",
"sam2bam",
"=",
"job",
".",
"wrapJobFn",
"(",
"bam_conversion",
",",
"bwa",
".",
"rv",
"(",
")",
",",
"sample_type",
",",
"univ_options",
",",
"bwa_options",
"[",
"'samtools'",
"]",
",",
"disk",
"=",
"PromisedRequirement",
"(",
"sam2bam_disk",
",",
"bwa",
".",
"rv",
"(",
")",
")",
",",
"memory",
"=",
"univ_options",
"[",
"'java_Xmx'",
"]",
")",
"reheader",
"=",
"job",
".",
"wrapJobFn",
"(",
"fix_bam_header",
",",
"sam2bam",
".",
"rv",
"(",
")",
",",
"sample_type",
",",
"univ_options",
",",
"bwa_options",
"[",
"'samtools'",
"]",
",",
"disk",
"=",
"PromisedRequirement",
"(",
"sam2bam_disk",
",",
"bwa",
".",
"rv",
"(",
")",
")",
",",
"memory",
"=",
"univ_options",
"[",
"'java_Xmx'",
"]",
")",
"regroup",
"=",
"job",
".",
"wrapJobFn",
"(",
"add_readgroups",
",",
"reheader",
".",
"rv",
"(",
")",
",",
"sample_type",
",",
"univ_options",
",",
"bwa_options",
"[",
"'picard'",
"]",
",",
"disk",
"=",
"PromisedRequirement",
"(",
"regroup_disk",
",",
"reheader",
".",
"rv",
"(",
")",
")",
",",
"memory",
"=",
"univ_options",
"[",
"'java_Xmx'",
"]",
")",
"mkdup",
"=",
"job",
".",
"wrapJobFn",
"(",
"mark_duplicates",
",",
"regroup",
".",
"rv",
"(",
")",
",",
"sample_type",
",",
"univ_options",
",",
"bwa_options",
"[",
"'picard'",
"]",
",",
"disk",
"=",
"PromisedRequirement",
"(",
"mkdup_disk",
",",
"regroup",
".",
"rv",
"(",
")",
")",
",",
"memory",
"=",
"univ_options",
"[",
"'java_Xmx'",
"]",
")",
"index",
"=",
"job",
".",
"wrapJobFn",
"(",
"index_bamfile",
",",
"mkdup",
".",
"rv",
"(",
")",
",",
"sample_type",
",",
"univ_options",
",",
"bwa_options",
"[",
"'samtools'",
"]",
",",
"sample_info",
"=",
"'fix_pg_sorted'",
",",
"disk",
"=",
"PromisedRequirement",
"(",
"index_disk",
",",
"mkdup",
".",
"rv",
"(",
")",
")",
",",
"memory",
"=",
"univ_options",
"[",
"'java_Xmx'",
"]",
")",
"job",
".",
"addChild",
"(",
"bwa",
")",
"bwa",
".",
"addChild",
"(",
"sam2bam",
")",
"sam2bam",
".",
"addChild",
"(",
"reheader",
")",
"reheader",
".",
"addChild",
"(",
"regroup",
")",
"regroup",
".",
"addChild",
"(",
"mkdup",
")",
"mkdup",
".",
"addChild",
"(",
"index",
")",
"return",
"index",
".",
"rv",
"(",
")"
] | A wrapper for the entire dna alignment subgraph.
:param list fastqs: The input fastqs for alignment
:param str sample_type: Description of the sample to inject into the filename
:param dict univ_options: Dict of universal options used by almost all tools
:param dict bwa_options: Options specific to bwa
:return: Dict containing output bam and bai
output_files:
|- '<sample_type>_fix_pg_sorted.bam': fsID
+- '<sample_type>_fix_pg_sorted.bam.bai': fsID
:rtype: dict | [
"A",
"wrapper",
"for",
"the",
"entire",
"dna",
"alignment",
"subgraph",
"."
] | 06310682c50dcf8917b912c8e551299ff7ee41ce | https://github.com/BD2KGenomics/protect/blob/06310682c50dcf8917b912c8e551299ff7ee41ce/src/protect/alignment/dna.py#L54-L103 | train |
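
The align_dna record above only defines a subgraph; a caller still has to attach it to a running Toil workflow. The sketch below is illustrative only: it assumes align_dna is importable and that the fastqs list and the two option dictionaries have already been prepared (they are placeholders, not values taken from ProTECT).

# Illustrative wiring of the align_dna subgraph into a parent Toil job.
# 'fastqs', 'univ_options' and 'bwa_options' are assumed to exist already.
def attach_alignment(job, fastqs, univ_options, bwa_options):
    alignment = job.wrapJobFn(align_dna, fastqs, 'tumor_dna',
                              univ_options, bwa_options).encapsulate()
    job.addChild(alignment)
    # Resolves to the dict of '<sample_type>_fix_pg_sorted.bam'/'.bam.bai' fsIDs
    return alignment.rv()
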
BD2KGenomics/protect | src/protect/alignment/dna.py | run_bwa | def run_bwa(job, fastqs, sample_type, univ_options, bwa_options):
"""
Align a pair of fastqs with bwa.
:param list fastqs: The input fastqs for alignment
:param str sample_type: Description of the sample to inject into the filename
:param dict univ_options: Dict of universal options used by almost all tools
:param dict bwa_options: Options specific to bwa
:return: fsID for the generated sam
:rtype: toil.fileStore.FileID
"""
work_dir = os.getcwd()
input_files = {
'dna_1.fastq': fastqs[0],
'dna_2.fastq': fastqs[1],
'bwa_index.tar.gz': bwa_options['index']}
input_files = get_files_from_filestore(job, input_files, work_dir, docker=False)
# Handle gzipped file
gz = '.gz' if is_gzipfile(input_files['dna_1.fastq']) else ''
if gz:
for read_file in 'dna_1.fastq', 'dna_2.fastq':
os.symlink(read_file, read_file + gz)
input_files[read_file + gz] = input_files[read_file] + gz
# Untar the index
input_files['bwa_index'] = untargz(input_files['bwa_index.tar.gz'], work_dir)
input_files = {key: docker_path(path) for key, path in input_files.items()}
parameters = ['mem',
'-t', str(bwa_options['n']),
'-v', '1', # Don't print INFO messages to the stderr
'/'.join([input_files['bwa_index'], univ_options['ref']]),
input_files['dna_1.fastq' + gz],
input_files['dna_2.fastq' + gz]]
with open(''.join([work_dir, '/', sample_type, '.sam']), 'w') as samfile:
docker_call(tool='bwa', tool_parameters=parameters, work_dir=work_dir,
dockerhub=univ_options['dockerhub'], outfile=samfile,
tool_version=bwa_options['version'])
# samfile.name retains the path info
output_file = job.fileStore.writeGlobalFile(samfile.name)
job.fileStore.logToMaster('Ran bwa on %s:%s successfully'
% (univ_options['patient'], sample_type))
return output_file | python | def run_bwa(job, fastqs, sample_type, univ_options, bwa_options):
"""
Align a pair of fastqs with bwa.
:param list fastqs: The input fastqs for alignment
:param str sample_type: Description of the sample to inject into the filename
:param dict univ_options: Dict of universal options used by almost all tools
:param dict bwa_options: Options specific to bwa
:return: fsID for the generated sam
:rtype: toil.fileStore.FileID
"""
work_dir = os.getcwd()
input_files = {
'dna_1.fastq': fastqs[0],
'dna_2.fastq': fastqs[1],
'bwa_index.tar.gz': bwa_options['index']}
input_files = get_files_from_filestore(job, input_files, work_dir, docker=False)
# Handle gzipped file
gz = '.gz' if is_gzipfile(input_files['dna_1.fastq']) else ''
if gz:
for read_file in 'dna_1.fastq', 'dna_2.fastq':
os.symlink(read_file, read_file + gz)
input_files[read_file + gz] = input_files[read_file] + gz
# Untar the index
input_files['bwa_index'] = untargz(input_files['bwa_index.tar.gz'], work_dir)
input_files = {key: docker_path(path) for key, path in input_files.items()}
parameters = ['mem',
'-t', str(bwa_options['n']),
'-v', '1', # Don't print INFO messages to the stderr
'/'.join([input_files['bwa_index'], univ_options['ref']]),
input_files['dna_1.fastq' + gz],
input_files['dna_2.fastq' + gz]]
with open(''.join([work_dir, '/', sample_type, '.sam']), 'w') as samfile:
docker_call(tool='bwa', tool_parameters=parameters, work_dir=work_dir,
dockerhub=univ_options['dockerhub'], outfile=samfile,
tool_version=bwa_options['version'])
# samfile.name retains the path info
output_file = job.fileStore.writeGlobalFile(samfile.name)
job.fileStore.logToMaster('Ran bwa on %s:%s successfully'
% (univ_options['patient'], sample_type))
return output_file | [
"def",
"run_bwa",
"(",
"job",
",",
"fastqs",
",",
"sample_type",
",",
"univ_options",
",",
"bwa_options",
")",
":",
"work_dir",
"=",
"os",
".",
"getcwd",
"(",
")",
"input_files",
"=",
"{",
"'dna_1.fastq'",
":",
"fastqs",
"[",
"0",
"]",
",",
"'dna_2.fastq'",
":",
"fastqs",
"[",
"1",
"]",
",",
"'bwa_index.tar.gz'",
":",
"bwa_options",
"[",
"'index'",
"]",
"}",
"input_files",
"=",
"get_files_from_filestore",
"(",
"job",
",",
"input_files",
",",
"work_dir",
",",
"docker",
"=",
"False",
")",
"gz",
"=",
"'.gz'",
"if",
"is_gzipfile",
"(",
"input_files",
"[",
"'dna_1.fastq'",
"]",
")",
"else",
"''",
"if",
"gz",
":",
"for",
"read_file",
"in",
"'dna_1.fastq'",
",",
"'dna_2.fastq'",
":",
"os",
".",
"symlink",
"(",
"read_file",
",",
"read_file",
"+",
"gz",
")",
"input_files",
"[",
"read_file",
"+",
"gz",
"]",
"=",
"input_files",
"[",
"read_file",
"]",
"+",
"gz",
"input_files",
"[",
"'bwa_index'",
"]",
"=",
"untargz",
"(",
"input_files",
"[",
"'bwa_index.tar.gz'",
"]",
",",
"work_dir",
")",
"input_files",
"=",
"{",
"key",
":",
"docker_path",
"(",
"path",
")",
"for",
"key",
",",
"path",
"in",
"input_files",
".",
"items",
"(",
")",
"}",
"parameters",
"=",
"[",
"'mem'",
",",
"'-t'",
",",
"str",
"(",
"bwa_options",
"[",
"'n'",
"]",
")",
",",
"'-v'",
",",
"'1'",
",",
"'/'",
".",
"join",
"(",
"[",
"input_files",
"[",
"'bwa_index'",
"]",
",",
"univ_options",
"[",
"'ref'",
"]",
"]",
")",
",",
"input_files",
"[",
"'dna_1.fastq'",
"+",
"gz",
"]",
",",
"input_files",
"[",
"'dna_2.fastq'",
"+",
"gz",
"]",
"]",
"with",
"open",
"(",
"''",
".",
"join",
"(",
"[",
"work_dir",
",",
"'/'",
",",
"sample_type",
",",
"'.sam'",
"]",
")",
",",
"'w'",
")",
"as",
"samfile",
":",
"docker_call",
"(",
"tool",
"=",
"'bwa'",
",",
"tool_parameters",
"=",
"parameters",
",",
"work_dir",
"=",
"work_dir",
",",
"dockerhub",
"=",
"univ_options",
"[",
"'dockerhub'",
"]",
",",
"outfile",
"=",
"samfile",
",",
"tool_version",
"=",
"bwa_options",
"[",
"'version'",
"]",
")",
"output_file",
"=",
"job",
".",
"fileStore",
".",
"writeGlobalFile",
"(",
"samfile",
".",
"name",
")",
"job",
".",
"fileStore",
".",
"logToMaster",
"(",
"'Ran bwa on %s:%s successfully'",
"%",
"(",
"univ_options",
"[",
"'patient'",
"]",
",",
"sample_type",
")",
")",
"return",
"output_file"
] | Align a pair of fastqs with bwa.
:param list fastqs: The input fastqs for alignment
:param str sample_type: Description of the sample to inject into the filename
:param dict univ_options: Dict of universal options used by almost all tools
:param dict bwa_options: Options specific to bwa
:return: fsID for the generated sam
:rtype: toil.fileStore.FileID | [
"Align",
"a",
"pair",
"of",
"fastqs",
"with",
"bwa",
"."
] | 06310682c50dcf8917b912c8e551299ff7ee41ce | https://github.com/BD2KGenomics/protect/blob/06310682c50dcf8917b912c8e551299ff7ee41ce/src/protect/alignment/dna.py#L106-L147 | train |
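
Stripped of the Toil file-store and Docker plumbing, the parameters list in this record corresponds to a plain bwa mem invocation. The sketch below is a hedged, non-Docker equivalent for readers who want to reproduce the step locally; it assumes bwa is on the PATH and uses placeholder paths.

# Rough local equivalent of the docker_call above:
#   bwa mem -t <threads> -v 1 <index_prefix> dna_1.fastq[.gz] dna_2.fastq[.gz] > <sample>.sam
import subprocess

def bwa_mem_local(index_prefix, fq1, fq2, out_sam, threads=4):
    with open(out_sam, 'w') as sam:
        subprocess.check_call(['bwa', 'mem', '-t', str(threads), '-v', '1',
                               index_prefix, fq1, fq2], stdout=sam)
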
BD2KGenomics/protect | src/protect/alignment/dna.py | bam_conversion | def bam_conversion(job, samfile, sample_type, univ_options, samtools_options):
"""
Convert a sam to a bam.
:param dict samfile: The input sam file
:param str sample_type: Description of the sample to inject into the filename
:param dict univ_options: Dict of universal options used by almost all tools
:param dict samtools_options: Options specific to samtools
:return: fsID for the generated bam
:rtype: toil.fileStore.FileID
"""
work_dir = os.getcwd()
input_files = {
sample_type + '.sam': samfile}
input_files = get_files_from_filestore(job, input_files, work_dir,
docker=True)
bamfile = '/'.join([work_dir, sample_type + '.bam'])
parameters = ['view',
'-bS',
'-o', docker_path(bamfile),
input_files[sample_type + '.sam']
]
docker_call(tool='samtools', tool_parameters=parameters, work_dir=work_dir,
dockerhub=univ_options['dockerhub'], tool_version=samtools_options['version'])
output_file = job.fileStore.writeGlobalFile(bamfile)
# The samfile is no longer useful so delete it
job.fileStore.deleteGlobalFile(samfile)
job.fileStore.logToMaster('Ran sam2bam on %s:%s successfully'
% (univ_options['patient'], sample_type))
return output_file | python | def bam_conversion(job, samfile, sample_type, univ_options, samtools_options):
"""
Convert a sam to a bam.
:param dict samfile: The input sam file
:param str sample_type: Description of the sample to inject into the filename
:param dict univ_options: Dict of universal options used by almost all tools
:param dict samtools_options: Options specific to samtools
:return: fsID for the generated bam
:rtype: toil.fileStore.FileID
"""
work_dir = os.getcwd()
input_files = {
sample_type + '.sam': samfile}
input_files = get_files_from_filestore(job, input_files, work_dir,
docker=True)
bamfile = '/'.join([work_dir, sample_type + '.bam'])
parameters = ['view',
'-bS',
'-o', docker_path(bamfile),
input_files[sample_type + '.sam']
]
docker_call(tool='samtools', tool_parameters=parameters, work_dir=work_dir,
dockerhub=univ_options['dockerhub'], tool_version=samtools_options['version'])
output_file = job.fileStore.writeGlobalFile(bamfile)
# The samfile is no longer useful so delete it
job.fileStore.deleteGlobalFile(samfile)
job.fileStore.logToMaster('Ran sam2bam on %s:%s successfully'
% (univ_options['patient'], sample_type))
return output_file | [
"def",
"bam_conversion",
"(",
"job",
",",
"samfile",
",",
"sample_type",
",",
"univ_options",
",",
"samtools_options",
")",
":",
"work_dir",
"=",
"os",
".",
"getcwd",
"(",
")",
"input_files",
"=",
"{",
"sample_type",
"+",
"'.sam'",
":",
"samfile",
"}",
"input_files",
"=",
"get_files_from_filestore",
"(",
"job",
",",
"input_files",
",",
"work_dir",
",",
"docker",
"=",
"True",
")",
"bamfile",
"=",
"'/'",
".",
"join",
"(",
"[",
"work_dir",
",",
"sample_type",
"+",
"'.bam'",
"]",
")",
"parameters",
"=",
"[",
"'view'",
",",
"'-bS'",
",",
"'-o'",
",",
"docker_path",
"(",
"bamfile",
")",
",",
"input_files",
"[",
"sample_type",
"+",
"'.sam'",
"]",
"]",
"docker_call",
"(",
"tool",
"=",
"'samtools'",
",",
"tool_parameters",
"=",
"parameters",
",",
"work_dir",
"=",
"work_dir",
",",
"dockerhub",
"=",
"univ_options",
"[",
"'dockerhub'",
"]",
",",
"tool_version",
"=",
"samtools_options",
"[",
"'version'",
"]",
")",
"output_file",
"=",
"job",
".",
"fileStore",
".",
"writeGlobalFile",
"(",
"bamfile",
")",
"job",
".",
"fileStore",
".",
"deleteGlobalFile",
"(",
"samfile",
")",
"job",
".",
"fileStore",
".",
"logToMaster",
"(",
"'Ran sam2bam on %s:%s successfully'",
"%",
"(",
"univ_options",
"[",
"'patient'",
"]",
",",
"sample_type",
")",
")",
"return",
"output_file"
] | Convert a sam to a bam.
:param dict samfile: The input sam file
:param str sample_type: Description of the sample to inject into the filename
:param dict univ_options: Dict of universal options used by almost all tools
:param dict samtools_options: Options specific to samtools
:return: fsID for the generated bam
:rtype: toil.fileStore.FileID | [
"Convert",
"a",
"sam",
"to",
"a",
"bam",
"."
] | 06310682c50dcf8917b912c8e551299ff7ee41ce | https://github.com/BD2KGenomics/protect/blob/06310682c50dcf8917b912c8e551299ff7ee41ce/src/protect/alignment/dna.py#L150-L179 | train |
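
The conversion itself is a single samtools call; everything else in the record is file-store bookkeeping. A minimal non-Docker sketch, assuming samtools is installed locally:

import subprocess

def sam_to_bam(sam_path, bam_path):
    # Equivalent of: samtools view -bS -o <bam> <sam>
    subprocess.check_call(['samtools', 'view', '-bS', '-o', bam_path, sam_path])
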
BD2KGenomics/protect | src/protect/alignment/dna.py | fix_bam_header | def fix_bam_header(job, bamfile, sample_type, univ_options, samtools_options, retained_chroms=None):
"""
Fix the bam header to remove the command line call. Failing to do this causes Picard to reject
the bam.
:param dict bamfile: The input bam file
:param str sample_type: Description of the sample to inject into the filename
:param dict univ_options: Dict of universal options used by almost all tools
:param dict samtools_options: Options specific to samtools
:param list retained_chroms: A list of chromosomes to retain
:return: fsID for the output bam
:rtype: toil.fileStore.FileID
"""
if retained_chroms is None:
retained_chroms = []
work_dir = os.getcwd()
input_files = {
sample_type + '.bam': bamfile}
input_files = get_files_from_filestore(job, input_files, work_dir, docker=True)
parameters = ['view',
'-H',
input_files[sample_type + '.bam']]
with open('/'.join([work_dir, sample_type + '_input_bam.header']), 'w') as headerfile:
docker_call(tool='samtools', tool_parameters=parameters, work_dir=work_dir,
dockerhub=univ_options['dockerhub'], outfile=headerfile,
tool_version=samtools_options['version'])
with open(headerfile.name, 'r') as headerfile, \
open('/'.join([work_dir, sample_type + '_output_bam.header']), 'w') as outheaderfile:
for line in headerfile:
if line.startswith('@PG'):
line = '\t'.join([x for x in line.strip().split('\t') if not x.startswith('CL')])
if retained_chroms and line.startswith('@SQ'):
if line.strip().split()[1].lstrip('SN:') not in retained_chroms:
continue
print(line.strip(), file=outheaderfile)
parameters = ['reheader',
docker_path(outheaderfile.name),
input_files[sample_type + '.bam']]
with open('/'.join([work_dir, sample_type + '_fixPG.bam']), 'w') as fixpg_bamfile:
docker_call(tool='samtools', tool_parameters=parameters, work_dir=work_dir,
dockerhub=univ_options['dockerhub'], outfile=fixpg_bamfile,
tool_version=samtools_options['version'])
output_file = job.fileStore.writeGlobalFile(fixpg_bamfile.name)
# The old bam file is now useless.
job.fileStore.deleteGlobalFile(bamfile)
job.fileStore.logToMaster('Ran reheader on %s:%s successfully'
% (univ_options['patient'], sample_type))
return output_file | python | def fix_bam_header(job, bamfile, sample_type, univ_options, samtools_options, retained_chroms=None):
"""
Fix the bam header to remove the command line call. Failing to do this causes Picard to reject
the bam.
:param dict bamfile: The input bam file
:param str sample_type: Description of the sample to inject into the filename
:param dict univ_options: Dict of universal options used by almost all tools
:param dict samtools_options: Options specific to samtools
:param list retained_chroms: A list of chromosomes to retain
:return: fsID for the output bam
:rtype: toil.fileStore.FileID
"""
if retained_chroms is None:
retained_chroms = []
work_dir = os.getcwd()
input_files = {
sample_type + '.bam': bamfile}
input_files = get_files_from_filestore(job, input_files, work_dir, docker=True)
parameters = ['view',
'-H',
input_files[sample_type + '.bam']]
with open('/'.join([work_dir, sample_type + '_input_bam.header']), 'w') as headerfile:
docker_call(tool='samtools', tool_parameters=parameters, work_dir=work_dir,
dockerhub=univ_options['dockerhub'], outfile=headerfile,
tool_version=samtools_options['version'])
with open(headerfile.name, 'r') as headerfile, \
open('/'.join([work_dir, sample_type + '_output_bam.header']), 'w') as outheaderfile:
for line in headerfile:
if line.startswith('@PG'):
line = '\t'.join([x for x in line.strip().split('\t') if not x.startswith('CL')])
if retained_chroms and line.startswith('@SQ'):
if line.strip().split()[1].lstrip('SN:') not in retained_chroms:
continue
print(line.strip(), file=outheaderfile)
parameters = ['reheader',
docker_path(outheaderfile.name),
input_files[sample_type + '.bam']]
with open('/'.join([work_dir, sample_type + '_fixPG.bam']), 'w') as fixpg_bamfile:
docker_call(tool='samtools', tool_parameters=parameters, work_dir=work_dir,
dockerhub=univ_options['dockerhub'], outfile=fixpg_bamfile,
tool_version=samtools_options['version'])
output_file = job.fileStore.writeGlobalFile(fixpg_bamfile.name)
# The old bam file is now useless.
job.fileStore.deleteGlobalFile(bamfile)
job.fileStore.logToMaster('Ran reheader on %s:%s successfully'
% (univ_options['patient'], sample_type))
return output_file | [
"def",
"fix_bam_header",
"(",
"job",
",",
"bamfile",
",",
"sample_type",
",",
"univ_options",
",",
"samtools_options",
",",
"retained_chroms",
"=",
"None",
")",
":",
"if",
"retained_chroms",
"is",
"None",
":",
"retained_chroms",
"=",
"[",
"]",
"work_dir",
"=",
"os",
".",
"getcwd",
"(",
")",
"input_files",
"=",
"{",
"sample_type",
"+",
"'.bam'",
":",
"bamfile",
"}",
"input_files",
"=",
"get_files_from_filestore",
"(",
"job",
",",
"input_files",
",",
"work_dir",
",",
"docker",
"=",
"True",
")",
"parameters",
"=",
"[",
"'view'",
",",
"'-H'",
",",
"input_files",
"[",
"sample_type",
"+",
"'.bam'",
"]",
"]",
"with",
"open",
"(",
"'/'",
".",
"join",
"(",
"[",
"work_dir",
",",
"sample_type",
"+",
"'_input_bam.header'",
"]",
")",
",",
"'w'",
")",
"as",
"headerfile",
":",
"docker_call",
"(",
"tool",
"=",
"'samtools'",
",",
"tool_parameters",
"=",
"parameters",
",",
"work_dir",
"=",
"work_dir",
",",
"dockerhub",
"=",
"univ_options",
"[",
"'dockerhub'",
"]",
",",
"outfile",
"=",
"headerfile",
",",
"tool_version",
"=",
"samtools_options",
"[",
"'version'",
"]",
")",
"with",
"open",
"(",
"headerfile",
".",
"name",
",",
"'r'",
")",
"as",
"headerfile",
",",
"open",
"(",
"'/'",
".",
"join",
"(",
"[",
"work_dir",
",",
"sample_type",
"+",
"'_output_bam.header'",
"]",
")",
",",
"'w'",
")",
"as",
"outheaderfile",
":",
"for",
"line",
"in",
"headerfile",
":",
"if",
"line",
".",
"startswith",
"(",
"'@PG'",
")",
":",
"line",
"=",
"'\\t'",
".",
"join",
"(",
"[",
"x",
"for",
"x",
"in",
"line",
".",
"strip",
"(",
")",
".",
"split",
"(",
"'\\t'",
")",
"if",
"not",
"x",
".",
"startswith",
"(",
"'CL'",
")",
"]",
")",
"if",
"retained_chroms",
"and",
"line",
".",
"startswith",
"(",
"'@SQ'",
")",
":",
"if",
"line",
".",
"strip",
"(",
")",
".",
"split",
"(",
")",
"[",
"1",
"]",
".",
"lstrip",
"(",
"'SN:'",
")",
"not",
"in",
"retained_chroms",
":",
"continue",
"print",
"(",
"line",
".",
"strip",
"(",
")",
",",
"file",
"=",
"outheaderfile",
")",
"parameters",
"=",
"[",
"'reheader'",
",",
"docker_path",
"(",
"outheaderfile",
".",
"name",
")",
",",
"input_files",
"[",
"sample_type",
"+",
"'.bam'",
"]",
"]",
"with",
"open",
"(",
"'/'",
".",
"join",
"(",
"[",
"work_dir",
",",
"sample_type",
"+",
"'_fixPG.bam'",
"]",
")",
",",
"'w'",
")",
"as",
"fixpg_bamfile",
":",
"docker_call",
"(",
"tool",
"=",
"'samtools'",
",",
"tool_parameters",
"=",
"parameters",
",",
"work_dir",
"=",
"work_dir",
",",
"dockerhub",
"=",
"univ_options",
"[",
"'dockerhub'",
"]",
",",
"outfile",
"=",
"fixpg_bamfile",
",",
"tool_version",
"=",
"samtools_options",
"[",
"'version'",
"]",
")",
"output_file",
"=",
"job",
".",
"fileStore",
".",
"writeGlobalFile",
"(",
"fixpg_bamfile",
".",
"name",
")",
"job",
".",
"fileStore",
".",
"deleteGlobalFile",
"(",
"bamfile",
")",
"job",
".",
"fileStore",
".",
"logToMaster",
"(",
"'Ran reheader on %s:%s successfully'",
"%",
"(",
"univ_options",
"[",
"'patient'",
"]",
",",
"sample_type",
")",
")",
"return",
"output_file"
] | Fix the bam header to remove the command line call. Failing to do this causes Picard to reject
the bam.
:param dict bamfile: The input bam file
:param str sample_type: Description of the sample to inject into the filename
:param dict univ_options: Dict of universal options used by almost all tools
:param dict samtools_options: Options specific to samtools
:param list retained_chroms: A list of chromosomes to retain
:return: fsID for the output bam
:rtype: toil.fileStore.FileID | [
"Fix",
"the",
"bam",
"header",
"to",
"remove",
"the",
"command",
"line",
"call",
".",
"Failing",
"to",
"do",
"this",
"causes",
"Picard",
"to",
"reject",
"the",
"bam",
"."
] | 06310682c50dcf8917b912c8e551299ff7ee41ce | https://github.com/BD2KGenomics/protect/blob/06310682c50dcf8917b912c8e551299ff7ee41ce/src/protect/alignment/dna.py#L182-L230 | train |
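
The header-editing rule in fix_bam_header is easy to miss inside the Docker plumbing: drop any CL field from @PG lines and, when a retained-chromosome list is given, keep only the matching @SQ lines. The stand-alone sketch below restates that rule; it slices off the literal 'SN:' prefix instead of calling lstrip('SN:'), because str.lstrip strips a character set and would mangle contig names that begin with 'S' or 'N'.

def clean_header_line(line, retained_chroms=None):
    # @PG lines: remove the CL (command line) field so Picard accepts the bam.
    if line.startswith('@PG'):
        return '\t'.join(field for field in line.rstrip('\n').split('\t')
                         if not field.startswith('CL'))
    # @SQ lines: optionally keep only the requested contigs.
    if retained_chroms and line.startswith('@SQ'):
        contig = line.split()[1][len('SN:'):]
        if contig not in retained_chroms:
            return None  # caller drops this line
    return line.rstrip('\n')
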
BD2KGenomics/protect | src/protect/alignment/dna.py | add_readgroups | def add_readgroups(job, bamfile, sample_type, univ_options, picard_options):
"""
Add read groups to the bam.
:param dict bamfile: The input bam file
:param str sample_type: Description of the sample to inject into the filename
:param dict univ_options: Dict of universal options used by almost all tools
:param dict picard_options: Options specific to picard
:return: fsID for the output bam
:rtype: toil.fileStore.FileID
"""
work_dir = os.getcwd()
input_files = {
sample_type + '.bam': bamfile}
get_files_from_filestore(job, input_files, work_dir, docker=True)
parameters = ['AddOrReplaceReadGroups',
'CREATE_INDEX=false',
'I=/data/' + sample_type + '.bam',
'O=/data/' + sample_type + '_reheader.bam',
'SO=coordinate',
'ID=1',
''.join(['LB=', univ_options['patient']]),
'PL=ILLUMINA',
'PU=12345',
''.join(['SM=', sample_type.rstrip('_dna')])]
docker_call(tool='picard', tool_parameters=parameters, work_dir=work_dir,
dockerhub=univ_options['dockerhub'], java_xmx=univ_options['java_Xmx'],
tool_version=picard_options['version'])
output_file = job.fileStore.writeGlobalFile(
'/'.join([work_dir, sample_type + '_reheader.bam']))
# Delete the old bam file
job.fileStore.deleteGlobalFile(bamfile)
job.fileStore.logToMaster('Ran add_read_groups on %s:%s successfully'
% (univ_options['patient'], sample_type))
return output_file | python | def add_readgroups(job, bamfile, sample_type, univ_options, picard_options):
"""
Add read groups to the bam.
:param dict bamfile: The input bam file
:param str sample_type: Description of the sample to inject into the filename
:param dict univ_options: Dict of universal options used by almost all tools
:param dict picard_options: Options specific to picard
:return: fsID for the output bam
:rtype: toil.fileStore.FileID
"""
work_dir = os.getcwd()
input_files = {
sample_type + '.bam': bamfile}
get_files_from_filestore(job, input_files, work_dir, docker=True)
parameters = ['AddOrReplaceReadGroups',
'CREATE_INDEX=false',
'I=/data/' + sample_type + '.bam',
'O=/data/' + sample_type + '_reheader.bam',
'SO=coordinate',
'ID=1',
''.join(['LB=', univ_options['patient']]),
'PL=ILLUMINA',
'PU=12345',
''.join(['SM=', sample_type.rstrip('_dna')])]
docker_call(tool='picard', tool_parameters=parameters, work_dir=work_dir,
dockerhub=univ_options['dockerhub'], java_xmx=univ_options['java_Xmx'],
tool_version=picard_options['version'])
output_file = job.fileStore.writeGlobalFile(
'/'.join([work_dir, sample_type + '_reheader.bam']))
# Delete the old bam file
job.fileStore.deleteGlobalFile(bamfile)
job.fileStore.logToMaster('Ran add_read_groups on %s:%s successfully'
% (univ_options['patient'], sample_type))
return output_file | [
"def",
"add_readgroups",
"(",
"job",
",",
"bamfile",
",",
"sample_type",
",",
"univ_options",
",",
"picard_options",
")",
":",
"work_dir",
"=",
"os",
".",
"getcwd",
"(",
")",
"input_files",
"=",
"{",
"sample_type",
"+",
"'.bam'",
":",
"bamfile",
"}",
"get_files_from_filestore",
"(",
"job",
",",
"input_files",
",",
"work_dir",
",",
"docker",
"=",
"True",
")",
"parameters",
"=",
"[",
"'AddOrReplaceReadGroups'",
",",
"'CREATE_INDEX=false'",
",",
"'I=/data/'",
"+",
"sample_type",
"+",
"'.bam'",
",",
"'O=/data/'",
"+",
"sample_type",
"+",
"'_reheader.bam'",
",",
"'SO=coordinate'",
",",
"'ID=1'",
",",
"''",
".",
"join",
"(",
"[",
"'LB='",
",",
"univ_options",
"[",
"'patient'",
"]",
"]",
")",
",",
"'PL=ILLUMINA'",
",",
"'PU=12345'",
",",
"''",
".",
"join",
"(",
"[",
"'SM='",
",",
"sample_type",
".",
"rstrip",
"(",
"'_dna'",
")",
"]",
")",
"]",
"docker_call",
"(",
"tool",
"=",
"'picard'",
",",
"tool_parameters",
"=",
"parameters",
",",
"work_dir",
"=",
"work_dir",
",",
"dockerhub",
"=",
"univ_options",
"[",
"'dockerhub'",
"]",
",",
"java_xmx",
"=",
"univ_options",
"[",
"'java_Xmx'",
"]",
",",
"tool_version",
"=",
"picard_options",
"[",
"'version'",
"]",
")",
"output_file",
"=",
"job",
".",
"fileStore",
".",
"writeGlobalFile",
"(",
"'/'",
".",
"join",
"(",
"[",
"work_dir",
",",
"sample_type",
"+",
"'_reheader.bam'",
"]",
")",
")",
"job",
".",
"fileStore",
".",
"deleteGlobalFile",
"(",
"bamfile",
")",
"job",
".",
"fileStore",
".",
"logToMaster",
"(",
"'Ran add_read_groups on %s:%s successfully'",
"%",
"(",
"univ_options",
"[",
"'patient'",
"]",
",",
"sample_type",
")",
")",
"return",
"output_file"
] | Add read groups to the bam.
:param dict bamfile: The input bam file
:param str sample_type: Description of the sample to inject into the filename
:param dict univ_options: Dict of universal options used by almost all tools
:param dict picard_options: Options specific to picard
:return: fsID for the output bam
:rtype: toil.fileStore.FileID | [
"Add",
"read",
"groups",
"to",
"the",
"bam",
"."
] | 06310682c50dcf8917b912c8e551299ff7ee41ce | https://github.com/BD2KGenomics/protect/blob/06310682c50dcf8917b912c8e551299ff7ee41ce/src/protect/alignment/dna.py#L233-L267 | train |
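
Two small notes on this record. First, sample_type.rstrip('_dna') strips a trailing character set rather than the literal suffix; it happens to give the intended result for 'tumor_dna' and 'normal_dna' but would misbehave for sample names ending in 'd', 'n', 'a' or '_'. Second, the Picard call reduces to the command sketched below, a non-Docker illustration that assumes a picard launcher script on the PATH and reuses the fixed read-group values from the record.

import subprocess

def add_read_groups_local(in_bam, out_bam, patient, sample):
    subprocess.check_call([
        'picard', 'AddOrReplaceReadGroups',
        'CREATE_INDEX=false', 'I=' + in_bam, 'O=' + out_bam,
        'SO=coordinate', 'ID=1', 'LB=' + patient,
        'PL=ILLUMINA', 'PU=12345', 'SM=' + sample])
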
nepalicalendar/nepalicalendar-py | nepalicalendar/nepcal.py | NepCal.weekday | def weekday(cls, year, month, day):
"""Returns the weekday of the date. 0 = aaitabar"""
return NepDate.from_bs_date(year, month, day).weekday() | python | def weekday(cls, year, month, day):
"""Returns the weekday of the date. 0 = aaitabar"""
return NepDate.from_bs_date(year, month, day).weekday() | [
"def",
"weekday",
"(",
"cls",
",",
"year",
",",
"month",
",",
"day",
")",
":",
"return",
"NepDate",
".",
"from_bs_date",
"(",
"year",
",",
"month",
",",
"day",
")",
".",
"weekday",
"(",
")"
] | Returns the weekday of the date. 0 = aaitabar | [
"Returns",
"the",
"weekday",
"of",
"the",
"date",
".",
"0",
"=",
"aaitabar"
] | a589c28b8e085049f30a7287753476b59eca6f50 | https://github.com/nepalicalendar/nepalicalendar-py/blob/a589c28b8e085049f30a7287753476b59eca6f50/nepalicalendar/nepcal.py#L24-L26 | train |
nepalicalendar/nepalicalendar-py | nepalicalendar/nepcal.py | NepCal.monthrange | def monthrange(cls, year, month):
"""Returns the number of days in a month"""
functions.check_valid_bs_range(NepDate(year, month, 1))
return values.NEPALI_MONTH_DAY_DATA[year][month - 1] | python | def monthrange(cls, year, month):
"""Returns the number of days in a month"""
functions.check_valid_bs_range(NepDate(year, month, 1))
return values.NEPALI_MONTH_DAY_DATA[year][month - 1] | [
"def",
"monthrange",
"(",
"cls",
",",
"year",
",",
"month",
")",
":",
"functions",
".",
"check_valid_bs_range",
"(",
"NepDate",
"(",
"year",
",",
"month",
",",
"1",
")",
")",
"return",
"values",
".",
"NEPALI_MONTH_DAY_DATA",
"[",
"year",
"]",
"[",
"month",
"-",
"1",
"]"
] | Returns the number of days in a month | [
"Returns",
"the",
"number",
"of",
"days",
"in",
"a",
"month"
] | a589c28b8e085049f30a7287753476b59eca6f50 | https://github.com/nepalicalendar/nepalicalendar-py/blob/a589c28b8e085049f30a7287753476b59eca6f50/nepalicalendar/nepcal.py#L29-L32 | train |
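
A short usage sketch for the two class methods above. It assumes the package exposes NepCal at the top level (the exact import path may differ) and that 2073 falls inside the supported Bikram Sambat range. Note that, unlike calendar.monthrange in the standard library, NepCal.monthrange returns just the day count, not a (weekday, days) tuple.

from nepalicalendar import NepCal  # assumed import path

NepCal.weekday(2073, 1, 1)   # weekday index of 1 Baishakh 2073, 0 = aaitabar (Sunday)
NepCal.monthrange(2073, 1)   # number of days in that month
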
nepalicalendar/nepalicalendar-py | nepalicalendar/nepcal.py | NepCal.itermonthdays | def itermonthdays(cls, year, month):
"""Similar to itermonthdates but returns day number instead of NepDate object
"""
for day in NepCal.itermonthdates(year, month):
if day.month == month:
yield day.day
else:
yield 0 | python | def itermonthdays(cls, year, month):
"""Similar to itermonthdates but returns day number instead of NepDate object
"""
for day in NepCal.itermonthdates(year, month):
if day.month == month:
yield day.day
else:
yield 0 | [
"def",
"itermonthdays",
"(",
"cls",
",",
"year",
",",
"month",
")",
":",
"for",
"day",
"in",
"NepCal",
".",
"itermonthdates",
"(",
"year",
",",
"month",
")",
":",
"if",
"day",
".",
"month",
"==",
"month",
":",
"yield",
"day",
".",
"day",
"else",
":",
"yield",
"0"
] | Similar to itermonthdates but returns day number instead of NepDate object | [
"Similar",
"to",
"itermonthdates",
"but",
"returns",
"day",
"number",
"instead",
"of",
"NepDate",
"object"
] | a589c28b8e085049f30a7287753476b59eca6f50 | https://github.com/nepalicalendar/nepalicalendar-py/blob/a589c28b8e085049f30a7287753476b59eca6f50/nepalicalendar/nepcal.py#L65-L72 | train |
nepalicalendar/nepalicalendar-py | nepalicalendar/nepcal.py | NepCal.itermonthdays2 | def itermonthdays2(cls, year, month):
"""Similar to itermonthdays2 but returns tuples of day and weekday.
"""
for day in NepCal.itermonthdates(year, month):
if day.month == month:
yield (day.day, day.weekday())
else:
yield (0, day.weekday()) | python | def itermonthdays2(cls, year, month):
"""Similar to itermonthdays2 but returns tuples of day and weekday.
"""
for day in NepCal.itermonthdates(year, month):
if day.month == month:
yield (day.day, day.weekday())
else:
yield (0, day.weekday()) | [
"def",
"itermonthdays2",
"(",
"cls",
",",
"year",
",",
"month",
")",
":",
"for",
"day",
"in",
"NepCal",
".",
"itermonthdates",
"(",
"year",
",",
"month",
")",
":",
"if",
"day",
".",
"month",
"==",
"month",
":",
"yield",
"(",
"day",
".",
"day",
",",
"day",
".",
"weekday",
"(",
")",
")",
"else",
":",
"yield",
"(",
"0",
",",
"day",
".",
"weekday",
"(",
")",
")"
] | Similar to itermonthdays but returns tuples of day and weekday. | [
"Similar",
"to",
"itermonthdays2",
"but",
"returns",
"tuples",
"of",
"day",
"and",
"weekday",
"."
] | a589c28b8e085049f30a7287753476b59eca6f50 | https://github.com/nepalicalendar/nepalicalendar-py/blob/a589c28b8e085049f30a7287753476b59eca6f50/nepalicalendar/nepcal.py#L75-L82 | train |
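
The three iterators differ only in what they yield for each calendar slot: itermonthdates yields NepDate objects (padded to whole weeks), itermonthdays yields 0 for the padding days and the day number otherwise, and itermonthdays2 yields (day, weekday) pairs. A small hedged example, reusing the assumed import from the earlier sketch:

from nepalicalendar import NepCal  # assumed import path

for day, weekday in NepCal.itermonthdays2(2073, 1):
    if day:  # skip the zero-padded slots that belong to neighbouring months
        print(day, weekday)
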
nepalicalendar/nepalicalendar-py | nepalicalendar/nepcal.py | NepCal.monthdatescalendar | def monthdatescalendar(cls, year, month):
""" Returns a list of week in a month. A week is a list of NepDate objects """
weeks = []
week = []
for day in NepCal.itermonthdates(year, month):
week.append(day)
if len(week) == 7:
weeks.append(week)
week = []
if len(week) > 0:
weeks.append(week)
return weeks | python | def monthdatescalendar(cls, year, month):
""" Returns a list of week in a month. A week is a list of NepDate objects """
weeks = []
week = []
for day in NepCal.itermonthdates(year, month):
week.append(day)
if len(week) == 7:
weeks.append(week)
week = []
if len(week) > 0:
weeks.append(week)
return weeks | [
"def",
"monthdatescalendar",
"(",
"cls",
",",
"year",
",",
"month",
")",
":",
"weeks",
"=",
"[",
"]",
"week",
"=",
"[",
"]",
"for",
"day",
"in",
"NepCal",
".",
"itermonthdates",
"(",
"year",
",",
"month",
")",
":",
"week",
".",
"append",
"(",
"day",
")",
"if",
"len",
"(",
"week",
")",
"==",
"7",
":",
"weeks",
".",
"append",
"(",
"week",
")",
"week",
"=",
"[",
"]",
"if",
"len",
"(",
"week",
")",
">",
"0",
":",
"weeks",
".",
"append",
"(",
"week",
")",
"return",
"weeks"
] | Returns a list of weeks in a month. A week is a list of NepDate objects | [
"Returns",
"a",
"list",
"of",
"week",
"in",
"a",
"month",
".",
"A",
"week",
"is",
"a",
"list",
"of",
"NepDate",
"objects"
] | a589c28b8e085049f30a7287753476b59eca6f50 | https://github.com/nepalicalendar/nepalicalendar-py/blob/a589c28b8e085049f30a7287753476b59eca6f50/nepalicalendar/nepcal.py#L85-L96 | train |
nepalicalendar/nepalicalendar-py | nepalicalendar/nepcal.py | NepCal.monthdayscalendar | def monthdayscalendar(cls, year, month):
"""Return a list of the weeks in the month month of the year as full weeks.
Weeks are lists of seven day numbers."""
weeks = []
week = []
for day in NepCal.itermonthdays(year, month):
week.append(day)
if len(week) == 7:
weeks.append(week)
week = []
if len(week) > 0:
weeks.append(week)
return weeks | python | def monthdayscalendar(cls, year, month):
"""Return a list of the weeks in the month month of the year as full weeks.
Weeks are lists of seven day numbers."""
weeks = []
week = []
for day in NepCal.itermonthdays(year, month):
week.append(day)
if len(week) == 7:
weeks.append(week)
week = []
if len(week) > 0:
weeks.append(week)
return weeks | [
"def",
"monthdayscalendar",
"(",
"cls",
",",
"year",
",",
"month",
")",
":",
"weeks",
"=",
"[",
"]",
"week",
"=",
"[",
"]",
"for",
"day",
"in",
"NepCal",
".",
"itermonthdays",
"(",
"year",
",",
"month",
")",
":",
"week",
".",
"append",
"(",
"day",
")",
"if",
"len",
"(",
"week",
")",
"==",
"7",
":",
"weeks",
".",
"append",
"(",
"week",
")",
"week",
"=",
"[",
"]",
"if",
"len",
"(",
"week",
")",
">",
"0",
":",
"weeks",
".",
"append",
"(",
"week",
")",
"return",
"weeks"
] | Return a list of the weeks in the month month of the year as full weeks.
Weeks are lists of seven day numbers. | [
"Return",
"a",
"list",
"of",
"the",
"weeks",
"in",
"the",
"month",
"month",
"of",
"the",
"year",
"as",
"full",
"weeks",
".",
"Weeks",
"are",
"lists",
"of",
"seven",
"day",
"numbers",
"."
] | a589c28b8e085049f30a7287753476b59eca6f50 | https://github.com/nepalicalendar/nepalicalendar-py/blob/a589c28b8e085049f30a7287753476b59eca6f50/nepalicalendar/nepcal.py#L99-L111 | train |
nepalicalendar/nepalicalendar-py | nepalicalendar/nepcal.py | NepCal.monthdays2calendar | def monthdays2calendar(cls, year, month):
""" Return a list of the weeks in the month month of the year as full weeks.
Weeks are lists of seven tuples of day numbers and weekday numbers. """
weeks = []
week = []
for day in NepCal.itermonthdays2(year, month):
week.append(day)
if len(week) == 7:
weeks.append(week)
week = []
if len(week) > 0:
weeks.append(week)
return weeks | python | def monthdays2calendar(cls, year, month):
""" Return a list of the weeks in the month month of the year as full weeks.
Weeks are lists of seven tuples of day numbers and weekday numbers. """
weeks = []
week = []
for day in NepCal.itermonthdays2(year, month):
week.append(day)
if len(week) == 7:
weeks.append(week)
week = []
if len(week) > 0:
weeks.append(week)
return weeks | [
"def",
"monthdays2calendar",
"(",
"cls",
",",
"year",
",",
"month",
")",
":",
"weeks",
"=",
"[",
"]",
"week",
"=",
"[",
"]",
"for",
"day",
"in",
"NepCal",
".",
"itermonthdays2",
"(",
"year",
",",
"month",
")",
":",
"week",
".",
"append",
"(",
"day",
")",
"if",
"len",
"(",
"week",
")",
"==",
"7",
":",
"weeks",
".",
"append",
"(",
"week",
")",
"week",
"=",
"[",
"]",
"if",
"len",
"(",
"week",
")",
">",
"0",
":",
"weeks",
".",
"append",
"(",
"week",
")",
"return",
"weeks"
] | Return a list of the weeks in the month month of the year as full weeks.
Weeks are lists of seven tuples of day numbers and weekday numbers. | [
"Return",
"a",
"list",
"of",
"the",
"weeks",
"in",
"the",
"month",
"month",
"of",
"the",
"year",
"as",
"full",
"weeks",
".",
"Weeks",
"are",
"lists",
"of",
"seven",
"tuples",
"of",
"day",
"numbers",
"and",
"weekday",
"numbers",
"."
] | a589c28b8e085049f30a7287753476b59eca6f50 | https://github.com/nepalicalendar/nepalicalendar-py/blob/a589c28b8e085049f30a7287753476b59eca6f50/nepalicalendar/nepcal.py#L114-L126 | train |
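
The monthdatescalendar, monthdayscalendar and monthdays2calendar records above all regroup their corresponding iterator into week-sized lists, so a plain-text month grid is a few lines of code. A minimal sketch (same assumed import; blank cells are the zero padding):

from nepalicalendar import NepCal  # assumed import path

def format_month(year, month):
    rows = []
    for week in NepCal.monthdayscalendar(year, month):
        rows.append(' '.join('{:>2}'.format(d if d else '') for d in week))
    return '\n'.join(rows)
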
BD2KGenomics/protect | src/protect/mutation_calling/somaticsniper.py | run_somaticsniper_with_merge | def run_somaticsniper_with_merge(job, tumor_bam, normal_bam, univ_options, somaticsniper_options):
"""
    A wrapper for the entire SomaticSniper sub-graph.
:param dict tumor_bam: Dict of bam and bai for tumor DNA-Seq
:param dict normal_bam: Dict of bam and bai for normal DNA-Seq
:param dict univ_options: Dict of universal options used by almost all tools
:param dict somaticsniper_options: Options specific to SomaticSniper
:return: fsID to the merged SomaticSniper calls
:rtype: toil.fileStore.FileID
"""
spawn = job.wrapJobFn(run_somaticsniper, tumor_bam, normal_bam, univ_options,
somaticsniper_options, split=False).encapsulate()
job.addChild(spawn)
return spawn.rv() | python | def run_somaticsniper_with_merge(job, tumor_bam, normal_bam, univ_options, somaticsniper_options):
"""
    A wrapper for the entire SomaticSniper sub-graph.
:param dict tumor_bam: Dict of bam and bai for tumor DNA-Seq
:param dict normal_bam: Dict of bam and bai for normal DNA-Seq
:param dict univ_options: Dict of universal options used by almost all tools
:param dict somaticsniper_options: Options specific to SomaticSniper
:return: fsID to the merged SomaticSniper calls
:rtype: toil.fileStore.FileID
"""
spawn = job.wrapJobFn(run_somaticsniper, tumor_bam, normal_bam, univ_options,
somaticsniper_options, split=False).encapsulate()
job.addChild(spawn)
return spawn.rv() | [
"def",
"run_somaticsniper_with_merge",
"(",
"job",
",",
"tumor_bam",
",",
"normal_bam",
",",
"univ_options",
",",
"somaticsniper_options",
")",
":",
"spawn",
"=",
"job",
".",
"wrapJobFn",
"(",
"run_somaticsniper",
",",
"tumor_bam",
",",
"normal_bam",
",",
"univ_options",
",",
"somaticsniper_options",
",",
"split",
"=",
"False",
")",
".",
"encapsulate",
"(",
")",
"job",
".",
"addChild",
"(",
"spawn",
")",
"return",
"spawn",
".",
"rv",
"(",
")"
] | A wrapper for the entire SomaticSniper sub-graph.
:param dict tumor_bam: Dict of bam and bai for tumor DNA-Seq
:param dict normal_bam: Dict of bam and bai for normal DNA-Seq
:param dict univ_options: Dict of universal options used by almost all tools
:param dict somaticsniper_options: Options specific to SomaticSniper
:return: fsID to the merged SomaticSniper calls
:rtype: toil.fileStore.FileID | [
"A",
"wrapper",
"for",
"the",
"the",
"entire",
"SomaticSniper",
"sub",
"-",
"graph",
"."
] | 06310682c50dcf8917b912c8e551299ff7ee41ce | https://github.com/BD2KGenomics/protect/blob/06310682c50dcf8917b912c8e551299ff7ee41ce/src/protect/mutation_calling/somaticsniper.py#L50-L64 | train |
BD2KGenomics/protect | src/protect/mutation_calling/somaticsniper.py | run_somaticsniper | def run_somaticsniper(job, tumor_bam, normal_bam, univ_options, somaticsniper_options, split=True):
"""
Run the SomaticSniper subgraph on the DNA bams. Optionally split the results into
per-chromosome vcfs.
:param dict tumor_bam: Dict of bam and bai for tumor DNA-Seq
:param dict normal_bam: Dict of bam and bai for normal DNA-Seq
:param dict univ_options: Dict of universal options used by almost all tools
:param dict somaticsniper_options: Options specific to SomaticSniper
:param bool split: Should the results be split into perchrom vcfs?
:return: Either the fsID to the genome-level vcf or a dict of results from running SomaticSniper
on every chromosome
perchrom_somaticsniper:
|- 'chr1': fsID
|- 'chr2' fsID
|
|-...
|
+- 'chrM': fsID
:rtype: toil.fileStore.FileID|dict
"""
# Get a list of chromosomes to handle
if somaticsniper_options['chromosomes']:
chromosomes = somaticsniper_options['chromosomes']
else:
chromosomes = sample_chromosomes(job, somaticsniper_options['genome_fai'])
perchrom_somaticsniper = defaultdict()
snipe = job.wrapJobFn(run_somaticsniper_full, tumor_bam, normal_bam, univ_options,
somaticsniper_options,
disk=PromisedRequirement(sniper_disk,
tumor_bam['tumor_dna_fix_pg_sorted.bam'],
normal_bam['normal_dna_fix_pg_sorted.bam'],
somaticsniper_options['genome_fasta']),
memory='6G')
pileup = job.wrapJobFn(run_pileup, tumor_bam, univ_options, somaticsniper_options,
disk=PromisedRequirement(pileup_disk,
tumor_bam['tumor_dna_fix_pg_sorted.bam'],
somaticsniper_options['genome_fasta']),
memory='6G')
filtersnipes = job.wrapJobFn(filter_somaticsniper, tumor_bam, snipe.rv(), pileup.rv(),
univ_options, somaticsniper_options,
disk=PromisedRequirement(sniper_filter_disk,
tumor_bam['tumor_dna_fix_pg_sorted.bam'],
somaticsniper_options['genome_fasta']),
memory='6G')
job.addChild(snipe)
job.addChild(pileup)
snipe.addChild(filtersnipes)
pileup.addChild(filtersnipes)
if split:
unmerge_snipes = job.wrapJobFn(unmerge, filtersnipes.rv(), 'somaticsniper', chromosomes,
somaticsniper_options, univ_options)
filtersnipes.addChild(unmerge_snipes)
return unmerge_snipes.rv()
else:
return filtersnipes.rv() | python | def run_somaticsniper(job, tumor_bam, normal_bam, univ_options, somaticsniper_options, split=True):
"""
Run the SomaticSniper subgraph on the DNA bams. Optionally split the results into
per-chromosome vcfs.
:param dict tumor_bam: Dict of bam and bai for tumor DNA-Seq
:param dict normal_bam: Dict of bam and bai for normal DNA-Seq
:param dict univ_options: Dict of universal options used by almost all tools
:param dict somaticsniper_options: Options specific to SomaticSniper
:param bool split: Should the results be split into perchrom vcfs?
:return: Either the fsID to the genome-level vcf or a dict of results from running SomaticSniper
on every chromosome
perchrom_somaticsniper:
|- 'chr1': fsID
|- 'chr2' fsID
|
|-...
|
+- 'chrM': fsID
:rtype: toil.fileStore.FileID|dict
"""
# Get a list of chromosomes to handle
if somaticsniper_options['chromosomes']:
chromosomes = somaticsniper_options['chromosomes']
else:
chromosomes = sample_chromosomes(job, somaticsniper_options['genome_fai'])
perchrom_somaticsniper = defaultdict()
snipe = job.wrapJobFn(run_somaticsniper_full, tumor_bam, normal_bam, univ_options,
somaticsniper_options,
disk=PromisedRequirement(sniper_disk,
tumor_bam['tumor_dna_fix_pg_sorted.bam'],
normal_bam['normal_dna_fix_pg_sorted.bam'],
somaticsniper_options['genome_fasta']),
memory='6G')
pileup = job.wrapJobFn(run_pileup, tumor_bam, univ_options, somaticsniper_options,
disk=PromisedRequirement(pileup_disk,
tumor_bam['tumor_dna_fix_pg_sorted.bam'],
somaticsniper_options['genome_fasta']),
memory='6G')
filtersnipes = job.wrapJobFn(filter_somaticsniper, tumor_bam, snipe.rv(), pileup.rv(),
univ_options, somaticsniper_options,
disk=PromisedRequirement(sniper_filter_disk,
tumor_bam['tumor_dna_fix_pg_sorted.bam'],
somaticsniper_options['genome_fasta']),
memory='6G')
job.addChild(snipe)
job.addChild(pileup)
snipe.addChild(filtersnipes)
pileup.addChild(filtersnipes)
if split:
unmerge_snipes = job.wrapJobFn(unmerge, filtersnipes.rv(), 'somaticsniper', chromosomes,
somaticsniper_options, univ_options)
filtersnipes.addChild(unmerge_snipes)
return unmerge_snipes.rv()
else:
return filtersnipes.rv() | [
"def",
"run_somaticsniper",
"(",
"job",
",",
"tumor_bam",
",",
"normal_bam",
",",
"univ_options",
",",
"somaticsniper_options",
",",
"split",
"=",
"True",
")",
":",
"if",
"somaticsniper_options",
"[",
"'chromosomes'",
"]",
":",
"chromosomes",
"=",
"somaticsniper_options",
"[",
"'chromosomes'",
"]",
"else",
":",
"chromosomes",
"=",
"sample_chromosomes",
"(",
"job",
",",
"somaticsniper_options",
"[",
"'genome_fai'",
"]",
")",
"perchrom_somaticsniper",
"=",
"defaultdict",
"(",
")",
"snipe",
"=",
"job",
".",
"wrapJobFn",
"(",
"run_somaticsniper_full",
",",
"tumor_bam",
",",
"normal_bam",
",",
"univ_options",
",",
"somaticsniper_options",
",",
"disk",
"=",
"PromisedRequirement",
"(",
"sniper_disk",
",",
"tumor_bam",
"[",
"'tumor_dna_fix_pg_sorted.bam'",
"]",
",",
"normal_bam",
"[",
"'normal_dna_fix_pg_sorted.bam'",
"]",
",",
"somaticsniper_options",
"[",
"'genome_fasta'",
"]",
")",
",",
"memory",
"=",
"'6G'",
")",
"pileup",
"=",
"job",
".",
"wrapJobFn",
"(",
"run_pileup",
",",
"tumor_bam",
",",
"univ_options",
",",
"somaticsniper_options",
",",
"disk",
"=",
"PromisedRequirement",
"(",
"pileup_disk",
",",
"tumor_bam",
"[",
"'tumor_dna_fix_pg_sorted.bam'",
"]",
",",
"somaticsniper_options",
"[",
"'genome_fasta'",
"]",
")",
",",
"memory",
"=",
"'6G'",
")",
"filtersnipes",
"=",
"job",
".",
"wrapJobFn",
"(",
"filter_somaticsniper",
",",
"tumor_bam",
",",
"snipe",
".",
"rv",
"(",
")",
",",
"pileup",
".",
"rv",
"(",
")",
",",
"univ_options",
",",
"somaticsniper_options",
",",
"disk",
"=",
"PromisedRequirement",
"(",
"sniper_filter_disk",
",",
"tumor_bam",
"[",
"'tumor_dna_fix_pg_sorted.bam'",
"]",
",",
"somaticsniper_options",
"[",
"'genome_fasta'",
"]",
")",
",",
"memory",
"=",
"'6G'",
")",
"job",
".",
"addChild",
"(",
"snipe",
")",
"job",
".",
"addChild",
"(",
"pileup",
")",
"snipe",
".",
"addChild",
"(",
"filtersnipes",
")",
"pileup",
".",
"addChild",
"(",
"filtersnipes",
")",
"if",
"split",
":",
"unmerge_snipes",
"=",
"job",
".",
"wrapJobFn",
"(",
"unmerge",
",",
"filtersnipes",
".",
"rv",
"(",
")",
",",
"'somaticsniper'",
",",
"chromosomes",
",",
"somaticsniper_options",
",",
"univ_options",
")",
"filtersnipes",
".",
"addChild",
"(",
"unmerge_snipes",
")",
"return",
"unmerge_snipes",
".",
"rv",
"(",
")",
"else",
":",
"return",
"filtersnipes",
".",
"rv",
"(",
")"
] | Run the SomaticSniper subgraph on the DNA bams. Optionally split the results into
per-chromosome vcfs.
:param dict tumor_bam: Dict of bam and bai for tumor DNA-Seq
:param dict normal_bam: Dict of bam and bai for normal DNA-Seq
:param dict univ_options: Dict of universal options used by almost all tools
:param dict somaticsniper_options: Options specific to SomaticSniper
:param bool split: Should the results be split into perchrom vcfs?
:return: Either the fsID to the genome-level vcf or a dict of results from running SomaticSniper
on every chromosome
perchrom_somaticsniper:
|- 'chr1': fsID
|- 'chr2' fsID
|
|-...
|
+- 'chrM': fsID
:rtype: toil.fileStore.FileID|dict | [
"Run",
"the",
"SomaticSniper",
"subgraph",
"on",
"the",
"DNA",
"bams",
".",
"Optionally",
"split",
"the",
"results",
"into",
"per",
"-",
"chromosome",
"vcfs",
"."
] | 06310682c50dcf8917b912c8e551299ff7ee41ce | https://github.com/BD2KGenomics/protect/blob/06310682c50dcf8917b912c8e551299ff7ee41ce/src/protect/mutation_calling/somaticsniper.py#L67-L123 | train |
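
The disk requirements in this record are deferred with PromisedRequirement: the sizing callable only runs once the promised file IDs have resolved, so each job can be scheduled with a disk figure derived from the actual input sizes. A minimal sketch of the pattern; the sizing rule shown is hypothetical and is not ProTECT's actual sniper_disk.

from toil.job import PromisedRequirement

def approx_disk(bam_id, fasta_id):
    # Hypothetical rule: twice the summed input sizes as head-room.
    return 2 * (bam_id.size + fasta_id.size)

# child = job.wrapJobFn(some_tool, bam.rv(),
#                       disk=PromisedRequirement(approx_disk, bam.rv(), fasta_id))
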
BD2KGenomics/protect | src/protect/mutation_calling/somaticsniper.py | run_somaticsniper_full | def run_somaticsniper_full(job, tumor_bam, normal_bam, univ_options, somaticsniper_options):
"""
Run SomaticSniper on the DNA bams.
:param dict tumor_bam: Dict of bam and bai for tumor DNA-Seq
:param dict normal_bam: Dict of bam and bai for normal DNA-Seq
:param dict univ_options: Dict of universal options used by almost all tools
:param dict somaticsniper_options: Options specific to SomaticSniper
:return: fsID to the genome-level vcf
:rtype: toil.fileStore.FileID
"""
work_dir = os.getcwd()
input_files = {
'tumor.bam': tumor_bam['tumor_dna_fix_pg_sorted.bam'],
'tumor.bam.bai': tumor_bam['tumor_dna_fix_pg_sorted.bam.bai'],
'normal.bam': normal_bam['normal_dna_fix_pg_sorted.bam'],
'normal.bam.bai': normal_bam['normal_dna_fix_pg_sorted.bam.bai'],
'genome.fa.tar.gz': somaticsniper_options['genome_fasta'],
'genome.fa.fai.tar.gz': somaticsniper_options['genome_fai']}
input_files = get_files_from_filestore(job, input_files, work_dir, docker=False)
for key in ('genome.fa', 'genome.fa.fai'):
input_files[key] = untargz(input_files[key + '.tar.gz'], work_dir)
input_files = {key: docker_path(path) for key, path in input_files.items()}
output_file = os.path.join(work_dir, 'somatic-sniper_full.vcf')
parameters = ['-f', input_files['genome.fa'],
'-F', 'vcf',
'-G',
'-L',
'-q', '1',
'-Q', '15',
input_files['tumor.bam'],
input_files['normal.bam'],
docker_path(output_file)]
docker_call(tool='somaticsniper', tool_parameters=parameters, work_dir=work_dir,
dockerhub=univ_options['dockerhub'], tool_version=somaticsniper_options['version'])
outfile = job.fileStore.writeGlobalFile(output_file)
job.fileStore.logToMaster('Ran SomaticSniper on %s successfully' % univ_options['patient'])
return outfile | python | def run_somaticsniper_full(job, tumor_bam, normal_bam, univ_options, somaticsniper_options):
"""
Run SomaticSniper on the DNA bams.
:param dict tumor_bam: Dict of bam and bai for tumor DNA-Seq
:param dict normal_bam: Dict of bam and bai for normal DNA-Seq
:param dict univ_options: Dict of universal options used by almost all tools
:param dict somaticsniper_options: Options specific to SomaticSniper
:return: fsID to the genome-level vcf
:rtype: toil.fileStore.FileID
"""
work_dir = os.getcwd()
input_files = {
'tumor.bam': tumor_bam['tumor_dna_fix_pg_sorted.bam'],
'tumor.bam.bai': tumor_bam['tumor_dna_fix_pg_sorted.bam.bai'],
'normal.bam': normal_bam['normal_dna_fix_pg_sorted.bam'],
'normal.bam.bai': normal_bam['normal_dna_fix_pg_sorted.bam.bai'],
'genome.fa.tar.gz': somaticsniper_options['genome_fasta'],
'genome.fa.fai.tar.gz': somaticsniper_options['genome_fai']}
input_files = get_files_from_filestore(job, input_files, work_dir, docker=False)
for key in ('genome.fa', 'genome.fa.fai'):
input_files[key] = untargz(input_files[key + '.tar.gz'], work_dir)
input_files = {key: docker_path(path) for key, path in input_files.items()}
output_file = os.path.join(work_dir, 'somatic-sniper_full.vcf')
parameters = ['-f', input_files['genome.fa'],
'-F', 'vcf',
'-G',
'-L',
'-q', '1',
'-Q', '15',
input_files['tumor.bam'],
input_files['normal.bam'],
docker_path(output_file)]
docker_call(tool='somaticsniper', tool_parameters=parameters, work_dir=work_dir,
dockerhub=univ_options['dockerhub'], tool_version=somaticsniper_options['version'])
outfile = job.fileStore.writeGlobalFile(output_file)
job.fileStore.logToMaster('Ran SomaticSniper on %s successfully' % univ_options['patient'])
return outfile | [
"def",
"run_somaticsniper_full",
"(",
"job",
",",
"tumor_bam",
",",
"normal_bam",
",",
"univ_options",
",",
"somaticsniper_options",
")",
":",
"work_dir",
"=",
"os",
".",
"getcwd",
"(",
")",
"input_files",
"=",
"{",
"'tumor.bam'",
":",
"tumor_bam",
"[",
"'tumor_dna_fix_pg_sorted.bam'",
"]",
",",
"'tumor.bam.bai'",
":",
"tumor_bam",
"[",
"'tumor_dna_fix_pg_sorted.bam.bai'",
"]",
",",
"'normal.bam'",
":",
"normal_bam",
"[",
"'normal_dna_fix_pg_sorted.bam'",
"]",
",",
"'normal.bam.bai'",
":",
"normal_bam",
"[",
"'normal_dna_fix_pg_sorted.bam.bai'",
"]",
",",
"'genome.fa.tar.gz'",
":",
"somaticsniper_options",
"[",
"'genome_fasta'",
"]",
",",
"'genome.fa.fai.tar.gz'",
":",
"somaticsniper_options",
"[",
"'genome_fai'",
"]",
"}",
"input_files",
"=",
"get_files_from_filestore",
"(",
"job",
",",
"input_files",
",",
"work_dir",
",",
"docker",
"=",
"False",
")",
"for",
"key",
"in",
"(",
"'genome.fa'",
",",
"'genome.fa.fai'",
")",
":",
"input_files",
"[",
"key",
"]",
"=",
"untargz",
"(",
"input_files",
"[",
"key",
"+",
"'.tar.gz'",
"]",
",",
"work_dir",
")",
"input_files",
"=",
"{",
"key",
":",
"docker_path",
"(",
"path",
")",
"for",
"key",
",",
"path",
"in",
"input_files",
".",
"items",
"(",
")",
"}",
"output_file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"work_dir",
",",
"'somatic-sniper_full.vcf'",
")",
"parameters",
"=",
"[",
"'-f'",
",",
"input_files",
"[",
"'genome.fa'",
"]",
",",
"'-F'",
",",
"'vcf'",
",",
"'-G'",
",",
"'-L'",
",",
"'-q'",
",",
"'1'",
",",
"'-Q'",
",",
"'15'",
",",
"input_files",
"[",
"'tumor.bam'",
"]",
",",
"input_files",
"[",
"'normal.bam'",
"]",
",",
"docker_path",
"(",
"output_file",
")",
"]",
"docker_call",
"(",
"tool",
"=",
"'somaticsniper'",
",",
"tool_parameters",
"=",
"parameters",
",",
"work_dir",
"=",
"work_dir",
",",
"dockerhub",
"=",
"univ_options",
"[",
"'dockerhub'",
"]",
",",
"tool_version",
"=",
"somaticsniper_options",
"[",
"'version'",
"]",
")",
"outfile",
"=",
"job",
".",
"fileStore",
".",
"writeGlobalFile",
"(",
"output_file",
")",
"job",
".",
"fileStore",
".",
"logToMaster",
"(",
"'Ran SomaticSniper on %s successfully'",
"%",
"univ_options",
"[",
"'patient'",
"]",
")",
"return",
"outfile"
] | Run SomaticSniper on the DNA bams.
:param dict tumor_bam: Dict of bam and bai for tumor DNA-Seq
:param dict normal_bam: Dict of bam and bai for normal DNA-Seq
:param dict univ_options: Dict of universal options used by almost all tools
:param dict somaticsniper_options: Options specific to SomaticSniper
:return: fsID to the genome-level vcf
:rtype: toil.fileStore.FileID | [
"Run",
"SomaticSniper",
"on",
"the",
"DNA",
"bams",
"."
] | 06310682c50dcf8917b912c8e551299ff7ee41ce | https://github.com/BD2KGenomics/protect/blob/06310682c50dcf8917b912c8e551299ff7ee41ce/src/protect/mutation_calling/somaticsniper.py#L126-L165 | train |
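
Outside the Docker wrapper, the parameters above amount to one SomaticSniper invocation. A non-Docker sketch, assuming the binary is installed under its usual name bam-somaticsniper and using placeholder paths:

import subprocess

def somatic_sniper_local(genome_fa, tumor_bam, normal_bam, out_vcf):
    # Equivalent of: bam-somaticsniper -f genome.fa -F vcf -G -L -q 1 -Q 15 tumor.bam normal.bam out.vcf
    subprocess.check_call(['bam-somaticsniper', '-f', genome_fa, '-F', 'vcf',
                           '-G', '-L', '-q', '1', '-Q', '15',
                           tumor_bam, normal_bam, out_vcf])
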
BD2KGenomics/protect | src/protect/mutation_calling/somaticsniper.py | filter_somaticsniper | def filter_somaticsniper(job, tumor_bam, somaticsniper_output, tumor_pileup, univ_options,
somaticsniper_options):
"""
Filter SomaticSniper calls.
:param dict tumor_bam: Dict of bam and bai for tumor DNA-Seq
:param toil.fileStore.FileID somaticsniper_output: SomaticSniper output vcf
:param toil.fileStore.FileID tumor_pileup: Pileup generated for the tumor bam
:param dict univ_options: Dict of universal options used by almost all tools
:param dict somaticsniper_options: Options specific to SomaticSniper
:returns: fsID for the filtered genome-level vcf
:rtype: toil.fileStore.FileID
"""
work_dir = os.getcwd()
input_files = {
'tumor.bam': tumor_bam['tumor_dna_fix_pg_sorted.bam'],
'tumor.bam.bai': tumor_bam['tumor_dna_fix_pg_sorted.bam.bai'],
'input.vcf': somaticsniper_output,
'pileup.txt': tumor_pileup,
'genome.fa.tar.gz': somaticsniper_options['genome_fasta'],
'genome.fa.fai.tar.gz': somaticsniper_options['genome_fai']}
input_files = get_files_from_filestore(job, input_files, work_dir, docker=False)
for key in ('genome.fa', 'genome.fa.fai'):
input_files[key] = untargz(input_files[key + '.tar.gz'], work_dir)
input_files = {key: docker_path(path) for key, path in input_files.items()}
# Run snpfilter.pl
parameters = ['snpfilter.pl',
'--snp-file', input_files['input.vcf'],
'--indel-file', input_files['pileup.txt']]
# Creates /data/input.vcf.SNPfilter
docker_call(tool='somaticsniper-addons', tool_parameters=parameters, work_dir=work_dir,
dockerhub=univ_options['dockerhub'], tool_version=somaticsniper_options['version'])
# Run prepare_for_readcount.pl
parameters = ['prepare_for_readcount.pl',
'--snp-file', input_files['input.vcf'] + '.SNPfilter']
# Creates /data/input.vcf.SNPfilter.pos
docker_call(tool='somaticsniper-addons', tool_parameters=parameters, work_dir=work_dir,
dockerhub=univ_options['dockerhub'], tool_version=somaticsniper_options['version'])
# Run bam-readcount
parameters = ['-b', '15',
'-f', input_files['genome.fa'],
'-l', input_files['input.vcf'] + '.SNPfilter.pos',
'-w', '1',
input_files['tumor.bam']]
# Creates the read counts file
with open(os.path.join(work_dir, 'readcounts.txt'), 'w') as readcounts_file:
docker_call(tool='bam-readcount', tool_parameters=parameters, work_dir=work_dir,
dockerhub=univ_options['dockerhub'], outfile=readcounts_file,
tool_version=somaticsniper_options['bam_readcount']['version'])
# Run fpfilter.pl
parameters = ['fpfilter.pl',
'--snp-file', input_files['input.vcf'] + '.SNPfilter',
'--readcount-file', docker_path(readcounts_file.name)]
# Creates input.vcf.SNPfilter.fp_pass and input.vcf.SNPfilter.fp_fail
docker_call(tool='somaticsniper-addons', tool_parameters=parameters, work_dir=work_dir,
dockerhub=univ_options['dockerhub'], tool_version=somaticsniper_options['version'])
# Run highconfidence.pl
parameters = ['highconfidence.pl',
'--snp-file', input_files['input.vcf'] + '.SNPfilter.fp_pass']
# Creates input.vcf.SNPfilter.fp_pass.hc
docker_call(tool='somaticsniper-addons', tool_parameters=parameters, work_dir=work_dir,
dockerhub=univ_options['dockerhub'], tool_version=somaticsniper_options['version'])
outfile = job.fileStore.writeGlobalFile(os.path.join(os.getcwd(),
'input.vcf.SNPfilter.fp_pass.hc'))
job.fileStore.logToMaster('Filtered SomaticSniper for %s successfully' %
univ_options['patient'])
return outfile | python | def filter_somaticsniper(job, tumor_bam, somaticsniper_output, tumor_pileup, univ_options,
somaticsniper_options):
"""
Filter SomaticSniper calls.
:param dict tumor_bam: Dict of bam and bai for tumor DNA-Seq
:param toil.fileStore.FileID somaticsniper_output: SomaticSniper output vcf
:param toil.fileStore.FileID tumor_pileup: Pileup generated for the tumor bam
:param dict univ_options: Dict of universal options used by almost all tools
:param dict somaticsniper_options: Options specific to SomaticSniper
:returns: fsID for the filtered genome-level vcf
:rtype: toil.fileStore.FileID
"""
work_dir = os.getcwd()
input_files = {
'tumor.bam': tumor_bam['tumor_dna_fix_pg_sorted.bam'],
'tumor.bam.bai': tumor_bam['tumor_dna_fix_pg_sorted.bam.bai'],
'input.vcf': somaticsniper_output,
'pileup.txt': tumor_pileup,
'genome.fa.tar.gz': somaticsniper_options['genome_fasta'],
'genome.fa.fai.tar.gz': somaticsniper_options['genome_fai']}
input_files = get_files_from_filestore(job, input_files, work_dir, docker=False)
for key in ('genome.fa', 'genome.fa.fai'):
input_files[key] = untargz(input_files[key + '.tar.gz'], work_dir)
input_files = {key: docker_path(path) for key, path in input_files.items()}
# Run snpfilter.pl
parameters = ['snpfilter.pl',
'--snp-file', input_files['input.vcf'],
'--indel-file', input_files['pileup.txt']]
# Creates /data/input.vcf.SNPfilter
docker_call(tool='somaticsniper-addons', tool_parameters=parameters, work_dir=work_dir,
dockerhub=univ_options['dockerhub'], tool_version=somaticsniper_options['version'])
# Run prepare_for_readcount.pl
parameters = ['prepare_for_readcount.pl',
'--snp-file', input_files['input.vcf'] + '.SNPfilter']
# Creates /data/input.vcf.SNPfilter.pos
docker_call(tool='somaticsniper-addons', tool_parameters=parameters, work_dir=work_dir,
dockerhub=univ_options['dockerhub'], tool_version=somaticsniper_options['version'])
# Run bam-readcount
parameters = ['-b', '15',
'-f', input_files['genome.fa'],
'-l', input_files['input.vcf'] + '.SNPfilter.pos',
'-w', '1',
input_files['tumor.bam']]
# Creates the read counts file
with open(os.path.join(work_dir, 'readcounts.txt'), 'w') as readcounts_file:
docker_call(tool='bam-readcount', tool_parameters=parameters, work_dir=work_dir,
dockerhub=univ_options['dockerhub'], outfile=readcounts_file,
tool_version=somaticsniper_options['bam_readcount']['version'])
# Run fpfilter.pl
parameters = ['fpfilter.pl',
'--snp-file', input_files['input.vcf'] + '.SNPfilter',
'--readcount-file', docker_path(readcounts_file.name)]
# Creates input.vcf.SNPfilter.fp_pass and input.vcf.SNPfilter.fp_fail
docker_call(tool='somaticsniper-addons', tool_parameters=parameters, work_dir=work_dir,
dockerhub=univ_options['dockerhub'], tool_version=somaticsniper_options['version'])
# Run highconfidence.pl
parameters = ['highconfidence.pl',
'--snp-file', input_files['input.vcf'] + '.SNPfilter.fp_pass']
# Creates input.vcf.SNPfilter.fp_pass.hc
docker_call(tool='somaticsniper-addons', tool_parameters=parameters, work_dir=work_dir,
dockerhub=univ_options['dockerhub'], tool_version=somaticsniper_options['version'])
outfile = job.fileStore.writeGlobalFile(os.path.join(os.getcwd(),
'input.vcf.SNPfilter.fp_pass.hc'))
job.fileStore.logToMaster('Filtered SomaticSniper for %s successfully' %
univ_options['patient'])
return outfile | [
"def",
"filter_somaticsniper",
"(",
"job",
",",
"tumor_bam",
",",
"somaticsniper_output",
",",
"tumor_pileup",
",",
"univ_options",
",",
"somaticsniper_options",
")",
":",
"work_dir",
"=",
"os",
".",
"getcwd",
"(",
")",
"input_files",
"=",
"{",
"'tumor.bam'",
":",
"tumor_bam",
"[",
"'tumor_dna_fix_pg_sorted.bam'",
"]",
",",
"'tumor.bam.bai'",
":",
"tumor_bam",
"[",
"'tumor_dna_fix_pg_sorted.bam.bai'",
"]",
",",
"'input.vcf'",
":",
"somaticsniper_output",
",",
"'pileup.txt'",
":",
"tumor_pileup",
",",
"'genome.fa.tar.gz'",
":",
"somaticsniper_options",
"[",
"'genome_fasta'",
"]",
",",
"'genome.fa.fai.tar.gz'",
":",
"somaticsniper_options",
"[",
"'genome_fai'",
"]",
"}",
"input_files",
"=",
"get_files_from_filestore",
"(",
"job",
",",
"input_files",
",",
"work_dir",
",",
"docker",
"=",
"False",
")",
"for",
"key",
"in",
"(",
"'genome.fa'",
",",
"'genome.fa.fai'",
")",
":",
"input_files",
"[",
"key",
"]",
"=",
"untargz",
"(",
"input_files",
"[",
"key",
"+",
"'.tar.gz'",
"]",
",",
"work_dir",
")",
"input_files",
"=",
"{",
"key",
":",
"docker_path",
"(",
"path",
")",
"for",
"key",
",",
"path",
"in",
"input_files",
".",
"items",
"(",
")",
"}",
"parameters",
"=",
"[",
"'snpfilter.pl'",
",",
"'--snp-file'",
",",
"input_files",
"[",
"'input.vcf'",
"]",
",",
"'--indel-file'",
",",
"input_files",
"[",
"'pileup.txt'",
"]",
"]",
"docker_call",
"(",
"tool",
"=",
"'somaticsniper-addons'",
",",
"tool_parameters",
"=",
"parameters",
",",
"work_dir",
"=",
"work_dir",
",",
"dockerhub",
"=",
"univ_options",
"[",
"'dockerhub'",
"]",
",",
"tool_version",
"=",
"somaticsniper_options",
"[",
"'version'",
"]",
")",
"parameters",
"=",
"[",
"'prepare_for_readcount.pl'",
",",
"'--snp-file'",
",",
"input_files",
"[",
"'input.vcf'",
"]",
"+",
"'.SNPfilter'",
"]",
"docker_call",
"(",
"tool",
"=",
"'somaticsniper-addons'",
",",
"tool_parameters",
"=",
"parameters",
",",
"work_dir",
"=",
"work_dir",
",",
"dockerhub",
"=",
"univ_options",
"[",
"'dockerhub'",
"]",
",",
"tool_version",
"=",
"somaticsniper_options",
"[",
"'version'",
"]",
")",
"parameters",
"=",
"[",
"'-b'",
",",
"'15'",
",",
"'-f'",
",",
"input_files",
"[",
"'genome.fa'",
"]",
",",
"'-l'",
",",
"input_files",
"[",
"'input.vcf'",
"]",
"+",
"'.SNPfilter.pos'",
",",
"'-w'",
",",
"'1'",
",",
"input_files",
"[",
"'tumor.bam'",
"]",
"]",
"with",
"open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"work_dir",
",",
"'readcounts.txt'",
")",
",",
"'w'",
")",
"as",
"readcounts_file",
":",
"docker_call",
"(",
"tool",
"=",
"'bam-readcount'",
",",
"tool_parameters",
"=",
"parameters",
",",
"work_dir",
"=",
"work_dir",
",",
"dockerhub",
"=",
"univ_options",
"[",
"'dockerhub'",
"]",
",",
"outfile",
"=",
"readcounts_file",
",",
"tool_version",
"=",
"somaticsniper_options",
"[",
"'bam_readcount'",
"]",
"[",
"'version'",
"]",
")",
"parameters",
"=",
"[",
"'fpfilter.pl'",
",",
"'--snp-file'",
",",
"input_files",
"[",
"'input.vcf'",
"]",
"+",
"'.SNPfilter'",
",",
"'--readcount-file'",
",",
"docker_path",
"(",
"readcounts_file",
".",
"name",
")",
"]",
"docker_call",
"(",
"tool",
"=",
"'somaticsniper-addons'",
",",
"tool_parameters",
"=",
"parameters",
",",
"work_dir",
"=",
"work_dir",
",",
"dockerhub",
"=",
"univ_options",
"[",
"'dockerhub'",
"]",
",",
"tool_version",
"=",
"somaticsniper_options",
"[",
"'version'",
"]",
")",
"parameters",
"=",
"[",
"'highconfidence.pl'",
",",
"'--snp-file'",
",",
"input_files",
"[",
"'input.vcf'",
"]",
"+",
"'.SNPfilter.fp_pass'",
"]",
"docker_call",
"(",
"tool",
"=",
"'somaticsniper-addons'",
",",
"tool_parameters",
"=",
"parameters",
",",
"work_dir",
"=",
"work_dir",
",",
"dockerhub",
"=",
"univ_options",
"[",
"'dockerhub'",
"]",
",",
"tool_version",
"=",
"somaticsniper_options",
"[",
"'version'",
"]",
")",
"outfile",
"=",
"job",
".",
"fileStore",
".",
"writeGlobalFile",
"(",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"getcwd",
"(",
")",
",",
"'input.vcf.SNPfilter.fp_pass.hc'",
")",
")",
"job",
".",
"fileStore",
".",
"logToMaster",
"(",
"'Filtered SomaticSniper for %s successfully'",
"%",
"univ_options",
"[",
"'patient'",
"]",
")",
"return",
"outfile"
] | Filter SomaticSniper calls.
:param dict tumor_bam: Dict of bam and bai for tumor DNA-Seq
:param toil.fileStore.FileID somaticsniper_output: SomaticSniper output vcf
:param toil.fileStore.FileID tumor_pileup: Pileup generated for the tumor bam
:param dict univ_options: Dict of universal options used by almost all tools
:param dict somaticsniper_options: Options specific to SomaticSniper
:returns: fsID for the filtered genome-level vcf
:rtype: toil.fileStore.FileID | [
"Filter",
"SomaticSniper",
"calls",
"."
] | 06310682c50dcf8917b912c8e551299ff7ee41ce | https://github.com/BD2KGenomics/protect/blob/06310682c50dcf8917b912c8e551299ff7ee41ce/src/protect/mutation_calling/somaticsniper.py#L168-L243 | train |
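A minimal sketch of how the job functions in these records (`run_somaticsniper`, `run_pileup`, and `filter_somaticsniper`) might be chained with Toil's job-graph API. This wiring is an illustration only, not code from the repository; the import path is assumed from the record's file path, and resource hints and the top-level runner are omitted.

```python
# Hypothetical Toil wiring for the SomaticSniper steps shown in these records.
# Assumptions: the module is importable as protect.mutation_calling.somaticsniper,
# and univ_options / somaticsniper_options carry the keys those jobs expect.
from protect.mutation_calling.somaticsniper import (filter_somaticsniper,
                                                    run_pileup,
                                                    run_somaticsniper)


def somaticsniper_pipeline(job, tumor_bam, normal_bam, univ_options,
                           somaticsniper_options):
    """Call variants, build the tumor pileup, then filter the raw calls."""
    # Child jobs run in parallel once this job starts.
    sniper = job.addChildJobFn(run_somaticsniper, tumor_bam, normal_bam,
                               univ_options, somaticsniper_options)
    pileup = job.addChildJobFn(run_pileup, tumor_bam,
                               univ_options, somaticsniper_options)
    # The follow-on consumes both promises after the children finish.
    filtered = job.addFollowOnJobFn(filter_somaticsniper, tumor_bam,
                                    sniper.rv(), pileup.rv(),
                                    univ_options, somaticsniper_options)
    # Promise for the fsID of the filtered genome-level vcf.
    return filtered.rv()
```

In a complete pipeline this function would itself be wrapped (for example with `Job.wrapJobFn`) and handed to Toil's runner; that scaffolding is left out of the sketch.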
BD2KGenomics/protect | src/protect/mutation_calling/somaticsniper.py | run_pileup | def run_pileup(job, tumor_bam, univ_options, somaticsniper_options):
"""
Runs a samtools pileup on the tumor bam.
:param dict tumor_bam: Dict of bam and bai for tumor DNA-Seq
:param dict univ_options: Dict of universal options used by almost all tools
:param dict somaticsniper_options: Options specific to SomaticSniper
:return: fsID for the pileup file
:rtype: toil.fileStore.FileID
"""
work_dir = os.getcwd()
input_files = {
'tumor.bam': tumor_bam['tumor_dna_fix_pg_sorted.bam'],
'tumor.bam.bai': tumor_bam['tumor_dna_fix_pg_sorted.bam.bai'],
'genome.fa.tar.gz': somaticsniper_options['genome_fasta'],
'genome.fa.fai.tar.gz': somaticsniper_options['genome_fai']}
input_files = get_files_from_filestore(job, input_files, work_dir, docker=False)
for key in ('genome.fa', 'genome.fa.fai'):
input_files[key] = untargz(input_files[key + '.tar.gz'], work_dir)
input_files = {key: docker_path(path) for key, path in input_files.items()}
parameters = ['pileup',
'-cvi',
'-f', docker_path(input_files['genome.fa']),
docker_path(input_files['tumor.bam'])]
with open(os.path.join(work_dir, 'pileup.txt'), 'w') as pileup_file:
docker_call(tool='samtools', tool_parameters=parameters, work_dir=work_dir,
dockerhub=univ_options['dockerhub'], outfile=pileup_file,
tool_version=somaticsniper_options['samtools']['version'])
outfile = job.fileStore.writeGlobalFile(pileup_file.name)
job.fileStore.logToMaster('Ran samtools pileup on %s successfully' % univ_options['patient'])
return outfile | python | def run_pileup(job, tumor_bam, univ_options, somaticsniper_options):
"""
Runs a samtools pileup on the tumor bam.
:param dict tumor_bam: Dict of bam and bai for tumor DNA-Seq
:param dict univ_options: Dict of universal options used by almost all tools
:param dict somaticsniper_options: Options specific to SomaticSniper
:return: fsID for the pileup file
:rtype: toil.fileStore.FileID
"""
work_dir = os.getcwd()
input_files = {
'tumor.bam': tumor_bam['tumor_dna_fix_pg_sorted.bam'],
'tumor.bam.bai': tumor_bam['tumor_dna_fix_pg_sorted.bam.bai'],
'genome.fa.tar.gz': somaticsniper_options['genome_fasta'],
'genome.fa.fai.tar.gz': somaticsniper_options['genome_fai']}
input_files = get_files_from_filestore(job, input_files, work_dir, docker=False)
for key in ('genome.fa', 'genome.fa.fai'):
input_files[key] = untargz(input_files[key + '.tar.gz'], work_dir)
input_files = {key: docker_path(path) for key, path in input_files.items()}
parameters = ['pileup',
'-cvi',
'-f', docker_path(input_files['genome.fa']),
docker_path(input_files['tumor.bam'])]
with open(os.path.join(work_dir, 'pileup.txt'), 'w') as pileup_file:
docker_call(tool='samtools', tool_parameters=parameters, work_dir=work_dir,
dockerhub=univ_options['dockerhub'], outfile=pileup_file,
tool_version=somaticsniper_options['samtools']['version'])
outfile = job.fileStore.writeGlobalFile(pileup_file.name)
job.fileStore.logToMaster('Ran samtools pileup on %s successfully' % univ_options['patient'])
return outfile | [
"def",
"run_pileup",
"(",
"job",
",",
"tumor_bam",
",",
"univ_options",
",",
"somaticsniper_options",
")",
":",
"work_dir",
"=",
"os",
".",
"getcwd",
"(",
")",
"input_files",
"=",
"{",
"'tumor.bam'",
":",
"tumor_bam",
"[",
"'tumor_dna_fix_pg_sorted.bam'",
"]",
",",
"'tumor.bam.bai'",
":",
"tumor_bam",
"[",
"'tumor_dna_fix_pg_sorted.bam.bai'",
"]",
",",
"'genome.fa.tar.gz'",
":",
"somaticsniper_options",
"[",
"'genome_fasta'",
"]",
",",
"'genome.fa.fai.tar.gz'",
":",
"somaticsniper_options",
"[",
"'genome_fai'",
"]",
"}",
"input_files",
"=",
"get_files_from_filestore",
"(",
"job",
",",
"input_files",
",",
"work_dir",
",",
"docker",
"=",
"False",
")",
"for",
"key",
"in",
"(",
"'genome.fa'",
",",
"'genome.fa.fai'",
")",
":",
"input_files",
"[",
"key",
"]",
"=",
"untargz",
"(",
"input_files",
"[",
"key",
"+",
"'.tar.gz'",
"]",
",",
"work_dir",
")",
"input_files",
"=",
"{",
"key",
":",
"docker_path",
"(",
"path",
")",
"for",
"key",
",",
"path",
"in",
"input_files",
".",
"items",
"(",
")",
"}",
"parameters",
"=",
"[",
"'pileup'",
",",
"'-cvi'",
",",
"'-f'",
",",
"docker_path",
"(",
"input_files",
"[",
"'genome.fa'",
"]",
")",
",",
"docker_path",
"(",
"input_files",
"[",
"'tumor.bam'",
"]",
")",
"]",
"with",
"open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"work_dir",
",",
"'pileup.txt'",
")",
",",
"'w'",
")",
"as",
"pileup_file",
":",
"docker_call",
"(",
"tool",
"=",
"'samtools'",
",",
"tool_parameters",
"=",
"parameters",
",",
"work_dir",
"=",
"work_dir",
",",
"dockerhub",
"=",
"univ_options",
"[",
"'dockerhub'",
"]",
",",
"outfile",
"=",
"pileup_file",
",",
"tool_version",
"=",
"somaticsniper_options",
"[",
"'samtools'",
"]",
"[",
"'version'",
"]",
")",
"outfile",
"=",
"job",
".",
"fileStore",
".",
"writeGlobalFile",
"(",
"pileup_file",
".",
"name",
")",
"job",
".",
"fileStore",
".",
"logToMaster",
"(",
"'Ran samtools pileup on %s successfully'",
"%",
"univ_options",
"[",
"'patient'",
"]",
")",
"return",
"outfile"
] | Runs a samtools pileup on the tumor bam.
:param dict tumor_bam: Dict of bam and bai for tumor DNA-Seq
:param dict univ_options: Dict of universal options used by almost all tools
:param dict somaticsniper_options: Options specific to SomaticSniper
:return: fsID for the pileup file
:rtype: toil.fileStore.FileID | [
"Runs",
"a",
"samtools",
"pileup",
"on",
"the",
"tumor",
"bam",
"."
] | 06310682c50dcf8917b912c8e551299ff7ee41ce | https://github.com/BD2KGenomics/protect/blob/06310682c50dcf8917b912c8e551299ff7ee41ce/src/protect/mutation_calling/somaticsniper.py#L261-L294 | train |
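For orientation, a sketch of the option dictionaries these job functions read. The keys are taken directly from the lookups in the code above; every concrete value is a made-up placeholder, not a value from the ProTECT configuration.

```python
# Placeholder option dicts; keys mirror the dictionary accesses in the code above,
# values are illustrative only.
univ_options = {
    'patient': 'TEST_PATIENT',     # sample id interpolated into the log messages
    'dockerhub': 'example_user',   # Docker Hub namespace passed to docker_call
}

somaticsniper_options = {
    'genome_fasta': None,          # fsID of genome.fa.tar.gz in the Toil job store
    'genome_fai': None,            # fsID of genome.fa.fai.tar.gz
    'version': '1.0.4',            # somaticsniper / somaticsniper-addons image tag (placeholder)
    'samtools': {'version': '0.1.19'},       # samtools image tag used for the pileup (placeholder)
    'bam_readcount': {'version': '0.7.4'},   # bam-readcount image tag (placeholder)
}
```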