Dataset schema (column name, dtype, value range):

  repo              : stringlengths   7 .. 55
  path              : stringlengths   4 .. 127
  func_name         : stringlengths   1 .. 88
  original_string   : stringlengths   75 .. 19.8k
  language          : stringclasses   1 value
  code              : stringlengths   75 .. 19.8k
  code_tokens       : sequence
  docstring         : stringlengths   3 .. 17.3k
  docstring_tokens  : sequence
  sha               : stringlengths   40 .. 40
  url               : stringlengths   87 .. 242
  partition         : stringclasses   1 value
OpenHydrology/floodestimation
floodestimation/parsers.py
FehFileParser.parse
def parse(self, file_name): """ Parse entire file and return relevant object. :param file_name: File path :type file_name: str :return: Parsed object """ self.object = self.parsed_class() with open(file_name, encoding='utf-8') as f: self.parse_str(f.read()) return self.object
python
def parse(self, file_name): """ Parse entire file and return relevant object. :param file_name: File path :type file_name: str :return: Parsed object """ self.object = self.parsed_class() with open(file_name, encoding='utf-8') as f: self.parse_str(f.read()) return self.object
[ "def", "parse", "(", "self", ",", "file_name", ")", ":", "self", ".", "object", "=", "self", ".", "parsed_class", "(", ")", "with", "open", "(", "file_name", ",", "encoding", "=", "'utf-8'", ")", "as", "f", ":", "self", ".", "parse_str", "(", "f", ".", "read", "(", ")", ")", "return", "self", ".", "object" ]
Parse entire file and return relevant object. :param file_name: File path :type file_name: str :return: Parsed object
[ "Parse", "entire", "file", "and", "return", "relevant", "object", "." ]
782da7c5abd1348923129efe89fb70003ebb088c
https://github.com/OpenHydrology/floodestimation/blob/782da7c5abd1348923129efe89fb70003ebb088c/floodestimation/parsers.py#L95-L106
train
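A minimal sketch of the parse()/parse_str() pattern used by FehFileParser.parse above, with a hypothetical Record class and TinyParser standing in for the floodestimation classes:

class Record:
    """Hypothetical stand-in for the parsed_class a parser builds."""
    def __init__(self):
        self.lines = []

class TinyParser:
    parsed_class = Record

    def parse_str(self, text):
        # Fill the object from the raw file contents.
        self.object.lines = text.splitlines()

    def parse(self, file_name):
        # Same shape as FehFileParser.parse: build the target object,
        # read the whole file, delegate to parse_str, return the object.
        self.object = self.parsed_class()
        with open(file_name, encoding='utf-8') as f:
            self.parse_str(f.read())
        return self.object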
staticdev/django-pagination-bootstrap
pagination_bootstrap/paginator.py
FinitePage.has_next
def has_next(self): """ Checks for one more item than last on this page. """ try: next_item = self.paginator.object_list[self.paginator.per_page] except IndexError: return False return True
python
def has_next(self): """ Checks for one more item than last on this page. """ try: next_item = self.paginator.object_list[self.paginator.per_page] except IndexError: return False return True
[ "def", "has_next", "(", "self", ")", ":", "try", ":", "next_item", "=", "self", ".", "paginator", ".", "object_list", "[", "self", ".", "paginator", ".", "per_page", "]", "except", "IndexError", ":", "return", "False", "return", "True" ]
Checks for one more item than last on this page.
[ "Checks", "for", "one", "more", "item", "than", "last", "on", "this", "page", "." ]
b4bf8352a364b223babbc5f33e14ecabd82c0886
https://github.com/staticdev/django-pagination-bootstrap/blob/b4bf8352a364b223babbc5f33e14ecabd82c0886/pagination_bootstrap/paginator.py#L157-L165
train
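The has_next check above works only because the paginator is assumed to slice one item more than per_page for the current page; the same look-ahead on a plain list (no Django involved) looks like this:

def has_next(object_list, per_page):
    # object_list is assumed to hold the current page plus at most one extra item.
    try:
        object_list[per_page]          # is there an item beyond this page?
    except IndexError:
        return False
    return True

print(has_next(['a', 'b', 'c'], 2))    # True - a third item exists
print(has_next(['a', 'b'], 2))         # False - nothing past the page boundary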
acutesoftware/AIKIF
scripts/examples/puzzle_missions_canninballs.py
parse_miss_cann
def parse_miss_cann(node, m, c): """ extracts names from the node to get counts of miss + cann on both sides """ if node[2]: m1 = node[0] m2 = m-node[0] c1 = node[1] c2 = c-node[1] else: m1=m-node[0] m2=node[0] c1=c-node[1] c2=node[1] return m1, c1, m2, c2
python
def parse_miss_cann(node, m, c): """ extracts names from the node to get counts of miss + cann on both sides """ if node[2]: m1 = node[0] m2 = m-node[0] c1 = node[1] c2 = c-node[1] else: m1=m-node[0] m2=node[0] c1=c-node[1] c2=node[1] return m1, c1, m2, c2
[ "def", "parse_miss_cann", "(", "node", ",", "m", ",", "c", ")", ":", "if", "node", "[", "2", "]", ":", "m1", "=", "node", "[", "0", "]", "m2", "=", "m", "-", "node", "[", "0", "]", "c1", "=", "node", "[", "1", "]", "c2", "=", "c", "-", "node", "[", "1", "]", "else", ":", "m1", "=", "m", "-", "node", "[", "0", "]", "m2", "=", "node", "[", "0", "]", "c1", "=", "c", "-", "node", "[", "1", "]", "c2", "=", "node", "[", "1", "]", "return", "m1", ",", "c1", ",", "m2", ",", "c2" ]
extracts names from the node to get counts of miss + cann on both sides
[ "extracts", "names", "from", "the", "node", "to", "get", "counts", "of", "miss", "+", "cann", "on", "both", "sides" ]
fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/scripts/examples/puzzle_missions_canninballs.py#L167-L183
train
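Reading a node as (missionaries, cannibals, boat_flag) for the bank the boat starts from - an assumption based on how solve() below seeds its frontier with (m, c, 1) - the arithmetic in parse_miss_cann can be checked with a small worked call:

def parse_miss_cann(node, m, c):
    # Same branching as the function above, condensed.
    if node[2]:
        m1, c1 = node[0], node[1]            # boat-side counts taken directly
        m2, c2 = m - node[0], c - node[1]    # other bank gets the remainder
    else:
        m1, c1 = m - node[0], c - node[1]
        m2, c2 = node[0], node[1]
    return m1, c1, m2, c2

# 3 missionaries and 3 cannibals in total; node says 2 and 1 with the boat flag set:
print(parse_miss_cann((2, 1, 1), 3, 3))      # (2, 1, 1, 2)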
acutesoftware/AIKIF
scripts/examples/puzzle_missions_canninballs.py
solve
def solve(m,c): """ run the algorithm to find the path list """ G={ (m,c,1):[] } frontier=[ (m,c,1) ] # 1 as boat starts on left bank while len(frontier) > 0: hold=list(frontier) for node in hold: newnode=[] frontier.remove(node) newnode.extend(pick_next_boat_trip(node, m,c, frontier)) for neighbor in newnode: if neighbor not in G: G[node].append(neighbor) G[neighbor]=[node] frontier.append(neighbor) return mod_plan.find_path_BFS(G,(m,c,1),(0,0,0))
python
def solve(m,c): """ run the algorithm to find the path list """ G={ (m,c,1):[] } frontier=[ (m,c,1) ] # 1 as boat starts on left bank while len(frontier) > 0: hold=list(frontier) for node in hold: newnode=[] frontier.remove(node) newnode.extend(pick_next_boat_trip(node, m,c, frontier)) for neighbor in newnode: if neighbor not in G: G[node].append(neighbor) G[neighbor]=[node] frontier.append(neighbor) return mod_plan.find_path_BFS(G,(m,c,1),(0,0,0))
[ "def", "solve", "(", "m", ",", "c", ")", ":", "G", "=", "{", "(", "m", ",", "c", ",", "1", ")", ":", "[", "]", "}", "frontier", "=", "[", "(", "m", ",", "c", ",", "1", ")", "]", "while", "len", "(", "frontier", ")", ">", "0", ":", "hold", "=", "list", "(", "frontier", ")", "for", "node", "in", "hold", ":", "newnode", "=", "[", "]", "frontier", ".", "remove", "(", "node", ")", "newnode", ".", "extend", "(", "pick_next_boat_trip", "(", "node", ",", "m", ",", "c", ",", "frontier", ")", ")", "for", "neighbor", "in", "newnode", ":", "if", "neighbor", "not", "in", "G", ":", "G", "[", "node", "]", ".", "append", "(", "neighbor", ")", "G", "[", "neighbor", "]", "=", "[", "node", "]", "frontier", ".", "append", "(", "neighbor", ")", "return", "mod_plan", ".", "find_path_BFS", "(", "G", ",", "(", "m", ",", "c", ",", "1", ")", ",", "(", "0", ",", "0", ",", "0", ")", ")" ]
run the algorithm to find the path list
[ "run", "the", "algorithm", "to", "find", "the", "path", "list" ]
fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/scripts/examples/puzzle_missions_canninballs.py#L225-L242
train
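solve() above only builds the adjacency dict G; the path itself comes from mod_plan.find_path_BFS. A generic BFS path reconstruction over such a dict - a sketch, not the actual AIKIF implementation - could look like:

from collections import deque

def find_path_bfs(graph, start, goal):
    # graph: dict mapping node -> list of neighbouring nodes
    parents = {start: None}
    todo = deque([start])
    while todo:
        node = todo.popleft()
        if node == goal:
            path = []
            while node is not None:          # walk parent links back to the start
                path.append(node)
                node = parents[node]
            return list(reversed(path))
        for neighbour in graph.get(node, []):
            if neighbour not in parents:
                parents[neighbour] = node
                todo.append(neighbour)
    return []                                # goal not reachable from start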
acutesoftware/AIKIF
aikif/dataTools/cls_sql_code_generator.py
SQLCodeGenerator.create_script_fact
def create_script_fact(self): """ appends the CREATE TABLE, index etc to self.ddl_text """ self.ddl_text += '---------------------------------------------\n' self.ddl_text += '-- CREATE Fact Table - ' + self.fact_table + '\n' self.ddl_text += '---------------------------------------------\n' self.ddl_text += 'DROP TABLE ' + self.fact_table + ' CASCADE CONSTRAINTS;\n' self.ddl_text += 'CREATE TABLE ' + self.fact_table + ' (\n' self.ddl_text += ' '.join([col + ' VARCHAR2(200), \n' for col in self.col_list]) self.ddl_text += ' ' + self.date_updated_col + ' DATE \n' # + src_table + '; \n' self.ddl_text += ');\n'
python
def create_script_fact(self): """ appends the CREATE TABLE, index etc to self.ddl_text """ self.ddl_text += '---------------------------------------------\n' self.ddl_text += '-- CREATE Fact Table - ' + self.fact_table + '\n' self.ddl_text += '---------------------------------------------\n' self.ddl_text += 'DROP TABLE ' + self.fact_table + ' CASCADE CONSTRAINTS;\n' self.ddl_text += 'CREATE TABLE ' + self.fact_table + ' (\n' self.ddl_text += ' '.join([col + ' VARCHAR2(200), \n' for col in self.col_list]) self.ddl_text += ' ' + self.date_updated_col + ' DATE \n' # + src_table + '; \n' self.ddl_text += ');\n'
[ "def", "create_script_fact", "(", "self", ")", ":", "self", ".", "ddl_text", "+=", "'---------------------------------------------\\n'", "self", ".", "ddl_text", "+=", "'-- CREATE Fact Table - '", "+", "self", ".", "fact_table", "+", "'\\n'", "self", ".", "ddl_text", "+=", "'---------------------------------------------\\n'", "self", ".", "ddl_text", "+=", "'DROP TABLE '", "+", "self", ".", "fact_table", "+", "' CASCADE CONSTRAINTS;\\n'", "self", ".", "ddl_text", "+=", "'CREATE TABLE '", "+", "self", ".", "fact_table", "+", "' (\\n'", "self", ".", "ddl_text", "+=", "' '", ".", "join", "(", "[", "col", "+", "' VARCHAR2(200), \\n'", "for", "col", "in", "self", ".", "col_list", "]", ")", "self", ".", "ddl_text", "+=", "' '", "+", "self", ".", "date_updated_col", "+", "' DATE \\n'", "self", ".", "ddl_text", "+=", "');\\n'" ]
appends the CREATE TABLE, index etc to self.ddl_text
[ "appends", "the", "CREATE", "TABLE", "index", "etc", "to", "self", ".", "ddl_text" ]
fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/dataTools/cls_sql_code_generator.py#L60-L71
train
acutesoftware/AIKIF
aikif/dataTools/cls_sql_code_generator.py
SQLCodeGenerator.create_script_staging_table
def create_script_staging_table(self, output_table, col_list): """ appends the CREATE TABLE, index etc to another table """ self.ddl_text += '---------------------------------------------\n' self.ddl_text += '-- CREATE Staging Table - ' + output_table + '\n' self.ddl_text += '---------------------------------------------\n' self.ddl_text += 'DROP TABLE ' + output_table + ' CASCADE CONSTRAINTS;\n' self.ddl_text += 'CREATE TABLE ' + output_table + ' (\n ' self.ddl_text += ' '.join([col + ' VARCHAR2(200), \n' for col in col_list]) self.ddl_text += ' ' + self.date_updated_col + ' DATE \n' # + src_table + '; \n' self.ddl_text += ');\n'
python
def create_script_staging_table(self, output_table, col_list): """ appends the CREATE TABLE, index etc to another table """ self.ddl_text += '---------------------------------------------\n' self.ddl_text += '-- CREATE Staging Table - ' + output_table + '\n' self.ddl_text += '---------------------------------------------\n' self.ddl_text += 'DROP TABLE ' + output_table + ' CASCADE CONSTRAINTS;\n' self.ddl_text += 'CREATE TABLE ' + output_table + ' (\n ' self.ddl_text += ' '.join([col + ' VARCHAR2(200), \n' for col in col_list]) self.ddl_text += ' ' + self.date_updated_col + ' DATE \n' # + src_table + '; \n' self.ddl_text += ');\n'
[ "def", "create_script_staging_table", "(", "self", ",", "output_table", ",", "col_list", ")", ":", "self", ".", "ddl_text", "+=", "'---------------------------------------------\\n'", "self", ".", "ddl_text", "+=", "'-- CREATE Staging Table - '", "+", "output_table", "+", "'\\n'", "self", ".", "ddl_text", "+=", "'---------------------------------------------\\n'", "self", ".", "ddl_text", "+=", "'DROP TABLE '", "+", "output_table", "+", "' CASCADE CONSTRAINTS;\\n'", "self", ".", "ddl_text", "+=", "'CREATE TABLE '", "+", "output_table", "+", "' (\\n '", "self", ".", "ddl_text", "+=", "' '", ".", "join", "(", "[", "col", "+", "' VARCHAR2(200), \\n'", "for", "col", "in", "col_list", "]", ")", "self", ".", "ddl_text", "+=", "' '", "+", "self", ".", "date_updated_col", "+", "' DATE \\n'", "self", ".", "ddl_text", "+=", "');\\n'" ]
appends the CREATE TABLE, index etc to another table
[ "appends", "the", "CREATE", "TABLE", "index", "etc", "to", "another", "table" ]
fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/dataTools/cls_sql_code_generator.py#L73-L84
train
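Both DDL methods above append Oracle-flavoured CREATE TABLE text to self.ddl_text. A standalone sketch with invented table and column names (and an assumed name for the date-updated column) shows the kind of script that accumulates:

def staging_table_ddl(output_table, col_list, date_updated_col='REC_EXTRACT_DATE'):
    # Mirrors the string building in create_script_staging_table above.
    ddl = 'DROP TABLE ' + output_table + ' CASCADE CONSTRAINTS;\n'
    ddl += 'CREATE TABLE ' + output_table + ' (\n  '
    ddl += '  '.join([col + ' VARCHAR2(200), \n' for col in col_list])
    ddl += '  ' + date_updated_col + ' DATE \n'
    ddl += ');\n'
    return ddl

print(staging_table_ddl('S_CUSTOMER', ['NAME', 'SUBURB']))
# DROP TABLE S_CUSTOMER CASCADE CONSTRAINTS;
# CREATE TABLE S_CUSTOMER (
#   NAME VARCHAR2(200),
#   SUBURB VARCHAR2(200),
#   REC_EXTRACT_DATE DATE
# );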
acutesoftware/AIKIF
scripts/examples/table_compare.py
distinct_values
def distinct_values(t_old, t_new): """ for all columns, check which values are not in the other table """ res = [] res.append([' -- NOT IN check -- ']) for new_col in t_new.header: dist_new = t_new.get_distinct_values_from_cols([new_col]) #print('NEW Distinct values for ' + new_col + ' = ' + str(dist_new)) for old_col in t_old.header: if old_col == new_col: dist_old = t_old.get_distinct_values_from_cols([old_col]) #print('OLD Distinct values for ' + old_col + ' = ' + str(dist_old)) # Now compare the old and new values to see what is different not_in_new = [x for x in dist_old[0] if x not in dist_new[0]] if not_in_new != []: #print(old_col + ' not_in_new = ' , not_in_new) res.append(['Not in New', old_col, not_in_new]) not_in_old = [x for x in dist_new[0] if x not in dist_old[0]] if not_in_old != []: #print(new_col + ' not_in_old = ' , not_in_old) res.append(['Not in Old', new_col, not_in_old]) return sorted(res)
python
def distinct_values(t_old, t_new): """ for all columns, check which values are not in the other table """ res = [] res.append([' -- NOT IN check -- ']) for new_col in t_new.header: dist_new = t_new.get_distinct_values_from_cols([new_col]) #print('NEW Distinct values for ' + new_col + ' = ' + str(dist_new)) for old_col in t_old.header: if old_col == new_col: dist_old = t_old.get_distinct_values_from_cols([old_col]) #print('OLD Distinct values for ' + old_col + ' = ' + str(dist_old)) # Now compare the old and new values to see what is different not_in_new = [x for x in dist_old[0] if x not in dist_new[0]] if not_in_new != []: #print(old_col + ' not_in_new = ' , not_in_new) res.append(['Not in New', old_col, not_in_new]) not_in_old = [x for x in dist_new[0] if x not in dist_old[0]] if not_in_old != []: #print(new_col + ' not_in_old = ' , not_in_old) res.append(['Not in Old', new_col, not_in_old]) return sorted(res)
[ "def", "distinct_values", "(", "t_old", ",", "t_new", ")", ":", "res", "=", "[", "]", "res", ".", "append", "(", "[", "' -- NOT IN check -- '", "]", ")", "for", "new_col", "in", "t_new", ".", "header", ":", "dist_new", "=", "t_new", ".", "get_distinct_values_from_cols", "(", "[", "new_col", "]", ")", "for", "old_col", "in", "t_old", ".", "header", ":", "if", "old_col", "==", "new_col", ":", "dist_old", "=", "t_old", ".", "get_distinct_values_from_cols", "(", "[", "old_col", "]", ")", "not_in_new", "=", "[", "x", "for", "x", "in", "dist_old", "[", "0", "]", "if", "x", "not", "in", "dist_new", "[", "0", "]", "]", "if", "not_in_new", "!=", "[", "]", ":", "res", ".", "append", "(", "[", "'Not in New'", ",", "old_col", ",", "not_in_new", "]", ")", "not_in_old", "=", "[", "x", "for", "x", "in", "dist_new", "[", "0", "]", "if", "x", "not", "in", "dist_old", "[", "0", "]", "]", "if", "not_in_old", "!=", "[", "]", ":", "res", ".", "append", "(", "[", "'Not in Old'", ",", "new_col", ",", "not_in_old", "]", ")", "return", "sorted", "(", "res", ")" ]
for all columns, check which values are not in the other table
[ "for", "all", "columns", "check", "which", "values", "are", "not", "in", "the", "other", "table" ]
fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/scripts/examples/table_compare.py#L123-L151
train
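The heart of distinct_values above is a pair of "not in" list comprehensions run in both directions; on plain lists the comparison reduces to:

# Illustrative column values for an 'old' and a 'new' table extract.
old_values = ['NSW', 'VIC', 'QLD']
new_values = ['NSW', 'VIC', 'TAS']

not_in_new = [x for x in old_values if x not in new_values]
not_in_old = [x for x in new_values if x not in old_values]

print(not_in_new)   # ['QLD'] - values that disappeared in the new table
print(not_in_old)   # ['TAS'] - values that only appear in the new table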
acutesoftware/AIKIF
aikif/web_app/web_aikif.py
aikif_web_menu
def aikif_web_menu(cur=''): """ returns the web page header containing standard AIKIF top level web menu """ pgeHdg = '' pgeBlurb = '' if cur == '': cur = 'Home' txt = get_header(cur) #"<div id=top_menu>" txt += '<div id = "container">\n' txt += ' <div id = "header">\n' txt += ' <!-- Banner -->\n' txt += ' <img src = "' + os.path.join('/static','aikif_banner.jpg') + '" alt="AIKIF Banner"/>\n' txt += ' <ul id = "menu_list">\n' for m in menu: if m[1] == cur: txt += ' <LI id="top_menu_selected"><a href=' + m[0] + '>' + m[1] + '</a></li>\n' pgeHdg = m[1] try: pgeBlurb = m[2] except Exception: pass else: txt += ' <LI id="top_menu"><a href=' + m[0] + '>' + m[1] + '</a></li>\n' txt += " </ul>\n </div>\n\n" txt += '<H1>AIKIF ' + pgeHdg + '</H1>\n' txt += '<H4>' + pgeBlurb + '</H4>\n' return txt
python
def aikif_web_menu(cur=''): """ returns the web page header containing standard AIKIF top level web menu """ pgeHdg = '' pgeBlurb = '' if cur == '': cur = 'Home' txt = get_header(cur) #"<div id=top_menu>" txt += '<div id = "container">\n' txt += ' <div id = "header">\n' txt += ' <!-- Banner -->\n' txt += ' <img src = "' + os.path.join('/static','aikif_banner.jpg') + '" alt="AIKIF Banner"/>\n' txt += ' <ul id = "menu_list">\n' for m in menu: if m[1] == cur: txt += ' <LI id="top_menu_selected"><a href=' + m[0] + '>' + m[1] + '</a></li>\n' pgeHdg = m[1] try: pgeBlurb = m[2] except Exception: pass else: txt += ' <LI id="top_menu"><a href=' + m[0] + '>' + m[1] + '</a></li>\n' txt += " </ul>\n </div>\n\n" txt += '<H1>AIKIF ' + pgeHdg + '</H1>\n' txt += '<H4>' + pgeBlurb + '</H4>\n' return txt
[ "def", "aikif_web_menu", "(", "cur", "=", "''", ")", ":", "pgeHdg", "=", "''", "pgeBlurb", "=", "''", "if", "cur", "==", "''", ":", "cur", "=", "'Home'", "txt", "=", "get_header", "(", "cur", ")", "txt", "+=", "'<div id = \"container\">\\n'", "txt", "+=", "' <div id = \"header\">\\n'", "txt", "+=", "' <!-- Banner ", "txt", "+=", "' <img src = \"'", "+", "os", ".", "path", ".", "join", "(", "'/static'", ",", "'aikif_banner.jpg'", ")", "+", "'\" alt=\"AIKIF Banner\"/>\\n'", "txt", "+=", "' <ul id = \"menu_list\">\\n'", "for", "m", "in", "menu", ":", "if", "m", "[", "1", "]", "==", "cur", ":", "txt", "+=", "' <LI id=\"top_menu_selected\"><a href='", "+", "m", "[", "0", "]", "+", "'>'", "+", "m", "[", "1", "]", "+", "'</a></li>\\n'", "pgeHdg", "=", "m", "[", "1", "]", "try", ":", "pgeBlurb", "=", "m", "[", "2", "]", "except", "Exception", ":", "pass", "else", ":", "txt", "+=", "' <LI id=\"top_menu\"><a href='", "+", "m", "[", "0", "]", "+", "'>'", "+", "m", "[", "1", "]", "+", "'</a></li>\\n'", "txt", "+=", "\" </ul>\\n </div>\\n\\n\"", "txt", "+=", "'<H1>AIKIF '", "+", "pgeHdg", "+", "'</H1>\\n'", "txt", "+=", "'<H4>'", "+", "pgeBlurb", "+", "'</H4>\\n'", "return", "txt" ]
returns the web page header containing standard AIKIF top level web menu
[ "returns", "the", "web", "page", "header", "containing", "standard", "AIKIF", "top", "level", "web", "menu" ]
fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/web_app/web_aikif.py#L183-L208
train
acutesoftware/AIKIF
scripts/res_core_data_NOTES.py
main
def main(): """ This generates the research document based on the results of the various programs and includes RST imports for introduction and summary """ print("Generating research notes...") if os.path.exists(fname): os.remove(fname) append_rst('================================================\n') append_rst('Comparison of Information Aggregation Techniques\n') append_rst('================================================\n\n') append_rst('.. contents::\n\n') # import header append_rst(open('res_core_data_HEADER.rst', 'r').read()) append_rst(res_core_data_mthd1.get_method()) append_rst(res_core_data_mthd2.get_method()) # call programs append_rst('Results\n') append_rst('=====================================\n') for dat in data_files: append_rst('\nData File : ' + dat + '\n---------------------------------------\n\n') res_core_data_mthd1.get_results(fname, dat) res_core_data_mthd2.get_results(fname, dat) # import footer append_rst(open('res_core_data_FOOTER.rst', 'r').read()) print("Done!")
python
def main(): """ This generates the research document based on the results of the various programs and includes RST imports for introduction and summary """ print("Generating research notes...") if os.path.exists(fname): os.remove(fname) append_rst('================================================\n') append_rst('Comparison of Information Aggregation Techniques\n') append_rst('================================================\n\n') append_rst('.. contents::\n\n') # import header append_rst(open('res_core_data_HEADER.rst', 'r').read()) append_rst(res_core_data_mthd1.get_method()) append_rst(res_core_data_mthd2.get_method()) # call programs append_rst('Results\n') append_rst('=====================================\n') for dat in data_files: append_rst('\nData File : ' + dat + '\n---------------------------------------\n\n') res_core_data_mthd1.get_results(fname, dat) res_core_data_mthd2.get_results(fname, dat) # import footer append_rst(open('res_core_data_FOOTER.rst', 'r').read()) print("Done!")
[ "def", "main", "(", ")", ":", "print", "(", "\"Generating research notes...\"", ")", "if", "os", ".", "path", ".", "exists", "(", "fname", ")", ":", "os", ".", "remove", "(", "fname", ")", "append_rst", "(", "'================================================\\n'", ")", "append_rst", "(", "'Comparison of Information Aggregation Techniques\\n'", ")", "append_rst", "(", "'================================================\\n\\n'", ")", "append_rst", "(", "'.. contents::\\n\\n'", ")", "append_rst", "(", "open", "(", "'res_core_data_HEADER.rst'", ",", "'r'", ")", ".", "read", "(", ")", ")", "append_rst", "(", "res_core_data_mthd1", ".", "get_method", "(", ")", ")", "append_rst", "(", "res_core_data_mthd2", ".", "get_method", "(", ")", ")", "append_rst", "(", "'Results\\n'", ")", "append_rst", "(", "'=====================================\\n'", ")", "for", "dat", "in", "data_files", ":", "append_rst", "(", "'\\nData File : '", "+", "dat", "+", "'\\n---------------------------------------\\n\\n'", ")", "res_core_data_mthd1", ".", "get_results", "(", "fname", ",", "dat", ")", "res_core_data_mthd2", ".", "get_results", "(", "fname", ",", "dat", ")", "append_rst", "(", "open", "(", "'res_core_data_FOOTER.rst'", ",", "'r'", ")", ".", "read", "(", ")", ")", "print", "(", "\"Done!\"", ")" ]
This generates the research document based on the results of the various programs and includes RST imports for introduction and summary
[ "This", "generates", "the", "research", "document", "based", "on", "the", "results", "of", "the", "various", "programs", "and", "includes", "RST", "imports", "for", "introduction", "and", "summary" ]
fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/scripts/res_core_data_NOTES.py#L20-L50
train
acutesoftware/AIKIF
aikif/knowledge.py
RawData.find
def find(self, txt): """ returns a list of records containing text """ result = [] for d in self.data: if txt in d: result.append(d) return result
python
def find(self, txt): """ returns a list of records containing text """ result = [] for d in self.data: if txt in d: result.append(d) return result
[ "def", "find", "(", "self", ",", "txt", ")", ":", "result", "=", "[", "]", "for", "d", "in", "self", ".", "data", ":", "if", "txt", "in", "d", ":", "result", ".", "append", "(", "d", ")", "return", "result" ]
returns a list of records containing text
[ "returns", "a", "list", "of", "records", "containing", "text" ]
fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/knowledge.py#L56-L64
train
Nachtfeuer/pipeline
spline/tools/report/collector.py
CollectorUpdate.schema_complete
def schema_complete(): """Schema for data in CollectorUpdate.""" return Schema({ 'stage': And(str, len), 'timestamp': int, 'status': And(str, lambda s: s in ['started', 'succeeded', 'failed']), # optional matrix Optional('matrix', default='default'): And(str, len), # optional information Optional('information', default={}): { Optional(Regex(r'([a-z][_a-z]*)')): object } })
python
def schema_complete(): """Schema for data in CollectorUpdate.""" return Schema({ 'stage': And(str, len), 'timestamp': int, 'status': And(str, lambda s: s in ['started', 'succeeded', 'failed']), # optional matrix Optional('matrix', default='default'): And(str, len), # optional information Optional('information', default={}): { Optional(Regex(r'([a-z][_a-z]*)')): object } })
[ "def", "schema_complete", "(", ")", ":", "return", "Schema", "(", "{", "'stage'", ":", "And", "(", "str", ",", "len", ")", ",", "'timestamp'", ":", "int", ",", "'status'", ":", "And", "(", "str", ",", "lambda", "s", ":", "s", "in", "[", "'started'", ",", "'succeeded'", ",", "'failed'", "]", ")", ",", "Optional", "(", "'matrix'", ",", "default", "=", "'default'", ")", ":", "And", "(", "str", ",", "len", ")", ",", "Optional", "(", "'information'", ",", "default", "=", "{", "}", ")", ":", "{", "Optional", "(", "Regex", "(", "r'([a-z][_a-z]*)'", ")", ")", ":", "object", "}", "}", ")" ]
Schema for data in CollectorUpdate.
[ "Schema", "for", "data", "in", "CollectorUpdate", "." ]
04ca18c4e95e4349532bb45b768206393e1f2c13
https://github.com/Nachtfeuer/pipeline/blob/04ca18c4e95e4349532bb45b768206393e1f2c13/spline/tools/report/collector.py#L56-L68
train
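The schemas in this collector module use the schema package (Schema, And, Optional, Regex). A self-contained validation of a made-up CollectorUpdate-style payload, sketching how the Optional defaults are filled in:

from schema import Schema, And, Optional, Regex, SchemaError

update_schema = Schema({
    'stage': And(str, len),
    'timestamp': int,
    'status': And(str, lambda s: s in ['started', 'succeeded', 'failed']),
    Optional('matrix', default='default'): And(str, len),
    Optional('information', default={}): {Optional(Regex(r'([a-z][_a-z]*)')): object}
})

try:
    # 'matrix' and 'information' are omitted on purpose; validate() adds the defaults.
    document = update_schema.validate(
        {'stage': 'Build', 'timestamp': 1580000000, 'status': 'started'})
    print(document['matrix'], document['information'])   # default {}
except SchemaError as error:
    print(error)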
Nachtfeuer/pipeline
spline/tools/report/collector.py
CollectorStage.schema_event_items
def schema_event_items(): """Schema for event items.""" return { 'timestamp': And(int, lambda n: n > 0), Optional('information', default={}): { Optional(Regex(r'([a-z][_a-z]*)')): object } }
python
def schema_event_items(): """Schema for event items.""" return { 'timestamp': And(int, lambda n: n > 0), Optional('information', default={}): { Optional(Regex(r'([a-z][_a-z]*)')): object } }
[ "def", "schema_event_items", "(", ")", ":", "return", "{", "'timestamp'", ":", "And", "(", "int", ",", "lambda", "n", ":", "n", ">", "0", ")", ",", "Optional", "(", "'information'", ",", "default", "=", "{", "}", ")", ":", "{", "Optional", "(", "Regex", "(", "r'([a-z][_a-z]*)'", ")", ")", ":", "object", "}", "}" ]
Schema for event items.
[ "Schema", "for", "event", "items", "." ]
04ca18c4e95e4349532bb45b768206393e1f2c13
https://github.com/Nachtfeuer/pipeline/blob/04ca18c4e95e4349532bb45b768206393e1f2c13/spline/tools/report/collector.py#L115-L122
train
Nachtfeuer/pipeline
spline/tools/report/collector.py
CollectorStage.schema_complete
def schema_complete(): """Schema for data in CollectorStage.""" return Schema({ 'stage': And(str, len), 'status': And(str, lambda s: s in ['started', 'succeeded', 'failed']), Optional('events', default=[]): And(len, [CollectorStage.schema_event_items()]) })
python
def schema_complete(): """Schema for data in CollectorStage.""" return Schema({ 'stage': And(str, len), 'status': And(str, lambda s: s in ['started', 'succeeded', 'failed']), Optional('events', default=[]): And(len, [CollectorStage.schema_event_items()]) })
[ "def", "schema_complete", "(", ")", ":", "return", "Schema", "(", "{", "'stage'", ":", "And", "(", "str", ",", "len", ")", ",", "'status'", ":", "And", "(", "str", ",", "lambda", "s", ":", "s", "in", "[", "'started'", ",", "'succeeded'", ",", "'failed'", "]", ")", ",", "Optional", "(", "'events'", ",", "default", "=", "[", "]", ")", ":", "And", "(", "len", ",", "[", "CollectorStage", ".", "schema_event_items", "(", ")", "]", ")", "}", ")" ]
Schema for data in CollectorStage.
[ "Schema", "for", "data", "in", "CollectorStage", "." ]
04ca18c4e95e4349532bb45b768206393e1f2c13
https://github.com/Nachtfeuer/pipeline/blob/04ca18c4e95e4349532bb45b768206393e1f2c13/spline/tools/report/collector.py#L125-L131
train
Nachtfeuer/pipeline
spline/tools/report/collector.py
CollectorStage.add
def add(self, timestamp, information): """ Add event information. Args: timestamp (int): event timestamp. information (dict): event information. Raises: RuntimeError: when validation of parameters has failed. """ try: item = Schema(CollectorStage.schema_event_items()).validate({ 'timestamp': timestamp, 'information': information }) self.events.append(item) except SchemaError as exception: Logger.get_logger(__name__).error(exception) raise RuntimeError(str(exception))
python
def add(self, timestamp, information): """ Add event information. Args: timestamp (int): event timestamp. information (dict): event information. Raises: RuntimeError: when validation of parameters has failed. """ try: item = Schema(CollectorStage.schema_event_items()).validate({ 'timestamp': timestamp, 'information': information }) self.events.append(item) except SchemaError as exception: Logger.get_logger(__name__).error(exception) raise RuntimeError(str(exception))
[ "def", "add", "(", "self", ",", "timestamp", ",", "information", ")", ":", "try", ":", "item", "=", "Schema", "(", "CollectorStage", ".", "schema_event_items", "(", ")", ")", ".", "validate", "(", "{", "'timestamp'", ":", "timestamp", ",", "'information'", ":", "information", "}", ")", "self", ".", "events", ".", "append", "(", "item", ")", "except", "SchemaError", "as", "exception", ":", "Logger", ".", "get_logger", "(", "__name__", ")", ".", "error", "(", "exception", ")", "raise", "RuntimeError", "(", "str", "(", "exception", ")", ")" ]
Add event information. Args: timestamp (int): event timestamp. information (dict): event information. Raises: RuntimeError: when validation of parameters has failed.
[ "Add", "event", "information", "." ]
04ca18c4e95e4349532bb45b768206393e1f2c13
https://github.com/Nachtfeuer/pipeline/blob/04ca18c4e95e4349532bb45b768206393e1f2c13/spline/tools/report/collector.py#L152-L170
train
Nachtfeuer/pipeline
spline/tools/report/collector.py
CollectorStage.duration
def duration(self): """ Calculate how long the stage took. Returns: float: (current) duration of the stage """ duration = 0.0 if len(self.events) > 0: first = datetime.fromtimestamp(self.events[0]['timestamp']) last = datetime.fromtimestamp(self.events[-1]['timestamp']) duration = (last - first).total_seconds() return duration
python
def duration(self): """ Calculate how long the stage took. Returns: float: (current) duration of the stage """ duration = 0.0 if len(self.events) > 0: first = datetime.fromtimestamp(self.events[0]['timestamp']) last = datetime.fromtimestamp(self.events[-1]['timestamp']) duration = (last - first).total_seconds() return duration
[ "def", "duration", "(", "self", ")", ":", "duration", "=", "0.0", "if", "len", "(", "self", ".", "events", ")", ">", "0", ":", "first", "=", "datetime", ".", "fromtimestamp", "(", "self", ".", "events", "[", "0", "]", "[", "'timestamp'", "]", ")", "last", "=", "datetime", ".", "fromtimestamp", "(", "self", ".", "events", "[", "-", "1", "]", "[", "'timestamp'", "]", ")", "duration", "=", "(", "last", "-", "first", ")", ".", "total_seconds", "(", ")", "return", "duration" ]
Calculate how long the stage took. Returns: float: (current) duration of the stage
[ "Calculate", "how", "long", "the", "stage", "took", "." ]
04ca18c4e95e4349532bb45b768206393e1f2c13
https://github.com/Nachtfeuer/pipeline/blob/04ca18c4e95e4349532bb45b768206393e1f2c13/spline/tools/report/collector.py#L172-L184
train
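duration() above is just the difference between the first and last event timestamps; the same arithmetic stands alone as:

from datetime import datetime

events = [{'timestamp': 1580000000}, {'timestamp': 1580000090}]   # made-up events

first = datetime.fromtimestamp(events[0]['timestamp'])
last = datetime.fromtimestamp(events[-1]['timestamp'])
print((last - first).total_seconds())   # 90.0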
Nachtfeuer/pipeline
spline/tools/report/collector.py
Store.count_stages
def count_stages(self, matrix_name): """ Number of registered stages for given matrix name. Parameters: matrix_name (str): name of the matrix Returns: int: number of reported stages for given matrix name. """ return len(self.data[matrix_name]) if matrix_name in self.data else 0
python
def count_stages(self, matrix_name): """ Number of registered stages for given matrix name. Parameters: matrix_name (str): name of the matrix Returns: int: number of reported stages for given matrix name. """ return len(self.data[matrix_name]) if matrix_name in self.data else 0
[ "def", "count_stages", "(", "self", ",", "matrix_name", ")", ":", "return", "len", "(", "self", ".", "data", "[", "matrix_name", "]", ")", "if", "matrix_name", "in", "self", ".", "data", "else", "0" ]
Number of registered stages for given matrix name. Parameters: matrix_name (str): name of the matrix Returns: int: number of reported stages for given matrix name.
[ "Number", "of", "registered", "stages", "for", "given", "matrix", "name", "." ]
04ca18c4e95e4349532bb45b768206393e1f2c13
https://github.com/Nachtfeuer/pipeline/blob/04ca18c4e95e4349532bb45b768206393e1f2c13/spline/tools/report/collector.py#L229-L239
train
Nachtfeuer/pipeline
spline/tools/report/collector.py
Store.get_stage
def get_stage(self, matrix_name, stage_name): """ Get Stage of a concrete matrix. Attributes: matrix_name (str): name of the matrix stage_name (str): name of the stage. Returns: CollectorStage: when stage has been found or None. """ found_stage = None if matrix_name in self.data: result = Select(self.data[matrix_name]).where( lambda entry: entry.stage == stage_name).build() found_stage = result[0] if len(result) > 0 else None return found_stage
python
def get_stage(self, matrix_name, stage_name): """ Get Stage of a concrete matrix. Attributes: matrix_name (str): name of the matrix stage_name (str): name of the stage. Returns: CollectorStage: when stage has been found or None. """ found_stage = None if matrix_name in self.data: result = Select(self.data[matrix_name]).where( lambda entry: entry.stage == stage_name).build() found_stage = result[0] if len(result) > 0 else None return found_stage
[ "def", "get_stage", "(", "self", ",", "matrix_name", ",", "stage_name", ")", ":", "found_stage", "=", "None", "if", "matrix_name", "in", "self", ".", "data", ":", "result", "=", "Select", "(", "self", ".", "data", "[", "matrix_name", "]", ")", ".", "where", "(", "lambda", "entry", ":", "entry", ".", "stage", "==", "stage_name", ")", ".", "build", "(", ")", "found_stage", "=", "result", "[", "0", "]", "if", "len", "(", "result", ")", ">", "0", "else", "None", "return", "found_stage" ]
Get Stage of a concrete matrix. Attributes: matrix_name (str): name of the matrix stage_name (str): name of the stage. Returns: CollectorStage: when stage has been found or None.
[ "Get", "Stage", "of", "a", "concrete", "matrix", "." ]
04ca18c4e95e4349532bb45b768206393e1f2c13
https://github.com/Nachtfeuer/pipeline/blob/04ca18c4e95e4349532bb45b768206393e1f2c13/spline/tools/report/collector.py#L245-L261
train
Nachtfeuer/pipeline
spline/tools/report/collector.py
Store.get_duration
def get_duration(self, matrix_name): """ Get duration for a concrete matrix. Args: matrix_name (str): name of the Matrix. Returns: float: duration of concrete matrix in seconds. """ duration = 0.0 if matrix_name in self.data: duration = sum([stage.duration() for stage in self.data[matrix_name]]) return duration
python
def get_duration(self, matrix_name): """ Get duration for a concrete matrix. Args: matrix_name (str): name of the Matrix. Returns: float: duration of concrete matrix in seconds. """ duration = 0.0 if matrix_name in self.data: duration = sum([stage.duration() for stage in self.data[matrix_name]]) return duration
[ "def", "get_duration", "(", "self", ",", "matrix_name", ")", ":", "duration", "=", "0.0", "if", "matrix_name", "in", "self", ".", "data", ":", "duration", "=", "sum", "(", "[", "stage", ".", "duration", "(", ")", "for", "stage", "in", "self", ".", "data", "[", "matrix_name", "]", "]", ")", "return", "duration" ]
Get duration for a concrete matrix. Args: matrix_name (str): name of the Matrix. Returns: float: duration of concrete matrix in seconds.
[ "Get", "duration", "for", "a", "concrete", "matrix", "." ]
04ca18c4e95e4349532bb45b768206393e1f2c13
https://github.com/Nachtfeuer/pipeline/blob/04ca18c4e95e4349532bb45b768206393e1f2c13/spline/tools/report/collector.py#L263-L276
train
Nachtfeuer/pipeline
spline/tools/report/collector.py
Store.update
def update(self, item): """ Add a collector item. Args: item (CollectorUpdate): event data like stage, timestampe and status. """ if item.matrix not in self.data: self.data[item.matrix] = [] result = Select(self.data[item.matrix]).where( lambda entry: entry.stage == item.stage).build() if len(result) > 0: stage = result[0] stage.status = item.status stage.add(item.timestamp, item.information) else: stage = CollectorStage(stage=item.stage, status=item.status) stage.add(item.timestamp, item.information) self.data[item.matrix].append(stage)
python
def update(self, item): """ Add a collector item. Args: item (CollectorUpdate): event data like stage, timestampe and status. """ if item.matrix not in self.data: self.data[item.matrix] = [] result = Select(self.data[item.matrix]).where( lambda entry: entry.stage == item.stage).build() if len(result) > 0: stage = result[0] stage.status = item.status stage.add(item.timestamp, item.information) else: stage = CollectorStage(stage=item.stage, status=item.status) stage.add(item.timestamp, item.information) self.data[item.matrix].append(stage)
[ "def", "update", "(", "self", ",", "item", ")", ":", "if", "item", ".", "matrix", "not", "in", "self", ".", "data", ":", "self", ".", "data", "[", "item", ".", "matrix", "]", "=", "[", "]", "result", "=", "Select", "(", "self", ".", "data", "[", "item", ".", "matrix", "]", ")", ".", "where", "(", "lambda", "entry", ":", "entry", ".", "stage", "==", "item", ".", "stage", ")", ".", "build", "(", ")", "if", "len", "(", "result", ")", ">", "0", ":", "stage", "=", "result", "[", "0", "]", "stage", ".", "status", "=", "item", ".", "status", "stage", ".", "add", "(", "item", ".", "timestamp", ",", "item", ".", "information", ")", "else", ":", "stage", "=", "CollectorStage", "(", "stage", "=", "item", ".", "stage", ",", "status", "=", "item", ".", "status", ")", "stage", ".", "add", "(", "item", ".", "timestamp", ",", "item", ".", "information", ")", "self", ".", "data", "[", "item", ".", "matrix", "]", ".", "append", "(", "stage", ")" ]
Add a collector item. Args: item (CollectorUpdate): event data like stage, timestampe and status.
[ "Add", "a", "collector", "item", "." ]
04ca18c4e95e4349532bb45b768206393e1f2c13
https://github.com/Nachtfeuer/pipeline/blob/04ca18c4e95e4349532bb45b768206393e1f2c13/spline/tools/report/collector.py#L278-L298
train
Nachtfeuer/pipeline
spline/tools/report/collector.py
Collector.run
def run(self): """Collector main loop.""" while True: data = self.queue.get() if data is None: Logger.get_logger(__name__).info("Stopping collector process ...") break # updating the report data self.store.update(data) # writing the report generate(self.store, 'html', os.getcwd())
python
def run(self): """Collector main loop.""" while True: data = self.queue.get() if data is None: Logger.get_logger(__name__).info("Stopping collector process ...") break # updating the report data self.store.update(data) # writing the report generate(self.store, 'html', os.getcwd())
[ "def", "run", "(", "self", ")", ":", "while", "True", ":", "data", "=", "self", ".", "queue", ".", "get", "(", ")", "if", "data", "is", "None", ":", "Logger", ".", "get_logger", "(", "__name__", ")", ".", "info", "(", "\"Stopping collector process ...\"", ")", "break", "self", ".", "store", ".", "update", "(", "data", ")", "generate", "(", "self", ".", "store", ",", "'html'", ",", "os", ".", "getcwd", "(", ")", ")" ]
Collector main loop.
[ "Collector", "main", "loop", "." ]
04ca18c4e95e4349532bb45b768206393e1f2c13
https://github.com/Nachtfeuer/pipeline/blob/04ca18c4e95e4349532bb45b768206393e1f2c13/spline/tools/report/collector.py#L310-L321
train
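Collector.run above is a standard queue-consumer loop stopped by a None "poison pill". Stripped of the spline store update and HTML report generation, the same loop shape is:

import queue

def run(work_queue):
    while True:
        data = work_queue.get()
        if data is None:                    # poison pill: stop the consumer
            print("Stopping collector process ...")
            break
        print("processing", data)           # stand-in for store.update() + generate()

q = queue.Queue()
q.put({'stage': 'Build', 'status': 'started'})
q.put(None)
run(q)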
acutesoftware/AIKIF
scripts/examples/gui_view_world.py
read_map
def read_map(fname): """ reads a saved text file to list """ lst = [] with open(fname, "r") as f: for line in f: lst.append(line) return lst
python
def read_map(fname): """ reads a saved text file to list """ lst = [] with open(fname, "r") as f: for line in f: lst.append(line) return lst
[ "def", "read_map", "(", "fname", ")", ":", "lst", "=", "[", "]", "with", "open", "(", "fname", ",", "\"r\"", ")", "as", "f", ":", "for", "line", "in", "f", ":", "lst", ".", "append", "(", "line", ")", "return", "lst" ]
reads a saved text file to list
[ "reads", "a", "saved", "text", "file", "to", "list" ]
fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/scripts/examples/gui_view_world.py#L39-L47
train
acutesoftware/AIKIF
scripts/examples/gui_view_world.py
gui_view_tk.show_grid_from_file
def show_grid_from_file(self, fname): """ reads a saved grid file and paints it on the canvas """ with open(fname, "r") as f: for y, row in enumerate(f): for x, val in enumerate(row): self.draw_cell(y, x, val)
python
def show_grid_from_file(self, fname): """ reads a saved grid file and paints it on the canvas """ with open(fname, "r") as f: for y, row in enumerate(f): for x, val in enumerate(row): self.draw_cell(y, x, val)
[ "def", "show_grid_from_file", "(", "self", ",", "fname", ")", ":", "with", "open", "(", "fname", ",", "\"r\"", ")", "as", "f", ":", "for", "y", ",", "row", "in", "enumerate", "(", "f", ")", ":", "for", "x", ",", "val", "in", "enumerate", "(", "row", ")", ":", "self", ".", "draw_cell", "(", "y", ",", "x", ",", "val", ")" ]
reads a saved grid file and paints it on the canvas
[ "reads", "a", "saved", "grid", "file", "and", "paints", "it", "on", "the", "canvas" ]
fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/scripts/examples/gui_view_world.py#L98-L105
train
acutesoftware/AIKIF
scripts/examples/gui_view_world.py
gui_view_tk.draw_cell
def draw_cell(self, row, col, val): """ draw a cell as position row, col containing val """ if val == 'T': self.paint_target(row,col) elif val == '#': self.paint_block(row,col) elif val == 'X': self.paint_hill(row,col) elif val == '.': self.paint_land(row,col) elif val in ['A']: self.paint_agent_location(row,col) elif val in ['1','2','3','4','5','6','7','8','9']: self.paint_agent_trail(row,col, val)
python
def draw_cell(self, row, col, val): """ draw a cell as position row, col containing val """ if val == 'T': self.paint_target(row,col) elif val == '#': self.paint_block(row,col) elif val == 'X': self.paint_hill(row,col) elif val == '.': self.paint_land(row,col) elif val in ['A']: self.paint_agent_location(row,col) elif val in ['1','2','3','4','5','6','7','8','9']: self.paint_agent_trail(row,col, val)
[ "def", "draw_cell", "(", "self", ",", "row", ",", "col", ",", "val", ")", ":", "if", "val", "==", "'T'", ":", "self", ".", "paint_target", "(", "row", ",", "col", ")", "elif", "val", "==", "'#'", ":", "self", ".", "paint_block", "(", "row", ",", "col", ")", "elif", "val", "==", "'X'", ":", "self", ".", "paint_hill", "(", "row", ",", "col", ")", "elif", "val", "==", "'.'", ":", "self", ".", "paint_land", "(", "row", ",", "col", ")", "elif", "val", "in", "[", "'A'", "]", ":", "self", ".", "paint_agent_location", "(", "row", ",", "col", ")", "elif", "val", "in", "[", "'1'", ",", "'2'", ",", "'3'", ",", "'4'", ",", "'5'", ",", "'6'", ",", "'7'", ",", "'8'", ",", "'9'", "]", ":", "self", ".", "paint_agent_trail", "(", "row", ",", "col", ",", "val", ")" ]
draw a cell as position row, col containing val
[ "draw", "a", "cell", "as", "position", "row", "col", "containing", "val" ]
fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/scripts/examples/gui_view_world.py#L108-L123
train
acutesoftware/AIKIF
scripts/examples/gui_view_world.py
gui_view_tk.paint_agent_trail
def paint_agent_trail(self, y, x, val): """ paint an agent trail as ONE pixel to allow for multiple agent trails to be seen in the same cell """ for j in range(1,self.cell_height-1): for i in range(1,self.cell_width-1): self.img.put(self.agent_color(val), (x*self.cell_width+i, y*self.cell_height+j))
python
def paint_agent_trail(self, y, x, val): """ paint an agent trail as ONE pixel to allow for multiple agent trails to be seen in the same cell """ for j in range(1,self.cell_height-1): for i in range(1,self.cell_width-1): self.img.put(self.agent_color(val), (x*self.cell_width+i, y*self.cell_height+j))
[ "def", "paint_agent_trail", "(", "self", ",", "y", ",", "x", ",", "val", ")", ":", "for", "j", "in", "range", "(", "1", ",", "self", ".", "cell_height", "-", "1", ")", ":", "for", "i", "in", "range", "(", "1", ",", "self", ".", "cell_width", "-", "1", ")", ":", "self", ".", "img", ".", "put", "(", "self", ".", "agent_color", "(", "val", ")", ",", "(", "x", "*", "self", ".", "cell_width", "+", "i", ",", "y", "*", "self", ".", "cell_height", "+", "j", ")", ")" ]
paint an agent trail as ONE pixel to allow for multiple agent trails to be seen in the same cell
[ "paint", "an", "agent", "trail", "as", "ONE", "pixel", "to", "allow", "for", "multiple", "agent", "trails", "to", "be", "seen", "in", "the", "same", "cell" ]
fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/scripts/examples/gui_view_world.py#L150-L157
train
acutesoftware/AIKIF
scripts/examples/gui_view_world.py
gui_view_tk.agent_color
def agent_color(self, val): """ gets a colour for agent 0 - 9 """ if val == '0': colour = 'blue' elif val == '1': colour = 'navy' elif val == '2': colour = 'firebrick' elif val == '3': colour = 'blue' elif val == '4': colour = 'blue2' elif val == '5': colour = 'blue4' elif val == '6': colour = 'gray22' elif val == '7': colour = 'gray57' elif val == '8': colour = 'red4' elif val == '9': colour = 'red3' return colour
python
def agent_color(self, val): """ gets a colour for agent 0 - 9 """ if val == '0': colour = 'blue' elif val == '1': colour = 'navy' elif val == '2': colour = 'firebrick' elif val == '3': colour = 'blue' elif val == '4': colour = 'blue2' elif val == '5': colour = 'blue4' elif val == '6': colour = 'gray22' elif val == '7': colour = 'gray57' elif val == '8': colour = 'red4' elif val == '9': colour = 'red3' return colour
[ "def", "agent_color", "(", "self", ",", "val", ")", ":", "if", "val", "==", "'0'", ":", "colour", "=", "'blue'", "elif", "val", "==", "'1'", ":", "colour", "=", "'navy'", "elif", "val", "==", "'2'", ":", "colour", "=", "'firebrick'", "elif", "val", "==", "'3'", ":", "colour", "=", "'blue'", "elif", "val", "==", "'4'", ":", "colour", "=", "'blue2'", "elif", "val", "==", "'5'", ":", "colour", "=", "'blue4'", "elif", "val", "==", "'6'", ":", "colour", "=", "'gray22'", "elif", "val", "==", "'7'", ":", "colour", "=", "'gray57'", "elif", "val", "==", "'8'", ":", "colour", "=", "'red4'", "elif", "val", "==", "'9'", ":", "colour", "=", "'red3'", "return", "colour" ]
gets a colour for agent 0 - 9
[ "gets", "a", "colour", "for", "agent", "0", "-", "9" ]
fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/scripts/examples/gui_view_world.py#L167-L194
train
acutesoftware/AIKIF
scripts/examples/happiness_solver.py
create_random_population
def create_random_population(num=100): """ create a list of people with randomly generated names and stats """ people = [] for _ in range(num): nme = 'blah' tax_min = random.randint(1,40)/100 tax_max = tax_min + random.randint(1,40)/100 tradition = random.randint(1,100)/100 equity = random.randint(1,100)/100 pers = mod_hap_env.Person(nme, {'tax_min':tax_min, 'tax_max':tax_max, 'tradition':tradition, 'equity':equity}) people.append(pers) print(pers) return people
python
def create_random_population(num=100): """ create a list of people with randomly generated names and stats """ people = [] for _ in range(num): nme = 'blah' tax_min = random.randint(1,40)/100 tax_max = tax_min + random.randint(1,40)/100 tradition = random.randint(1,100)/100 equity = random.randint(1,100)/100 pers = mod_hap_env.Person(nme, {'tax_min':tax_min, 'tax_max':tax_max, 'tradition':tradition, 'equity':equity}) people.append(pers) print(pers) return people
[ "def", "create_random_population", "(", "num", "=", "100", ")", ":", "people", "=", "[", "]", "for", "_", "in", "range", "(", "num", ")", ":", "nme", "=", "'blah'", "tax_min", "=", "random", ".", "randint", "(", "1", ",", "40", ")", "/", "100", "tax_max", "=", "tax_min", "+", "random", ".", "randint", "(", "1", ",", "40", ")", "/", "100", "tradition", "=", "random", ".", "randint", "(", "1", ",", "100", ")", "/", "100", "equity", "=", "random", ".", "randint", "(", "1", ",", "100", ")", "/", "100", "pers", "=", "mod_hap_env", ".", "Person", "(", "nme", ",", "{", "'tax_min'", ":", "tax_min", ",", "'tax_max'", ":", "tax_max", ",", "'tradition'", ":", "tradition", ",", "'equity'", ":", "equity", "}", ")", "people", ".", "append", "(", "pers", ")", "print", "(", "pers", ")", "return", "people" ]
create a list of people with randomly generated names and stats
[ "create", "a", "list", "of", "people", "with", "randomly", "generated", "names", "and", "stats" ]
fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/scripts/examples/happiness_solver.py#L29-L44
train
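create_random_population above builds Person objects with tax and preference values derived from random.randint; a trimmed-down sketch that returns plain dicts instead of mod_hap_env.Person objects:

import random

def create_random_population(num=5):
    people = []
    for _ in range(num):
        tax_min = random.randint(1, 40) / 100
        people.append({
            'tax_min': tax_min,
            'tax_max': tax_min + random.randint(1, 40) / 100,
            'tradition': random.randint(1, 100) / 100,
            'equity': random.randint(1, 100) / 100,
        })
    return people

print(create_random_population(2))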
Nachtfeuer/pipeline
spline/pipeline.py
Pipeline.cleanup
def cleanup(self): """Run cleanup script of pipeline when hook is configured.""" if self.data.hooks and len(self.data.hooks.cleanup) > 0: env = self.data.env_list[0].copy() env.update({'PIPELINE_RESULT': 'SUCCESS', 'PIPELINE_SHELL_EXIT_CODE': '0'}) config = ShellConfig(script=self.data.hooks.cleanup, model=self.model, env=env, dry_run=self.options.dry_run, debug=self.options.debug, strict=self.options.strict, temporary_scripts_path=self.options.temporary_scripts_path) cleanup_shell = Bash(config) for line in cleanup_shell.process(): yield line
python
def cleanup(self): """Run cleanup script of pipeline when hook is configured.""" if self.data.hooks and len(self.data.hooks.cleanup) > 0: env = self.data.env_list[0].copy() env.update({'PIPELINE_RESULT': 'SUCCESS', 'PIPELINE_SHELL_EXIT_CODE': '0'}) config = ShellConfig(script=self.data.hooks.cleanup, model=self.model, env=env, dry_run=self.options.dry_run, debug=self.options.debug, strict=self.options.strict, temporary_scripts_path=self.options.temporary_scripts_path) cleanup_shell = Bash(config) for line in cleanup_shell.process(): yield line
[ "def", "cleanup", "(", "self", ")", ":", "if", "self", ".", "data", ".", "hooks", "and", "len", "(", "self", ".", "data", ".", "hooks", ".", "cleanup", ")", ">", "0", ":", "env", "=", "self", ".", "data", ".", "env_list", "[", "0", "]", ".", "copy", "(", ")", "env", ".", "update", "(", "{", "'PIPELINE_RESULT'", ":", "'SUCCESS'", ",", "'PIPELINE_SHELL_EXIT_CODE'", ":", "'0'", "}", ")", "config", "=", "ShellConfig", "(", "script", "=", "self", ".", "data", ".", "hooks", ".", "cleanup", ",", "model", "=", "self", ".", "model", ",", "env", "=", "env", ",", "dry_run", "=", "self", ".", "options", ".", "dry_run", ",", "debug", "=", "self", ".", "options", ".", "debug", ",", "strict", "=", "self", ".", "options", ".", "strict", ",", "temporary_scripts_path", "=", "self", ".", "options", ".", "temporary_scripts_path", ")", "cleanup_shell", "=", "Bash", "(", "config", ")", "for", "line", "in", "cleanup_shell", ".", "process", "(", ")", ":", "yield", "line" ]
Run cleanup script of pipeline when hook is configured.
[ "Run", "cleanup", "script", "of", "pipeline", "when", "hook", "is", "configured", "." ]
04ca18c4e95e4349532bb45b768206393e1f2c13
https://github.com/Nachtfeuer/pipeline/blob/04ca18c4e95e4349532bb45b768206393e1f2c13/spline/pipeline.py#L68-L79
train
Nachtfeuer/pipeline
spline/pipeline.py
Pipeline.process
def process(self, pipeline): """Processing the whole pipeline definition.""" output = [] for entry in pipeline: key = list(entry.keys())[0] # an environment block can be repeated if key == "env": self.data.env_list[0].update(entry[key]) self.logger.debug("Updating environment at level 0 with %s", self.data.env_list[0]) continue # after validation it can't be anything else but a stage # and the title is inside the round brackets: stage = Stage(self, re.match(r"stage\((?P<title>.*)\)", key).group("title")) result = stage.process(entry[key]) output += result['output'] if not result['success']: return {'success': False, 'output': output} # logging the output of the cleanup shell when registered for line in self.cleanup(): output.append(line) self.logger.info(" | %s", line) self.event.succeeded() return {'success': True, 'output': output}
python
def process(self, pipeline): """Processing the whole pipeline definition.""" output = [] for entry in pipeline: key = list(entry.keys())[0] # an environment block can be repeated if key == "env": self.data.env_list[0].update(entry[key]) self.logger.debug("Updating environment at level 0 with %s", self.data.env_list[0]) continue # after validation it can't be anything else but a stage # and the title is inside the round brackets: stage = Stage(self, re.match(r"stage\((?P<title>.*)\)", key).group("title")) result = stage.process(entry[key]) output += result['output'] if not result['success']: return {'success': False, 'output': output} # logging the output of the cleanup shell when registered for line in self.cleanup(): output.append(line) self.logger.info(" | %s", line) self.event.succeeded() return {'success': True, 'output': output}
[ "def", "process", "(", "self", ",", "pipeline", ")", ":", "output", "=", "[", "]", "for", "entry", "in", "pipeline", ":", "key", "=", "list", "(", "entry", ".", "keys", "(", ")", ")", "[", "0", "]", "if", "key", "==", "\"env\"", ":", "self", ".", "data", ".", "env_list", "[", "0", "]", ".", "update", "(", "entry", "[", "key", "]", ")", "self", ".", "logger", ".", "debug", "(", "\"Updating environment at level 0 with %s\"", ",", "self", ".", "data", ".", "env_list", "[", "0", "]", ")", "continue", "stage", "=", "Stage", "(", "self", ",", "re", ".", "match", "(", "r\"stage\\((?P<title>.*)\\)\"", ",", "key", ")", ".", "group", "(", "\"title\"", ")", ")", "result", "=", "stage", ".", "process", "(", "entry", "[", "key", "]", ")", "output", "+=", "result", "[", "'output'", "]", "if", "not", "result", "[", "'success'", "]", ":", "return", "{", "'success'", ":", "False", ",", "'output'", ":", "output", "}", "for", "line", "in", "self", ".", "cleanup", "(", ")", ":", "output", ".", "append", "(", "line", ")", "self", ".", "logger", ".", "info", "(", "\" | %s\"", ",", "line", ")", "self", ".", "event", ".", "succeeded", "(", ")", "return", "{", "'success'", ":", "True", ",", "'output'", ":", "output", "}" ]
Processing the whole pipeline definition.
[ "Processing", "the", "whole", "pipeline", "definition", "." ]
04ca18c4e95e4349532bb45b768206393e1f2c13
https://github.com/Nachtfeuer/pipeline/blob/04ca18c4e95e4349532bb45b768206393e1f2c13/spline/pipeline.py#L81-L107
train
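In Pipeline.process above, stage titles are pulled out of keys of the form stage(<title>) with a named regex group; in isolation the extraction is:

import re

key = "stage(Build and Test)"                        # illustrative pipeline key
title = re.match(r"stage\((?P<title>.*)\)", key).group("title")
print(title)                                         # Build and Test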
acutesoftware/AIKIF
scripts/AI_CLI.py
AICLI.process
def process(self, txt, mode): """ Top level function to process the command, mainly depending on mode. This should work by using the function name defined in all_commamnds """ result = '' if mode == 'ADD': # already in add mode, so add data if txt in self.all_commands['cmd'][0]: self.show_output('Returning to Command mode') mode = 'COMMAND' self.prompt = '> ' else: self.show_output('Adding Text : ', txt) result = self.cmd_add(txt) elif mode == 'QUERY': if txt in self.all_commands['cmd'][0]: self.show_output('Returning to Command mode') mode = 'COMMAND' self.prompt = '> ' else: self.show_output('Query : ', txt) result = self.cmd_query(txt) else: if txt in self.all_commands['exit'][0]: self.cmd_exit() elif txt in self.all_commands['help'][0]: self.cmd_help() elif txt in self.all_commands['cmd'][0]: result = 'Returning to Command mode' mode = 'COMMAND' self.prompt = '> ' elif txt in self.all_commands['add'][0]: result = 'Entering Add mode' mode = 'ADD' self.prompt = 'ADD > ' elif txt in self.all_commands['query'][0]: result = 'Entering Query mode' mode = 'QUERY' self.prompt = '?? > ' else: result = 'Unknown command - type help for list of commands' return result, mode
python
def process(self, txt, mode): """ Top level function to process the command, mainly depending on mode. This should work by using the function name defined in all_commamnds """ result = '' if mode == 'ADD': # already in add mode, so add data if txt in self.all_commands['cmd'][0]: self.show_output('Returning to Command mode') mode = 'COMMAND' self.prompt = '> ' else: self.show_output('Adding Text : ', txt) result = self.cmd_add(txt) elif mode == 'QUERY': if txt in self.all_commands['cmd'][0]: self.show_output('Returning to Command mode') mode = 'COMMAND' self.prompt = '> ' else: self.show_output('Query : ', txt) result = self.cmd_query(txt) else: if txt in self.all_commands['exit'][0]: self.cmd_exit() elif txt in self.all_commands['help'][0]: self.cmd_help() elif txt in self.all_commands['cmd'][0]: result = 'Returning to Command mode' mode = 'COMMAND' self.prompt = '> ' elif txt in self.all_commands['add'][0]: result = 'Entering Add mode' mode = 'ADD' self.prompt = 'ADD > ' elif txt in self.all_commands['query'][0]: result = 'Entering Query mode' mode = 'QUERY' self.prompt = '?? > ' else: result = 'Unknown command - type help for list of commands' return result, mode
[ "def", "process", "(", "self", ",", "txt", ",", "mode", ")", ":", "result", "=", "''", "if", "mode", "==", "'ADD'", ":", "if", "txt", "in", "self", ".", "all_commands", "[", "'cmd'", "]", "[", "0", "]", ":", "self", ".", "show_output", "(", "'Returning to Command mode'", ")", "mode", "=", "'COMMAND'", "self", ".", "prompt", "=", "'> '", "else", ":", "self", ".", "show_output", "(", "'Adding Text : '", ",", "txt", ")", "result", "=", "self", ".", "cmd_add", "(", "txt", ")", "elif", "mode", "==", "'QUERY'", ":", "if", "txt", "in", "self", ".", "all_commands", "[", "'cmd'", "]", "[", "0", "]", ":", "self", ".", "show_output", "(", "'Returning to Command mode'", ")", "mode", "=", "'COMMAND'", "self", ".", "prompt", "=", "'> '", "else", ":", "self", ".", "show_output", "(", "'Query : '", ",", "txt", ")", "result", "=", "self", ".", "cmd_query", "(", "txt", ")", "else", ":", "if", "txt", "in", "self", ".", "all_commands", "[", "'exit'", "]", "[", "0", "]", ":", "self", ".", "cmd_exit", "(", ")", "elif", "txt", "in", "self", ".", "all_commands", "[", "'help'", "]", "[", "0", "]", ":", "self", ".", "cmd_help", "(", ")", "elif", "txt", "in", "self", ".", "all_commands", "[", "'cmd'", "]", "[", "0", "]", ":", "result", "=", "'Returning to Command mode'", "mode", "=", "'COMMAND'", "self", ".", "prompt", "=", "'> '", "elif", "txt", "in", "self", ".", "all_commands", "[", "'add'", "]", "[", "0", "]", ":", "result", "=", "'Entering Add mode'", "mode", "=", "'ADD'", "self", ".", "prompt", "=", "'ADD > '", "elif", "txt", "in", "self", ".", "all_commands", "[", "'query'", "]", "[", "0", "]", ":", "result", "=", "'Entering Query mode'", "mode", "=", "'QUERY'", "self", ".", "prompt", "=", "'?? > '", "else", ":", "result", "=", "'Unknown command - type help for list of commands'", "return", "result", ",", "mode" ]
Top level function to process the command, mainly depending on mode. This should work by using the function name defined in all_commands
[ "Top", "level", "function", "to", "process", "the", "command", "mainly", "depending", "on", "mode", ".", "This", "should", "work", "by", "using", "the", "function", "name", "defined", "in", "all_commands" ]
fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/scripts/AI_CLI.py#L62-L110
train
acutesoftware/AIKIF
scripts/AI_CLI.py
AICLI.cmd_add
def cmd_add(self, txt): """ Enter add mode - all text entered now will be processed as adding information until cancelled """ self.show_output('Adding ', txt) self.raw.add(txt) print(self.raw) return 'Added ' + txt
python
def cmd_add(self, txt): """ Enter add mode - all text entered now will be processed as adding information until cancelled """ self.show_output('Adding ', txt) self.raw.add(txt) print(self.raw) return 'Added ' + txt
[ "def", "cmd_add", "(", "self", ",", "txt", ")", ":", "self", ".", "show_output", "(", "'Adding '", ",", "txt", ")", "self", ".", "raw", ".", "add", "(", "txt", ")", "print", "(", "self", ".", "raw", ")", "return", "'Added '", "+", "txt" ]
Enter add mode - all text entered now will be processed as adding information until cancelled
[ "Enter", "add", "mode", "-", "all", "text", "entered", "now", "will", "be", "processed", "as", "adding", "information", "until", "cancelled" ]
fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/scripts/AI_CLI.py#L135-L143
train
acutesoftware/AIKIF
scripts/AI_CLI.py
AICLI.cmd_query
def cmd_query(self, txt): """ search and query the AIKIF """ self.show_output('Searching for ', txt) res = self.raw.find(txt) for d in res: self.show_output(d) return str(len(res)) + ' results for ' + txt
python
def cmd_query(self, txt): """ search and query the AIKIF """ self.show_output('Searching for ', txt) res = self.raw.find(txt) for d in res: self.show_output(d) return str(len(res)) + ' results for ' + txt
[ "def", "cmd_query", "(", "self", ",", "txt", ")", ":", "self", ".", "show_output", "(", "'Searching for '", ",", "txt", ")", "res", "=", "self", ".", "raw", ".", "find", "(", "txt", ")", "for", "d", "in", "res", ":", "self", ".", "show_output", "(", "d", ")", "return", "str", "(", "len", "(", "res", ")", ")", "+", "' results for '", "+", "txt" ]
search and query the AIKIF
[ "search", "and", "query", "the", "AIKIF" ]
fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/scripts/AI_CLI.py#L146-L154
train
herrjemand/flask-fido-u2f
flask_fido_u2f.py
U2F.verify_integrity
def verify_integrity(self): """Verifies that all required functions been injected.""" if not self.__integrity_check: if not self.__appid: raise Exception('U2F_APPID was not defined! Please define it in configuration file.') if self.__facets_enabled and not len(self.__facets_list): raise Exception("""U2F facets been enabled, but U2F facet list is empty. Please either disable facets by setting U2F_FACETS_ENABLED to False. Or add facets list using, by assigning it to U2F_FACETS_LIST. """) # Injection undefined_message = 'U2F {name} handler is not defined! Please import {name} through {method}!' if not self.__get_u2f_devices: raise Exception(undefined_message.format(name='Read', method='@u2f.read')) if not self.__save_u2f_devices: raise Exception(undefined_message.format(name='Save', method='@u2f.save')) if not self.__call_success_enroll: raise Exception(undefined_message.format(name='enroll onSuccess', method='@u2f.enroll_on_success')) if not self.__call_success_sign: raise Exception(undefined_message.format(name='sign onSuccess', method='@u2f.sign_on_success')) self.__integrity_check = True return True
python
def verify_integrity(self): """Verifies that all required functions been injected.""" if not self.__integrity_check: if not self.__appid: raise Exception('U2F_APPID was not defined! Please define it in configuration file.') if self.__facets_enabled and not len(self.__facets_list): raise Exception("""U2F facets been enabled, but U2F facet list is empty. Please either disable facets by setting U2F_FACETS_ENABLED to False. Or add facets list using, by assigning it to U2F_FACETS_LIST. """) # Injection undefined_message = 'U2F {name} handler is not defined! Please import {name} through {method}!' if not self.__get_u2f_devices: raise Exception(undefined_message.format(name='Read', method='@u2f.read')) if not self.__save_u2f_devices: raise Exception(undefined_message.format(name='Save', method='@u2f.save')) if not self.__call_success_enroll: raise Exception(undefined_message.format(name='enroll onSuccess', method='@u2f.enroll_on_success')) if not self.__call_success_sign: raise Exception(undefined_message.format(name='sign onSuccess', method='@u2f.sign_on_success')) self.__integrity_check = True return True
[ "def", "verify_integrity", "(", "self", ")", ":", "if", "not", "self", ".", "__integrity_check", ":", "if", "not", "self", ".", "__appid", ":", "raise", "Exception", "(", "'U2F_APPID was not defined! Please define it in configuration file.'", ")", "if", "self", ".", "__facets_enabled", "and", "not", "len", "(", "self", ".", "__facets_list", ")", ":", "raise", "Exception", "(", ")", "undefined_message", "=", "'U2F {name} handler is not defined! Please import {name} through {method}!'", "if", "not", "self", ".", "__get_u2f_devices", ":", "raise", "Exception", "(", "undefined_message", ".", "format", "(", "name", "=", "'Read'", ",", "method", "=", "'@u2f.read'", ")", ")", "if", "not", "self", ".", "__save_u2f_devices", ":", "raise", "Exception", "(", "undefined_message", ".", "format", "(", "name", "=", "'Save'", ",", "method", "=", "'@u2f.save'", ")", ")", "if", "not", "self", ".", "__call_success_enroll", ":", "raise", "Exception", "(", "undefined_message", ".", "format", "(", "name", "=", "'enroll onSuccess'", ",", "method", "=", "'@u2f.enroll_on_success'", ")", ")", "if", "not", "self", ".", "__call_success_sign", ":", "raise", "Exception", "(", "undefined_message", ".", "format", "(", "name", "=", "'sign onSuccess'", ",", "method", "=", "'@u2f.sign_on_success'", ")", ")", "self", ".", "__integrity_check", "=", "True", "return", "True" ]
Verifies that all required functions have been injected.
[ "Verifies", "that", "all", "required", "functions", "have", "been", "injected", "." ]
23acac4cfe285a33411e8a6bf980b3c345b04feb
https://github.com/herrjemand/flask-fido-u2f/blob/23acac4cfe285a33411e8a6bf980b3c345b04feb/flask_fido_u2f.py#L132-L163
train
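As a usage illustration of verify_integrity: the error messages above name four injection decorators (@u2f.read, @u2f.save, @u2f.enroll_on_success, @u2f.sign_on_success) plus the U2F_APPID and U2F_FACETS_ENABLED configuration keys. A minimal sketch of an application that would pass the check, assuming the extension is constructed as U2F(app) (the exact constructor signature is an assumption) and using a throwaway in-memory device list:

from flask import Flask
from flask_fido_u2f import U2F

app = Flask(__name__)
app.config['U2F_APPID'] = 'https://example.com'    # required by verify_integrity
app.config['U2F_FACETS_ENABLED'] = False           # facets left disabled for this sketch

u2f = U2F(app)        # assumed wiring; adjust to the extension's real constructor
DEVICES = []          # toy in-memory store for a single user

@u2f.read
def read_devices():
    # injected as the 'Read' handler
    return DEVICES

@u2f.save
def save_devices(devices):
    # injected as the 'Save' handler
    DEVICES[:] = devices

@u2f.enroll_on_success
def on_enroll():
    print('U2F device enrolled')

@u2f.sign_on_success
def on_sign():
    print('U2F signature accepted')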
herrjemand/flask-fido-u2f
flask_fido_u2f.py
U2F.devices
def devices(self): """Manages users enrolled u2f devices""" self.verify_integrity() if session.get('u2f_device_management_authorized', False): if request.method == 'GET': return jsonify(self.get_devices()), 200 elif request.method == 'DELETE': response = self.remove_device(request.json) if response['status'] == 'ok': return jsonify(response), 200 else: return jsonify(response), 404 return jsonify({'status': 'failed', 'error': 'Unauthorized!'}), 401
python
def devices(self): """Manages users enrolled u2f devices""" self.verify_integrity() if session.get('u2f_device_management_authorized', False): if request.method == 'GET': return jsonify(self.get_devices()), 200 elif request.method == 'DELETE': response = self.remove_device(request.json) if response['status'] == 'ok': return jsonify(response), 200 else: return jsonify(response), 404 return jsonify({'status': 'failed', 'error': 'Unauthorized!'}), 401
[ "def", "devices", "(", "self", ")", ":", "self", ".", "verify_integrity", "(", ")", "if", "session", ".", "get", "(", "'u2f_device_management_authorized'", ",", "False", ")", ":", "if", "request", ".", "method", "==", "'GET'", ":", "return", "jsonify", "(", "self", ".", "get_devices", "(", ")", ")", ",", "200", "elif", "request", ".", "method", "==", "'DELETE'", ":", "response", "=", "self", ".", "remove_device", "(", "request", ".", "json", ")", "if", "response", "[", "'status'", "]", "==", "'ok'", ":", "return", "jsonify", "(", "response", ")", ",", "200", "else", ":", "return", "jsonify", "(", "response", ")", ",", "404", "return", "jsonify", "(", "{", "'status'", ":", "'failed'", ",", "'error'", ":", "'Unauthorized!'", "}", ")", ",", "401" ]
Manages users enrolled u2f devices
[ "Manages", "users", "enrolled", "u2f", "devices" ]
23acac4cfe285a33411e8a6bf980b3c345b04feb
https://github.com/herrjemand/flask-fido-u2f/blob/23acac4cfe285a33411e8a6bf980b3c345b04feb/flask_fido_u2f.py#L208-L224
train
herrjemand/flask-fido-u2f
flask_fido_u2f.py
U2F.facets
def facets(self): """Provides facets support. REQUIRES VALID HTTPS!""" self.verify_integrity() if self.__facets_enabled: data = json.dumps({ 'trustedFacets' : [{ 'version': { 'major': 1, 'minor' : 0 }, 'ids': self.__facets_list }] }, sort_keys=True, indent=2, separators=(',', ': ')) mime = 'application/fido.trusted-apps+json' resp = Response(data, mimetype=mime) return resp, 200 else: return jsonify({}), 404
python
def facets(self): """Provides facets support. REQUIRES VALID HTTPS!""" self.verify_integrity() if self.__facets_enabled: data = json.dumps({ 'trustedFacets' : [{ 'version': { 'major': 1, 'minor' : 0 }, 'ids': self.__facets_list }] }, sort_keys=True, indent=2, separators=(',', ': ')) mime = 'application/fido.trusted-apps+json' resp = Response(data, mimetype=mime) return resp, 200 else: return jsonify({}), 404
[ "def", "facets", "(", "self", ")", ":", "self", ".", "verify_integrity", "(", ")", "if", "self", ".", "__facets_enabled", ":", "data", "=", "json", ".", "dumps", "(", "{", "'trustedFacets'", ":", "[", "{", "'version'", ":", "{", "'major'", ":", "1", ",", "'minor'", ":", "0", "}", ",", "'ids'", ":", "self", ".", "__facets_list", "}", "]", "}", ",", "sort_keys", "=", "True", ",", "indent", "=", "2", ",", "separators", "=", "(", "','", ",", "': '", ")", ")", "mime", "=", "'application/fido.trusted-apps+json'", "resp", "=", "Response", "(", "data", ",", "mimetype", "=", "mime", ")", "return", "resp", ",", "200", "else", ":", "return", "jsonify", "(", "{", "}", ")", ",", "404" ]
Provides facets support. REQUIRES VALID HTTPS!
[ "Provides", "facets", "support", ".", "REQUIRES", "VALID", "HTTPS!" ]
23acac4cfe285a33411e8a6bf980b3c345b04feb
https://github.com/herrjemand/flask-fido-u2f/blob/23acac4cfe285a33411e8a6bf980b3c345b04feb/flask_fido_u2f.py#L226-L243
train
herrjemand/flask-fido-u2f
flask_fido_u2f.py
U2F.get_enroll
def get_enroll(self): """Returns new enroll seed""" devices = [DeviceRegistration.wrap(device) for device in self.__get_u2f_devices()] enroll = start_register(self.__appid, devices) enroll['status'] = 'ok' session['_u2f_enroll_'] = enroll.json return enroll
python
def get_enroll(self): """Returns new enroll seed""" devices = [DeviceRegistration.wrap(device) for device in self.__get_u2f_devices()] enroll = start_register(self.__appid, devices) enroll['status'] = 'ok' session['_u2f_enroll_'] = enroll.json return enroll
[ "def", "get_enroll", "(", "self", ")", ":", "devices", "=", "[", "DeviceRegistration", ".", "wrap", "(", "device", ")", "for", "device", "in", "self", ".", "__get_u2f_devices", "(", ")", "]", "enroll", "=", "start_register", "(", "self", ".", "__appid", ",", "devices", ")", "enroll", "[", "'status'", "]", "=", "'ok'", "session", "[", "'_u2f_enroll_'", "]", "=", "enroll", ".", "json", "return", "enroll" ]
Returns new enroll seed
[ "Returns", "new", "enroll", "seed" ]
23acac4cfe285a33411e8a6bf980b3c345b04feb
https://github.com/herrjemand/flask-fido-u2f/blob/23acac4cfe285a33411e8a6bf980b3c345b04feb/flask_fido_u2f.py#L247-L255
train
herrjemand/flask-fido-u2f
flask_fido_u2f.py
U2F.verify_enroll
def verify_enroll(self, response): """Verifies and saves U2F enroll""" seed = session.pop('_u2f_enroll_') try: new_device, cert = complete_register(seed, response, self.__facets_list) except Exception as e: if self.__call_fail_enroll: self.__call_fail_enroll(e) return { 'status' : 'failed', 'error' : 'Invalid key handle!' } finally: pass devices = self.__get_u2f_devices() # Setting new device counter to 0 new_device['counter'] = 0 new_device['index'] = 0 for device in devices: if new_device['index'] <= device['index']: new_device['index'] = device['index'] + 1 devices.append(new_device) self.__save_u2f_devices(devices) self.__call_success_enroll() return {'status': 'ok', 'message': 'Successfully enrolled new U2F device!'}
python
def verify_enroll(self, response): """Verifies and saves U2F enroll""" seed = session.pop('_u2f_enroll_') try: new_device, cert = complete_register(seed, response, self.__facets_list) except Exception as e: if self.__call_fail_enroll: self.__call_fail_enroll(e) return { 'status' : 'failed', 'error' : 'Invalid key handle!' } finally: pass devices = self.__get_u2f_devices() # Setting new device counter to 0 new_device['counter'] = 0 new_device['index'] = 0 for device in devices: if new_device['index'] <= device['index']: new_device['index'] = device['index'] + 1 devices.append(new_device) self.__save_u2f_devices(devices) self.__call_success_enroll() return {'status': 'ok', 'message': 'Successfully enrolled new U2F device!'}
[ "def", "verify_enroll", "(", "self", ",", "response", ")", ":", "seed", "=", "session", ".", "pop", "(", "'_u2f_enroll_'", ")", "try", ":", "new_device", ",", "cert", "=", "complete_register", "(", "seed", ",", "response", ",", "self", ".", "__facets_list", ")", "except", "Exception", "as", "e", ":", "if", "self", ".", "__call_fail_enroll", ":", "self", ".", "__call_fail_enroll", "(", "e", ")", "return", "{", "'status'", ":", "'failed'", ",", "'error'", ":", "'Invalid key handle!'", "}", "finally", ":", "pass", "devices", "=", "self", ".", "__get_u2f_devices", "(", ")", "new_device", "[", "'counter'", "]", "=", "0", "new_device", "[", "'index'", "]", "=", "0", "for", "device", "in", "devices", ":", "if", "new_device", "[", "'index'", "]", "<=", "device", "[", "'index'", "]", ":", "new_device", "[", "'index'", "]", "=", "device", "[", "'index'", "]", "+", "1", "devices", ".", "append", "(", "new_device", ")", "self", ".", "__save_u2f_devices", "(", "devices", ")", "self", ".", "__call_success_enroll", "(", ")", "return", "{", "'status'", ":", "'ok'", ",", "'message'", ":", "'Successfully enrolled new U2F device!'", "}" ]
Verifies and saves U2F enroll
[ "Verifies", "and", "saves", "U2F", "enroll" ]
23acac4cfe285a33411e8a6bf980b3c345b04feb
https://github.com/herrjemand/flask-fido-u2f/blob/23acac4cfe285a33411e8a6bf980b3c345b04feb/flask_fido_u2f.py#L257-L293
train
herrjemand/flask-fido-u2f
flask_fido_u2f.py
U2F.get_signature_challenge
def get_signature_challenge(self): """Returns new signature challenge""" devices = [DeviceRegistration.wrap(device) for device in self.__get_u2f_devices()] if devices == []: return { 'status' : 'failed', 'error' : 'No devices been associated with the account!' } challenge = start_authenticate(devices) challenge['status'] = 'ok' session['_u2f_challenge_'] = challenge.json return challenge
python
def get_signature_challenge(self): """Returns new signature challenge""" devices = [DeviceRegistration.wrap(device) for device in self.__get_u2f_devices()] if devices == []: return { 'status' : 'failed', 'error' : 'No devices been associated with the account!' } challenge = start_authenticate(devices) challenge['status'] = 'ok' session['_u2f_challenge_'] = challenge.json return challenge
[ "def", "get_signature_challenge", "(", "self", ")", ":", "devices", "=", "[", "DeviceRegistration", ".", "wrap", "(", "device", ")", "for", "device", "in", "self", ".", "__get_u2f_devices", "(", ")", "]", "if", "devices", "==", "[", "]", ":", "return", "{", "'status'", ":", "'failed'", ",", "'error'", ":", "'No devices been associated with the account!'", "}", "challenge", "=", "start_authenticate", "(", "devices", ")", "challenge", "[", "'status'", "]", "=", "'ok'", "session", "[", "'_u2f_challenge_'", "]", "=", "challenge", ".", "json", "return", "challenge" ]
Returns new signature challenge
[ "Returns", "new", "signature", "challenge" ]
23acac4cfe285a33411e8a6bf980b3c345b04feb
https://github.com/herrjemand/flask-fido-u2f/blob/23acac4cfe285a33411e8a6bf980b3c345b04feb/flask_fido_u2f.py#L296-L312
train
herrjemand/flask-fido-u2f
flask_fido_u2f.py
U2F.remove_device
def remove_device(self, request): """Removes device specified by id""" devices = self.__get_u2f_devices() for i in range(len(devices)): if devices[i]['keyHandle'] == request['id']: del devices[i] self.__save_u2f_devices(devices) return { 'status' : 'ok', 'message' : 'Successfully deleted your device!' } return { 'status' : 'failed', 'error' : 'No device with such an id been found!' }
python
def remove_device(self, request): """Removes device specified by id""" devices = self.__get_u2f_devices() for i in range(len(devices)): if devices[i]['keyHandle'] == request['id']: del devices[i] self.__save_u2f_devices(devices) return { 'status' : 'ok', 'message' : 'Successfully deleted your device!' } return { 'status' : 'failed', 'error' : 'No device with such an id been found!' }
[ "def", "remove_device", "(", "self", ",", "request", ")", ":", "devices", "=", "self", ".", "__get_u2f_devices", "(", ")", "for", "i", "in", "range", "(", "len", "(", "devices", ")", ")", ":", "if", "devices", "[", "i", "]", "[", "'keyHandle'", "]", "==", "request", "[", "'id'", "]", ":", "del", "devices", "[", "i", "]", "self", ".", "__save_u2f_devices", "(", "devices", ")", "return", "{", "'status'", ":", "'ok'", ",", "'message'", ":", "'Successfully deleted your device!'", "}", "return", "{", "'status'", ":", "'failed'", ",", "'error'", ":", "'No device with such an id been found!'", "}" ]
Removes device specified by id
[ "Removes", "device", "specified", "by", "id" ]
23acac4cfe285a33411e8a6bf980b3c345b04feb
https://github.com/herrjemand/flask-fido-u2f/blob/23acac4cfe285a33411e8a6bf980b3c345b04feb/flask_fido_u2f.py#L367-L385
train
herrjemand/flask-fido-u2f
flask_fido_u2f.py
U2F.verify_counter
def verify_counter(self, signature, counter): """ Verifies that counter value is greater than previous signature""" devices = self.__get_u2f_devices() for device in devices: # Searching for specific keyhandle if device['keyHandle'] == signature['keyHandle']: if counter > device['counter']: # Updating counter record device['counter'] = counter self.__save_u2f_devices(devices) return True else: return False
python
def verify_counter(self, signature, counter): """ Verifies that counter value is greater than previous signature""" devices = self.__get_u2f_devices() for device in devices: # Searching for specific keyhandle if device['keyHandle'] == signature['keyHandle']: if counter > device['counter']: # Updating counter record device['counter'] = counter self.__save_u2f_devices(devices) return True else: return False
[ "def", "verify_counter", "(", "self", ",", "signature", ",", "counter", ")", ":", "devices", "=", "self", ".", "__get_u2f_devices", "(", ")", "for", "device", "in", "devices", ":", "if", "device", "[", "'keyHandle'", "]", "==", "signature", "[", "'keyHandle'", "]", ":", "if", "counter", ">", "device", "[", "'counter'", "]", ":", "device", "[", "'counter'", "]", "=", "counter", "self", ".", "__save_u2f_devices", "(", "devices", ")", "return", "True", "else", ":", "return", "False" ]
Verifies that counter value is greater than previous signature
[ "Verifies", "that", "counter", "value", "is", "greater", "than", "previous", "signature" ]
23acac4cfe285a33411e8a6bf980b3c345b04feb
https://github.com/herrjemand/flask-fido-u2f/blob/23acac4cfe285a33411e8a6bf980b3c345b04feb/flask_fido_u2f.py#L393-L409
train
Nachtfeuer/pipeline
spline/validation.py
Validator.validate
def validate(data): """ Validate data against the schema. Args: data(dict): data structure to validate. Returns: dict: data as provided and defaults where defined in schema. """ try: return Schema(Validator.SCHEMA).validate(data) except SchemaError as exception: logging.getLogger(__name__).error(exception) return None
python
def validate(data): """ Validate data against the schema. Args: data(dict): data structure to validate. Returns: dict: data as provided and defaults where defined in schema. """ try: return Schema(Validator.SCHEMA).validate(data) except SchemaError as exception: logging.getLogger(__name__).error(exception) return None
[ "def", "validate", "(", "data", ")", ":", "try", ":", "return", "Schema", "(", "Validator", ".", "SCHEMA", ")", ".", "validate", "(", "data", ")", "except", "SchemaError", "as", "exception", ":", "logging", ".", "getLogger", "(", "__name__", ")", ".", "error", "(", "exception", ")", "return", "None" ]
Validate data against the schema. Args: data(dict): data structure to validate. Returns: dict: data as provided and defaults where defined in schema.
[ "Validate", "data", "against", "the", "schema", "." ]
04ca18c4e95e4349532bb45b768206393e1f2c13
https://github.com/Nachtfeuer/pipeline/blob/04ca18c4e95e4349532bb45b768206393e1f2c13/spline/validation.py#L159-L173
train
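Validator.validate wraps Schema.validate from the schema package and converts a SchemaError into a logged error plus a None return. A standalone illustration of that pattern (the schema dictionary below is invented for the example and is not the real Validator.SCHEMA):

import logging
from schema import Schema, SchemaError, Optional

EXAMPLE_SCHEMA = {'name': str, Optional('retries', default=3): int}

def validate(data):
    try:
        # returns the validated data, with defaults filled in where defined
        return Schema(EXAMPLE_SCHEMA).validate(data)
    except SchemaError as exception:
        logging.getLogger(__name__).error(exception)
        return None

print(validate({'name': 'build'}))   # -> {'name': 'build', 'retries': 3}
print(validate({'name': 42}))        # -> None (the schema error is logged)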
Nachtfeuer/pipeline
spline/tools/loader.py
Loader.include
def include(self, node): """Include the defined yaml file.""" result = None if isinstance(node, ScalarNode): result = Loader.include_file(self.construct_scalar(node)) else: raise RuntimeError("Not supported !include on type %s" % type(node)) return result
python
def include(self, node): """Include the defined yaml file.""" result = None if isinstance(node, ScalarNode): result = Loader.include_file(self.construct_scalar(node)) else: raise RuntimeError("Not supported !include on type %s" % type(node)) return result
[ "def", "include", "(", "self", ",", "node", ")", ":", "result", "=", "None", "if", "isinstance", "(", "node", ",", "ScalarNode", ")", ":", "result", "=", "Loader", ".", "include_file", "(", "self", ".", "construct_scalar", "(", "node", ")", ")", "else", ":", "raise", "RuntimeError", "(", "\"Not supported !include on type %s\"", "%", "type", "(", "node", ")", ")", "return", "result" ]
Include the defined yaml file.
[ "Include", "the", "defined", "yaml", "file", "." ]
04ca18c4e95e4349532bb45b768206393e1f2c13
https://github.com/Nachtfeuer/pipeline/blob/04ca18c4e95e4349532bb45b768206393e1f2c13/spline/tools/loader.py#L32-L39
train
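The !include tag above relies on registering a custom constructor with PyYAML. A rough self-contained sketch of the same idea, independent of spline's Loader class (which additionally rejects non-scalar nodes and resolves nested includes through include_file):

import os
import yaml

class IncludeLoader(yaml.SafeLoader):
    """YAML loader that understands a scalar !include other.yaml tag."""

def construct_include(loader, node):
    filename = loader.construct_scalar(node)
    if not os.path.isfile(filename):
        raise RuntimeError("File %s doesn't exist!" % filename)
    with open(filename) as handle:
        return yaml.load(handle, Loader=IncludeLoader)   # assumes trusted local files

IncludeLoader.add_constructor('!include', construct_include)

# usage:
#   with open('pipeline.yaml') as handle:
#       document = yaml.load(handle, Loader=IncludeLoader)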
Nachtfeuer/pipeline
spline/tools/loader.py
Loader.load
def load(filename): """"Load yaml file with specific include loader.""" if os.path.isfile(filename): with open(filename) as handle: return yaml_load(handle, Loader=Loader) # nosec raise RuntimeError("File %s doesn't exist!" % filename)
python
def load(filename): """"Load yaml file with specific include loader.""" if os.path.isfile(filename): with open(filename) as handle: return yaml_load(handle, Loader=Loader) # nosec raise RuntimeError("File %s doesn't exist!" % filename)
[ "def", "load", "(", "filename", ")", ":", "if", "os", ".", "path", ".", "isfile", "(", "filename", ")", ":", "with", "open", "(", "filename", ")", "as", "handle", ":", "return", "yaml_load", "(", "handle", ",", "Loader", "=", "Loader", ")", "raise", "RuntimeError", "(", "\"File %s doesn't exist!\"", "%", "filename", ")" ]
Load yaml file with specific include loader.
[ "Load", "yaml", "file", "with", "specific", "include", "loader", "." ]
04ca18c4e95e4349532bb45b768206393e1f2c13
https://github.com/Nachtfeuer/pipeline/blob/04ca18c4e95e4349532bb45b768206393e1f2c13/spline/tools/loader.py#L50-L55
train
acutesoftware/AIKIF
aikif/transpose.py
Transpose.pivot
def pivot(self): """ transposes rows and columns """ self.op_data = [list(i) for i in zip(*self.ip_data)]
python
def pivot(self): """ transposes rows and columns """ self.op_data = [list(i) for i in zip(*self.ip_data)]
[ "def", "pivot", "(", "self", ")", ":", "self", ".", "op_data", "=", "[", "list", "(", "i", ")", "for", "i", "in", "zip", "(", "*", "self", ".", "ip_data", ")", "]" ]
transposes rows and columns
[ "transposes", "rows", "and", "columns" ]
fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/transpose.py#L20-L24
train
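Transpose.pivot is the usual zip(*rows) transpose. A tiny worked example with made-up data:

ip_data = [['name', 'age'],
           ['John', 35],
           ['Cindy', 28]]

op_data = [list(i) for i in zip(*ip_data)]
# op_data == [['name', 'John', 'Cindy'], ['age', 35, 28]]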
acutesoftware/AIKIF
aikif/transpose.py
Transpose.key_value_pairs
def key_value_pairs(self): """ convert list to key value pairs This should also create unique id's to allow for any dataset to be transposed, and then later manipulated r1c1,r1c2,r1c3 r2c1,r2c2,r2c3 should be converted to ID COLNUM VAL r1c1, """ self.op_data = [] hdrs = self.ip_data[0] for row in self.ip_data[1:]: id_col = row[0] for col_num, col in enumerate(row): self.op_data.append([id_col, hdrs[col_num], col])
python
def key_value_pairs(self): """ convert list to key value pairs This should also create unique id's to allow for any dataset to be transposed, and then later manipulated r1c1,r1c2,r1c3 r2c1,r2c2,r2c3 should be converted to ID COLNUM VAL r1c1, """ self.op_data = [] hdrs = self.ip_data[0] for row in self.ip_data[1:]: id_col = row[0] for col_num, col in enumerate(row): self.op_data.append([id_col, hdrs[col_num], col])
[ "def", "key_value_pairs", "(", "self", ")", ":", "self", ".", "op_data", "=", "[", "]", "hdrs", "=", "self", ".", "ip_data", "[", "0", "]", "for", "row", "in", "self", ".", "ip_data", "[", "1", ":", "]", ":", "id_col", "=", "row", "[", "0", "]", "for", "col_num", ",", "col", "in", "enumerate", "(", "row", ")", ":", "self", ".", "op_data", ".", "append", "(", "[", "id_col", ",", "hdrs", "[", "col_num", "]", ",", "col", "]", ")" ]
convert list to key value pairs This should also create unique id's to allow for any dataset to be transposed, and then later manipulated r1c1,r1c2,r1c3 r2c1,r2c2,r2c3 should be converted to ID COLNUM VAL r1c1,
[ "convert", "list", "to", "key", "value", "pairs", "This", "should", "also", "create", "unique", "id", "s", "to", "allow", "for", "any", "dataset", "to", "be", "transposed", "and", "then", "later", "manipulated", "r1c1", "r1c2", "r1c3", "r2c1", "r2c2", "r2c3", "should", "be", "converted", "to", "ID", "COLNUM", "VAL", "r1c1" ]
fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/transpose.py#L26-L44
train
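Following the header-row convention described in the docstring, a small worked example of what key_value_pairs produces (sample data only):

ip_data = [['ID', 'colour', 'size'],
           ['r1', 'red', 'large'],
           ['r2', 'blue', 'small']]

# mirrors Transpose.key_value_pairs: first row holds headers, first column the row id
op_data = []
hdrs = ip_data[0]
for row in ip_data[1:]:
    id_col = row[0]
    for col_num, col in enumerate(row):
        op_data.append([id_col, hdrs[col_num], col])

# op_data ==
# [['r1', 'ID', 'r1'], ['r1', 'colour', 'red'], ['r1', 'size', 'large'],
#  ['r2', 'ID', 'r2'], ['r2', 'colour', 'blue'], ['r2', 'size', 'small']]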
acutesoftware/AIKIF
aikif/transpose.py
Transpose.links_to_data
def links_to_data(self, col_name_col_num, col_val_col_num, id_a_col_num, id_b_col_num): """ This is the reverse of data_to_links and takes a links table and generates a data table as follows Input Table Output Table Cat_Name,CAT_val,Person_a,person_b NAME,Location Location,Perth,John,Fred John,Perth Location,Perth,John,Cindy Cindy,Perth Location,Perth,Fred,Cindy Fred,Perth """ print('Converting links to data') self.op_data unique_ids = [] unique_vals = [] self.op_data.append(['Name', self.ip_data[1][col_name_col_num]]) for r in self.ip_data[1:]: if r[id_a_col_num] not in unique_ids: unique_ids.append(r[id_a_col_num]) self.op_data.append([r[id_a_col_num], r[col_val_col_num]]) if r[id_b_col_num] not in unique_ids: unique_ids.append(r[id_b_col_num]) if r[col_val_col_num] not in unique_vals: unique_vals.append(r[col_val_col_num]) #for id in unique_ids: # self.op_data.append([id, '']) print('unique_ids = ', unique_ids) print('unique_vals= ', unique_vals) print('op_data = ', self.op_data) return self.op_data
python
def links_to_data(self, col_name_col_num, col_val_col_num, id_a_col_num, id_b_col_num): """ This is the reverse of data_to_links and takes a links table and generates a data table as follows Input Table Output Table Cat_Name,CAT_val,Person_a,person_b NAME,Location Location,Perth,John,Fred John,Perth Location,Perth,John,Cindy Cindy,Perth Location,Perth,Fred,Cindy Fred,Perth """ print('Converting links to data') self.op_data unique_ids = [] unique_vals = [] self.op_data.append(['Name', self.ip_data[1][col_name_col_num]]) for r in self.ip_data[1:]: if r[id_a_col_num] not in unique_ids: unique_ids.append(r[id_a_col_num]) self.op_data.append([r[id_a_col_num], r[col_val_col_num]]) if r[id_b_col_num] not in unique_ids: unique_ids.append(r[id_b_col_num]) if r[col_val_col_num] not in unique_vals: unique_vals.append(r[col_val_col_num]) #for id in unique_ids: # self.op_data.append([id, '']) print('unique_ids = ', unique_ids) print('unique_vals= ', unique_vals) print('op_data = ', self.op_data) return self.op_data
[ "def", "links_to_data", "(", "self", ",", "col_name_col_num", ",", "col_val_col_num", ",", "id_a_col_num", ",", "id_b_col_num", ")", ":", "print", "(", "'Converting links to data'", ")", "self", ".", "op_data", "unique_ids", "=", "[", "]", "unique_vals", "=", "[", "]", "self", ".", "op_data", ".", "append", "(", "[", "'Name'", ",", "self", ".", "ip_data", "[", "1", "]", "[", "col_name_col_num", "]", "]", ")", "for", "r", "in", "self", ".", "ip_data", "[", "1", ":", "]", ":", "if", "r", "[", "id_a_col_num", "]", "not", "in", "unique_ids", ":", "unique_ids", ".", "append", "(", "r", "[", "id_a_col_num", "]", ")", "self", ".", "op_data", ".", "append", "(", "[", "r", "[", "id_a_col_num", "]", ",", "r", "[", "col_val_col_num", "]", "]", ")", "if", "r", "[", "id_b_col_num", "]", "not", "in", "unique_ids", ":", "unique_ids", ".", "append", "(", "r", "[", "id_b_col_num", "]", ")", "if", "r", "[", "col_val_col_num", "]", "not", "in", "unique_vals", ":", "unique_vals", ".", "append", "(", "r", "[", "col_val_col_num", "]", ")", "print", "(", "'unique_ids = '", ",", "unique_ids", ")", "print", "(", "'unique_vals= '", ",", "unique_vals", ")", "print", "(", "'op_data = '", ",", "self", ".", "op_data", ")", "return", "self", ".", "op_data" ]
This is the reverse of data_to_links and takes a links table and generates a data table as follows Input Table Output Table Cat_Name,CAT_val,Person_a,person_b NAME,Location Location,Perth,John,Fred John,Perth Location,Perth,John,Cindy Cindy,Perth Location,Perth,Fred,Cindy Fred,Perth
[ "This", "is", "the", "reverse", "of", "data_to_links", "and", "takes", "a", "links", "table", "and", "generates", "a", "data", "table", "as", "follows", "Input", "Table", "Output", "Table", "Cat_Name", "CAT_val", "Person_a", "person_b", "NAME", "Location", "Location", "Perth", "John", "Fred", "John", "Perth", "Location", "Perth", "John", "Cindy", "Cindy", "Perth", "Location", "Perth", "Fred", "Cindy", "Fred", "Perth" ]
fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/transpose.py#L94-L129
train
acutesoftware/AIKIF
aikif/lib/cls_goal_friendly.py
GoalFriendly.find_best_plan
def find_best_plan(self): """ try each strategy with different amounts """ for plan in self.plans: for strat in self.strategy: self.run_plan(plan, strat)
python
def find_best_plan(self): """ try each strategy with different amounts """ for plan in self.plans: for strat in self.strategy: self.run_plan(plan, strat)
[ "def", "find_best_plan", "(", "self", ")", ":", "for", "plan", "in", "self", ".", "plans", ":", "for", "strat", "in", "self", ".", "strategy", ":", "self", ".", "run_plan", "(", "plan", ",", "strat", ")" ]
try each strategy with different amounts
[ "try", "each", "strategy", "with", "different", "amounts" ]
fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/lib/cls_goal_friendly.py#L37-L43
train
acutesoftware/AIKIF
aikif/ontology/read_opencyc.py
load_data
def load_data(fname): """ loads previously exported CSV file to redis database """ print('Loading ' + fname + ' to redis') r = redis.StrictRedis(host = '127.0.0.1', port = 6379, db = 0); with open(fname, 'r') as f: for line_num, row in enumerate(f): if row.strip('') != '': if line_num < 100000000: l_key, l_val = parse_n3(row, 'csv') if line_num % 1000 == 0: print('loading line #', line_num, 'key=', l_key, ' = ', l_val) if l_key != '': r.set(l_key, l_val)
python
def load_data(fname): """ loads previously exported CSV file to redis database """ print('Loading ' + fname + ' to redis') r = redis.StrictRedis(host = '127.0.0.1', port = 6379, db = 0); with open(fname, 'r') as f: for line_num, row in enumerate(f): if row.strip('') != '': if line_num < 100000000: l_key, l_val = parse_n3(row, 'csv') if line_num % 1000 == 0: print('loading line #', line_num, 'key=', l_key, ' = ', l_val) if l_key != '': r.set(l_key, l_val)
[ "def", "load_data", "(", "fname", ")", ":", "print", "(", "'Loading '", "+", "fname", "+", "' to redis'", ")", "r", "=", "redis", ".", "StrictRedis", "(", "host", "=", "'127.0.0.1'", ",", "port", "=", "6379", ",", "db", "=", "0", ")", "with", "open", "(", "fname", ",", "'r'", ")", "as", "f", ":", "for", "line_num", ",", "row", "in", "enumerate", "(", "f", ")", ":", "if", "row", ".", "strip", "(", "''", ")", "!=", "''", ":", "if", "line_num", "<", "100000000", ":", "l_key", ",", "l_val", "=", "parse_n3", "(", "row", ",", "'csv'", ")", "if", "line_num", "%", "1000", "==", "0", ":", "print", "(", "'loading line #'", ",", "line_num", ",", "'key='", ",", "l_key", ",", "' = '", ",", "l_val", ")", "if", "l_key", "!=", "''", ":", "r", ".", "set", "(", "l_key", ",", "l_val", ")" ]
loads previously exported CSV file to redis database
[ "loads", "previously", "exported", "CSV", "file", "to", "redis", "database" ]
fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/ontology/read_opencyc.py#L28-L40
train
acutesoftware/AIKIF
aikif/ontology/read_opencyc.py
parse_n3
def parse_n3(row, src='csv'): """ takes a row from an n3 file and returns the triple NOTE - currently parses a CSV line already split via cyc_extract.py """ if row.strip() == '': return '','' l_root = 'opencyc' key = '' val = '' if src == 'csv': cols = row.split(',') if len(cols) < 3: #print('PARSE ISSUE : ', row) return '','' key = '' val = '' key = l_root + ':' + cols[1].strip('"').strip() + ':' + cols[2].strip('"').strip() try: val = cols[3].strip('"').strip() except Exception: val = "Error parsing " + row elif src == 'n3': pass return key, val
python
def parse_n3(row, src='csv'): """ takes a row from an n3 file and returns the triple NOTE - currently parses a CSV line already split via cyc_extract.py """ if row.strip() == '': return '','' l_root = 'opencyc' key = '' val = '' if src == 'csv': cols = row.split(',') if len(cols) < 3: #print('PARSE ISSUE : ', row) return '','' key = '' val = '' key = l_root + ':' + cols[1].strip('"').strip() + ':' + cols[2].strip('"').strip() try: val = cols[3].strip('"').strip() except Exception: val = "Error parsing " + row elif src == 'n3': pass return key, val
[ "def", "parse_n3", "(", "row", ",", "src", "=", "'csv'", ")", ":", "if", "row", ".", "strip", "(", ")", "==", "''", ":", "return", "''", ",", "''", "l_root", "=", "'opencyc'", "key", "=", "''", "val", "=", "''", "if", "src", "==", "'csv'", ":", "cols", "=", "row", ".", "split", "(", "','", ")", "if", "len", "(", "cols", ")", "<", "3", ":", "return", "''", ",", "''", "key", "=", "''", "val", "=", "''", "key", "=", "l_root", "+", "':'", "+", "cols", "[", "1", "]", ".", "strip", "(", "'\"'", ")", ".", "strip", "(", ")", "+", "':'", "+", "cols", "[", "2", "]", ".", "strip", "(", "'\"'", ")", ".", "strip", "(", ")", "try", ":", "val", "=", "cols", "[", "3", "]", ".", "strip", "(", "'\"'", ")", ".", "strip", "(", ")", "except", "Exception", ":", "val", "=", "\"Error parsing \"", "+", "row", "elif", "src", "==", "'n3'", ":", "pass", "return", "key", ",", "val" ]
takes a row from an n3 file and returns the triple NOTE - currently parses a CSV line already split via cyc_extract.py
[ "takes", "a", "row", "from", "an", "n3", "file", "and", "returns", "the", "triple", "NOTE", "-", "currently", "parses", "a", "CSV", "line", "already", "split", "via", "cyc_extract", ".", "py" ]
fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/ontology/read_opencyc.py#L42-L67
train
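Since parse_n3 currently handles comma-separated lines (the 'csv' branch) rather than raw N3, here is a quick trace of the key/value it builds; the sample row is invented for the example:

row = '123,"Dog","comment","A domesticated canid"'

# same steps as the csv branch of parse_n3
cols = row.split(',')
key = 'opencyc' + ':' + cols[1].strip('"').strip() + ':' + cols[2].strip('"').strip()
val = cols[3].strip('"').strip()

print(key, '=', val)   # -> opencyc:Dog:comment = A domesticated canid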
acutesoftware/AIKIF
aikif/ontology/read_opencyc.py
summarise_file_as_html
def summarise_file_as_html(fname): """ takes a large data file and produces a HTML summary as html """ txt = '<H1>' + fname + '</H1>' num_lines = 0 print('Reading OpenCyc file - ', fname) with open(ip_folder + os.sep + fname, 'r') as f: txt += '<PRE>' for line in f: if line.strip() != '': num_lines += 1 if num_lines < 80: txt += str(num_lines) + ': ' + escape_html(line) + '' txt += '</PRE>' txt += 'Total lines = ' + str(num_lines) + '<BR><BR>' return txt
python
def summarise_file_as_html(fname): """ takes a large data file and produces a HTML summary as html """ txt = '<H1>' + fname + '</H1>' num_lines = 0 print('Reading OpenCyc file - ', fname) with open(ip_folder + os.sep + fname, 'r') as f: txt += '<PRE>' for line in f: if line.strip() != '': num_lines += 1 if num_lines < 80: txt += str(num_lines) + ': ' + escape_html(line) + '' txt += '</PRE>' txt += 'Total lines = ' + str(num_lines) + '<BR><BR>' return txt
[ "def", "summarise_file_as_html", "(", "fname", ")", ":", "txt", "=", "'<H1>'", "+", "fname", "+", "'</H1>'", "num_lines", "=", "0", "print", "(", "'Reading OpenCyc file - '", ",", "fname", ")", "with", "open", "(", "ip_folder", "+", "os", ".", "sep", "+", "fname", ",", "'r'", ")", "as", "f", ":", "txt", "+=", "'<PRE>'", "for", "line", "in", "f", ":", "if", "line", ".", "strip", "(", ")", "!=", "''", ":", "num_lines", "+=", "1", "if", "num_lines", "<", "80", ":", "txt", "+=", "str", "(", "num_lines", ")", "+", "': '", "+", "escape_html", "(", "line", ")", "+", "''", "txt", "+=", "'</PRE>'", "txt", "+=", "'Total lines = '", "+", "str", "(", "num_lines", ")", "+", "'<BR><BR>'", "return", "txt" ]
takes a large data file and produces an HTML summary
[ "takes", "a", "large", "data", "file", "and", "produces", "an", "HTML", "summary" ]
fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/ontology/read_opencyc.py#L88-L105
train
acutesoftware/AIKIF
scripts/examples/game_of_life_console.py
main
def main(): """ Example to show AIKIF logging of results. Generates a sequence of random grids and runs the Game of Life, saving results """ iterations = 9 # how many simulations to run years = 3 # how many times to run each simulation width = 22 # grid height height = 78 # grid width time_delay = 0.03 # delay when printing on screen lg = mod_log.Log('test') lg.record_process('Game of Life', 'game_of_life_console.py') for _ in range(iterations): s,e = run_game_of_life(years, width, height, time_delay, 'N') lg.record_result("Started with " + str(s) + " cells and ended with " + str(e) + " cells")
python
def main(): """ Example to show AIKIF logging of results. Generates a sequence of random grids and runs the Game of Life, saving results """ iterations = 9 # how many simulations to run years = 3 # how many times to run each simulation width = 22 # grid height height = 78 # grid width time_delay = 0.03 # delay when printing on screen lg = mod_log.Log('test') lg.record_process('Game of Life', 'game_of_life_console.py') for _ in range(iterations): s,e = run_game_of_life(years, width, height, time_delay, 'N') lg.record_result("Started with " + str(s) + " cells and ended with " + str(e) + " cells")
[ "def", "main", "(", ")", ":", "iterations", "=", "9", "years", "=", "3", "width", "=", "22", "height", "=", "78", "time_delay", "=", "0.03", "lg", "=", "mod_log", ".", "Log", "(", "'test'", ")", "lg", ".", "record_process", "(", "'Game of Life'", ",", "'game_of_life_console.py'", ")", "for", "_", "in", "range", "(", "iterations", ")", ":", "s", ",", "e", "=", "run_game_of_life", "(", "years", ",", "width", ",", "height", ",", "time_delay", ",", "'N'", ")", "lg", ".", "record_result", "(", "\"Started with \"", "+", "str", "(", "s", ")", "+", "\" cells and ended with \"", "+", "str", "(", "e", ")", "+", "\" cells\"", ")" ]
Example to show AIKIF logging of results. Generates a sequence of random grids and runs the Game of Life, saving results
[ "Example", "to", "show", "AIKIF", "logging", "of", "results", ".", "Generates", "a", "sequence", "of", "random", "grids", "and", "runs", "the", "Game", "of", "Life", "saving", "results" ]
fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/scripts/examples/game_of_life_console.py#L28-L43
train
acutesoftware/AIKIF
scripts/examples/game_of_life_console.py
run_game_of_life
def run_game_of_life(years, width, height, time_delay, silent="N"): """ run a single game of life for 'years' and log start and end living cells to aikif """ lfe = mod_grid.GameOfLife(width, height, ['.', 'x'], 1) set_random_starting_grid(lfe) lg.record_source(lfe, 'game_of_life_console.py') print(lfe) start_cells = lfe.count_filled_positions() for ndx, dummy_idx in enumerate(range(years)): lfe.update_gol() if silent == "N": print_there(1,1, "Game of Life - Iteration # " + str(ndx)) print_there(1, 2, lfe) time.sleep(time_delay) end_cells = lfe.count_filled_positions() return start_cells, end_cells
python
def run_game_of_life(years, width, height, time_delay, silent="N"): """ run a single game of life for 'years' and log start and end living cells to aikif """ lfe = mod_grid.GameOfLife(width, height, ['.', 'x'], 1) set_random_starting_grid(lfe) lg.record_source(lfe, 'game_of_life_console.py') print(lfe) start_cells = lfe.count_filled_positions() for ndx, dummy_idx in enumerate(range(years)): lfe.update_gol() if silent == "N": print_there(1,1, "Game of Life - Iteration # " + str(ndx)) print_there(1, 2, lfe) time.sleep(time_delay) end_cells = lfe.count_filled_positions() return start_cells, end_cells
[ "def", "run_game_of_life", "(", "years", ",", "width", ",", "height", ",", "time_delay", ",", "silent", "=", "\"N\"", ")", ":", "lfe", "=", "mod_grid", ".", "GameOfLife", "(", "width", ",", "height", ",", "[", "'.'", ",", "'x'", "]", ",", "1", ")", "set_random_starting_grid", "(", "lfe", ")", "lg", ".", "record_source", "(", "lfe", ",", "'game_of_life_console.py'", ")", "print", "(", "lfe", ")", "start_cells", "=", "lfe", ".", "count_filled_positions", "(", ")", "for", "ndx", ",", "dummy_idx", "in", "enumerate", "(", "range", "(", "years", ")", ")", ":", "lfe", ".", "update_gol", "(", ")", "if", "silent", "==", "\"N\"", ":", "print_there", "(", "1", ",", "1", ",", "\"Game of Life - Iteration # \"", "+", "str", "(", "ndx", ")", ")", "print_there", "(", "1", ",", "2", ",", "lfe", ")", "time", ".", "sleep", "(", "time_delay", ")", "end_cells", "=", "lfe", ".", "count_filled_positions", "(", ")", "return", "start_cells", ",", "end_cells" ]
run a single game of life for 'years' and log start and end living cells to aikif
[ "run", "a", "single", "game", "of", "life", "for", "years", "and", "log", "start", "and", "end", "living", "cells", "to", "aikif" ]
fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/scripts/examples/game_of_life_console.py#L45-L62
train
acutesoftware/AIKIF
scripts/examples/game_of_life_console.py
print_there
def print_there(x, y, text): """" allows display of a game of life on a console via resetting cursor position to a set point - looks 'ok' for testing but not production quality. """ sys.stdout.write("\x1b7\x1b[%d;%df%s\x1b8" % (x, y, text)) sys.stdout.flush()
python
def print_there(x, y, text): """" allows display of a game of life on a console via resetting cursor position to a set point - looks 'ok' for testing but not production quality. """ sys.stdout.write("\x1b7\x1b[%d;%df%s\x1b8" % (x, y, text)) sys.stdout.flush()
[ "def", "print_there", "(", "x", ",", "y", ",", "text", ")", ":", "sys", ".", "stdout", ".", "write", "(", "\"\\x1b7\\x1b[%d;%df%s\\x1b8\"", "%", "(", "x", ",", "y", ",", "text", ")", ")", "sys", ".", "stdout", ".", "flush", "(", ")" ]
allows display of a game of life on a console via resetting cursor position to a set point - looks 'ok' for testing but not production quality.
[ "allows", "display", "of", "a", "game", "of", "life", "on", "a", "console", "via", "resetting", "cursor", "position", "to", "a", "set", "point", "-", "looks", "ok", "for", "testing", "but", "not", "production", "quality", "." ]
fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/scripts/examples/game_of_life_console.py#L79-L86
train
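print_there depends on ANSI escape sequences: ESC 7 saves the cursor, CSI row;col f moves it, and ESC 8 restores it, so it only behaves on terminals that honour those codes. A trimmed-down standalone version with the sequences spelled out:

import sys
import time

def print_at(row, col, text):
    # \x1b7 = save cursor, \x1b[<row>;<col>f = move, \x1b8 = restore cursor
    sys.stdout.write("\x1b7\x1b[%d;%df%s\x1b8" % (row, col, text))
    sys.stdout.flush()

for i in range(5):
    print_at(1, 1, "tick %d" % i)   # keeps overwriting the same screen position
    time.sleep(0.2)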
acutesoftware/AIKIF
aikif/toolbox/text_tools.py
identify_col_pos
def identify_col_pos(txt): """ assume no delimiter in this file, so guess the best fixed column widths to split by """ res = [] #res.append(0) lines = txt.split('\n') prev_ch = '' for col_pos, ch in enumerate(lines[0]): if _is_white_space(ch) is False and _is_white_space(prev_ch) is True: res.append(col_pos) prev_ch = ch res.append(col_pos) return res
python
def identify_col_pos(txt): """ assume no delimiter in this file, so guess the best fixed column widths to split by """ res = [] #res.append(0) lines = txt.split('\n') prev_ch = '' for col_pos, ch in enumerate(lines[0]): if _is_white_space(ch) is False and _is_white_space(prev_ch) is True: res.append(col_pos) prev_ch = ch res.append(col_pos) return res
[ "def", "identify_col_pos", "(", "txt", ")", ":", "res", "=", "[", "]", "lines", "=", "txt", ".", "split", "(", "'\\n'", ")", "prev_ch", "=", "''", "for", "col_pos", ",", "ch", "in", "enumerate", "(", "lines", "[", "0", "]", ")", ":", "if", "_is_white_space", "(", "ch", ")", "is", "False", "and", "_is_white_space", "(", "prev_ch", ")", "is", "True", ":", "res", ".", "append", "(", "col_pos", ")", "prev_ch", "=", "ch", "res", ".", "append", "(", "col_pos", ")", "return", "res" ]
assume no delimiter in this file, so guess the best fixed column widths to split by
[ "assume", "no", "delimiter", "in", "this", "file", "so", "guess", "the", "best", "fixed", "column", "widths", "to", "split", "by" ]
fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/toolbox/text_tools.py#L27-L41
train
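identify_col_pos guesses fixed-width column starts from the first line by recording every non-space character that follows whitespace. A small worked example of the idea, using str.isspace in place of the module's private _is_white_space helper and treating the very first character as a column start (both assumptions for the sketch):

txt = ("NAME      AGE  CITY\n"
       "John      35   Perth\n")

line = txt.split('\n')[0]
res = []
prev_ch = ''
for col_pos, ch in enumerate(line):
    if not ch.isspace() and (prev_ch == '' or prev_ch.isspace()):
        res.append(col_pos)
    prev_ch = ch
res.append(col_pos)   # the function also appends the final column position

# res == [0, 10, 15, 18]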
acutesoftware/AIKIF
aikif/toolbox/text_tools.py
load_tbl_from_csv
def load_tbl_from_csv(fname): """ read a CSV file to list without worrying about odd characters """ import csv rows_to_load = [] with open(fname, 'r', encoding='cp1252', errors='ignore') as csvfile: csvreader = csv.reader(csvfile, delimiter = ',' ) reader = csv.reader(csvfile) rows_to_load = list(reader) return rows_to_load
python
def load_tbl_from_csv(fname): """ read a CSV file to list without worrying about odd characters """ import csv rows_to_load = [] with open(fname, 'r', encoding='cp1252', errors='ignore') as csvfile: csvreader = csv.reader(csvfile, delimiter = ',' ) reader = csv.reader(csvfile) rows_to_load = list(reader) return rows_to_load
[ "def", "load_tbl_from_csv", "(", "fname", ")", ":", "import", "csv", "rows_to_load", "=", "[", "]", "with", "open", "(", "fname", ",", "'r'", ",", "encoding", "=", "'cp1252'", ",", "errors", "=", "'ignore'", ")", "as", "csvfile", ":", "csvreader", "=", "csv", ".", "reader", "(", "csvfile", ",", "delimiter", "=", "','", ")", "reader", "=", "csv", ".", "reader", "(", "csvfile", ")", "rows_to_load", "=", "list", "(", "reader", ")", "return", "rows_to_load" ]
read a CSV file to list without worrying about odd characters
[ "read", "a", "CSV", "file", "to", "list", "without", "worrying", "about", "odd", "characters" ]
fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/toolbox/text_tools.py#L51-L66
train
acutesoftware/AIKIF
aikif/toolbox/text_tools.py
_get_dict_char_count
def _get_dict_char_count(txt): """ reads the characters in txt and returns a dictionary of all letters """ dct = {} for letter in txt: if letter in dct: dct[letter] += 1 else: dct[letter] = 1 return dct
python
def _get_dict_char_count(txt): """ reads the characters in txt and returns a dictionary of all letters """ dct = {} for letter in txt: if letter in dct: dct[letter] += 1 else: dct[letter] = 1 return dct
[ "def", "_get_dict_char_count", "(", "txt", ")", ":", "dct", "=", "{", "}", "for", "letter", "in", "txt", ":", "if", "letter", "in", "dct", ":", "dct", "[", "letter", "]", "+=", "1", "else", ":", "dct", "[", "letter", "]", "=", "1", "return", "dct" ]
reads the characters in txt and returns a dictionary of letter counts
[ "reads", "the", "characters", "in", "txt", "and", "returns", "a", "dictionary", "of", "letter", "counts" ]
fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/toolbox/text_tools.py#L100-L111
train
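_get_dict_char_count is the classic letter-frequency dictionary; collections.Counter produces the same mapping, which makes for an easy sanity check:

from collections import Counter

txt = 'banana'

dct = {}
for letter in txt:
    dct[letter] = dct.get(letter, 0) + 1

# dct == {'b': 1, 'a': 3, 'n': 2}
assert dct == dict(Counter(txt))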
Nachtfeuer/pipeline
spline/components/docker.py
Container.creator
def creator(entry, config): """Creator function for creating an instance of a Bash.""" template_file = os.path.join(os.path.dirname(__file__), 'templates/docker-container.sh.j2') with open(template_file) as handle: template = handle.read() # all fields are re-rendered via the Bash script wrapped_script = render(template, container={ 'image': 'centos:7' if 'image' not in entry else entry['image'], 'remove': True if 'remove' not in entry else str(entry['remove']).lower(), 'background': False if 'background' not in entry else str(entry['background']).lower(), 'mount': False if 'mount' not in entry else str(entry['mount']).lower(), 'network': '' if 'network' not in entry else entry['network'], 'labels': {} if 'labels' not in entry else entry['labels'], 'script': config.script }) config.script = wrapped_script return Container(config)
python
def creator(entry, config): """Creator function for creating an instance of a Bash.""" template_file = os.path.join(os.path.dirname(__file__), 'templates/docker-container.sh.j2') with open(template_file) as handle: template = handle.read() # all fields are re-rendered via the Bash script wrapped_script = render(template, container={ 'image': 'centos:7' if 'image' not in entry else entry['image'], 'remove': True if 'remove' not in entry else str(entry['remove']).lower(), 'background': False if 'background' not in entry else str(entry['background']).lower(), 'mount': False if 'mount' not in entry else str(entry['mount']).lower(), 'network': '' if 'network' not in entry else entry['network'], 'labels': {} if 'labels' not in entry else entry['labels'], 'script': config.script }) config.script = wrapped_script return Container(config)
[ "def", "creator", "(", "entry", ",", "config", ")", ":", "template_file", "=", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "dirname", "(", "__file__", ")", ",", "'templates/docker-container.sh.j2'", ")", "with", "open", "(", "template_file", ")", "as", "handle", ":", "template", "=", "handle", ".", "read", "(", ")", "wrapped_script", "=", "render", "(", "template", ",", "container", "=", "{", "'image'", ":", "'centos:7'", "if", "'image'", "not", "in", "entry", "else", "entry", "[", "'image'", "]", ",", "'remove'", ":", "True", "if", "'remove'", "not", "in", "entry", "else", "str", "(", "entry", "[", "'remove'", "]", ")", ".", "lower", "(", ")", ",", "'background'", ":", "False", "if", "'background'", "not", "in", "entry", "else", "str", "(", "entry", "[", "'background'", "]", ")", ".", "lower", "(", ")", ",", "'mount'", ":", "False", "if", "'mount'", "not", "in", "entry", "else", "str", "(", "entry", "[", "'mount'", "]", ")", ".", "lower", "(", ")", ",", "'network'", ":", "''", "if", "'network'", "not", "in", "entry", "else", "entry", "[", "'network'", "]", ",", "'labels'", ":", "{", "}", "if", "'labels'", "not", "in", "entry", "else", "entry", "[", "'labels'", "]", ",", "'script'", ":", "config", ".", "script", "}", ")", "config", ".", "script", "=", "wrapped_script", "return", "Container", "(", "config", ")" ]
Creator function for creating an instance of a Bash.
[ "Creator", "function", "for", "creating", "an", "instance", "of", "a", "Bash", "." ]
04ca18c4e95e4349532bb45b768206393e1f2c13
https://github.com/Nachtfeuer/pipeline/blob/04ca18c4e95e4349532bb45b768206393e1f2c13/spline/components/docker.py#L51-L70
train
Nachtfeuer/pipeline
spline/components/docker.py
Image.creator
def creator(entry, config): """Creator function for creating an instance of a Docker image script.""" # writing Dockerfile dockerfile = render(config.script, model=config.model, env=config.env, variables=config.variables, item=config.item) filename = "dockerfile.dry.run.see.comment" if not config.dry_run: temp = tempfile.NamedTemporaryFile( prefix="dockerfile-", mode='w+t', delete=False) temp.writelines(dockerfile) temp.close() filename = temp.name dockerfile = '' # rendering the Bash script for generating the Docker image name = entry['name'] + "-%s" % os.getpid() if entry['unique'] else entry['name'] tag = render(entry['tag'], model=config.model, env=config.env, item=config.item) template_file = os.path.join(os.path.dirname(__file__), 'templates/docker-image.sh.j2') with open(template_file) as handle: template = handle.read() config.script = render(template, name=name, tag=tag, dockerfile_content=dockerfile, dockerfile_filename=filename) return Image(config)
python
def creator(entry, config): """Creator function for creating an instance of a Docker image script.""" # writing Dockerfile dockerfile = render(config.script, model=config.model, env=config.env, variables=config.variables, item=config.item) filename = "dockerfile.dry.run.see.comment" if not config.dry_run: temp = tempfile.NamedTemporaryFile( prefix="dockerfile-", mode='w+t', delete=False) temp.writelines(dockerfile) temp.close() filename = temp.name dockerfile = '' # rendering the Bash script for generating the Docker image name = entry['name'] + "-%s" % os.getpid() if entry['unique'] else entry['name'] tag = render(entry['tag'], model=config.model, env=config.env, item=config.item) template_file = os.path.join(os.path.dirname(__file__), 'templates/docker-image.sh.j2') with open(template_file) as handle: template = handle.read() config.script = render(template, name=name, tag=tag, dockerfile_content=dockerfile, dockerfile_filename=filename) return Image(config)
[ "def", "creator", "(", "entry", ",", "config", ")", ":", "dockerfile", "=", "render", "(", "config", ".", "script", ",", "model", "=", "config", ".", "model", ",", "env", "=", "config", ".", "env", ",", "variables", "=", "config", ".", "variables", ",", "item", "=", "config", ".", "item", ")", "filename", "=", "\"dockerfile.dry.run.see.comment\"", "if", "not", "config", ".", "dry_run", ":", "temp", "=", "tempfile", ".", "NamedTemporaryFile", "(", "prefix", "=", "\"dockerfile-\"", ",", "mode", "=", "'w+t'", ",", "delete", "=", "False", ")", "temp", ".", "writelines", "(", "dockerfile", ")", "temp", ".", "close", "(", ")", "filename", "=", "temp", ".", "name", "dockerfile", "=", "''", "name", "=", "entry", "[", "'name'", "]", "+", "\"-%s\"", "%", "os", ".", "getpid", "(", ")", "if", "entry", "[", "'unique'", "]", "else", "entry", "[", "'name'", "]", "tag", "=", "render", "(", "entry", "[", "'tag'", "]", ",", "model", "=", "config", ".", "model", ",", "env", "=", "config", ".", "env", ",", "item", "=", "config", ".", "item", ")", "template_file", "=", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "dirname", "(", "__file__", ")", ",", "'templates/docker-image.sh.j2'", ")", "with", "open", "(", "template_file", ")", "as", "handle", ":", "template", "=", "handle", ".", "read", "(", ")", "config", ".", "script", "=", "render", "(", "template", ",", "name", "=", "name", ",", "tag", "=", "tag", ",", "dockerfile_content", "=", "dockerfile", ",", "dockerfile_filename", "=", "filename", ")", "return", "Image", "(", "config", ")" ]
Creator function for creating an instance of a Docker image script.
[ "Creator", "function", "for", "creating", "an", "instance", "of", "a", "Docker", "image", "script", "." ]
04ca18c4e95e4349532bb45b768206393e1f2c13
https://github.com/Nachtfeuer/pipeline/blob/04ca18c4e95e4349532bb45b768206393e1f2c13/spline/components/docker.py#L85-L111
train
Nachtfeuer/pipeline
spline/tools/stream.py
stdout_redirector
def stdout_redirector(): """ Simplify redirect of stdout. Taken from here: https://eli.thegreenplace.net/2015/redirecting-all-kinds-of-stdout-in-python/ """ old_stdout = sys.stdout sys.stdout = Stream() try: yield sys.stdout finally: sys.stdout.close() sys.stdout = old_stdout
python
def stdout_redirector(): """ Simplify redirect of stdout. Taken from here: https://eli.thegreenplace.net/2015/redirecting-all-kinds-of-stdout-in-python/ """ old_stdout = sys.stdout sys.stdout = Stream() try: yield sys.stdout finally: sys.stdout.close() sys.stdout = old_stdout
[ "def", "stdout_redirector", "(", ")", ":", "old_stdout", "=", "sys", ".", "stdout", "sys", ".", "stdout", "=", "Stream", "(", ")", "try", ":", "yield", "sys", ".", "stdout", "finally", ":", "sys", ".", "stdout", ".", "close", "(", ")", "sys", ".", "stdout", "=", "old_stdout" ]
Simplify redirect of stdout. Taken from here: https://eli.thegreenplace.net/2015/redirecting-all-kinds-of-stdout-in-python/
[ "Simplify", "redirect", "of", "stdout", "." ]
04ca18c4e95e4349532bb45b768206393e1f2c13
https://github.com/Nachtfeuer/pipeline/blob/04ca18c4e95e4349532bb45b768206393e1f2c13/spline/tools/stream.py#L34-L46
train
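The stdout_redirector record above is a yield-based helper for capturing stdout. A minimal usage sketch, assuming the real module wraps the generator with contextlib.contextmanager (the decorator is not visible in the flattened snippet) and that spline.tools.stream is the import path implied by the record's path field:

from spline.tools.stream import stdout_redirector  # import path assumed from the record's path field

# Everything printed inside the block goes to the replacement Stream object;
# the original sys.stdout is restored when the block exits.
with stdout_redirector() as stream:
    print("captured line")
print("back on the real stdout")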
Nachtfeuer/pipeline
spline/tools/stream.py
write_temporary_file
def write_temporary_file(content, prefix='', suffix=''): """ Generating a temporary file with content. Args: content (str): file content (usually a script, Dockerfile, playbook or config file) prefix (str): the filename starts with this prefix (default: no prefix) suffix (str): the filename ends with this suffix (default: no suffix) Returns: str: name of the temporary file Note: You are responsible for the deletion of the file. """ temp = tempfile.NamedTemporaryFile(prefix=prefix, suffix=suffix, mode='w+t', delete=False) temp.writelines(content) temp.close() return temp.name
python
def write_temporary_file(content, prefix='', suffix=''): """ Generating a temporary file with content. Args: content (str): file content (usually a script, Dockerfile, playbook or config file) prefix (str): the filename starts with this prefix (default: no prefix) suffix (str): the filename ends with this suffix (default: no suffix) Returns: str: name of the temporary file Note: You are responsible for the deletion of the file. """ temp = tempfile.NamedTemporaryFile(prefix=prefix, suffix=suffix, mode='w+t', delete=False) temp.writelines(content) temp.close() return temp.name
[ "def", "write_temporary_file", "(", "content", ",", "prefix", "=", "''", ",", "suffix", "=", "''", ")", ":", "temp", "=", "tempfile", ".", "NamedTemporaryFile", "(", "prefix", "=", "prefix", ",", "suffix", "=", "suffix", ",", "mode", "=", "'w+t'", ",", "delete", "=", "False", ")", "temp", ".", "writelines", "(", "content", ")", "temp", ".", "close", "(", ")", "return", "temp", ".", "name" ]
Generating a temporary file with content. Args: content (str): file content (usually a script, Dockerfile, playbook or config file) prefix (str): the filename starts with this prefix (default: no prefix) suffix (str): the filename ends with this suffix (default: no suffix) Returns: str: name of the temporary file Note: You are responsible for the deletion of the file.
[ "Generating", "a", "temporary", "file", "with", "content", "." ]
04ca18c4e95e4349532bb45b768206393e1f2c13
https://github.com/Nachtfeuer/pipeline/blob/04ca18c4e95e4349532bb45b768206393e1f2c13/spline/tools/stream.py#L49-L67
train
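A short usage sketch for write_temporary_file under the same assumed import path; the script content and prefix/suffix are illustrative, and the caller deletes the file afterwards, as the docstring notes:

import os

from spline.tools.stream import write_temporary_file  # import path assumed from the record's path field

# Write a small shell script to a uniquely named temporary file.
path = write_temporary_file("#!/bin/bash\necho hello\n", prefix="demo-", suffix=".sh")
print(path)       # e.g. /tmp/demo-xxxxxx.sh (actual name varies)
os.remove(path)   # cleanup is the caller's responsibility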
aartur/mschematool
mschematool/cli.py
print_new
def print_new(ctx, name, migration_type): """Prints filename of a new migration""" click.echo(ctx.obj.repository.generate_migration_name(name, migration_type))
python
def print_new(ctx, name, migration_type): """Prints filename of a new migration""" click.echo(ctx.obj.repository.generate_migration_name(name, migration_type))
[ "def", "print_new", "(", "ctx", ",", "name", ",", "migration_type", ")", ":", "click", ".", "echo", "(", "ctx", ".", "obj", ".", "repository", ".", "generate_migration_name", "(", "name", ",", "migration_type", ")", ")" ]
Prints filename of a new migration
[ "Prints", "filename", "of", "a", "new", "migration" ]
57ec9541f80b44890294126eab92ce243c8833c4
https://github.com/aartur/mschematool/blob/57ec9541f80b44890294126eab92ce243c8833c4/mschematool/cli.py#L81-L83
train
acutesoftware/AIKIF
aikif/agents/agent.py
Agent.start
def start(self): """ Starts an agent with standard logging """ self.running = True self.status = 'RUNNING' self.mylog.record_process('agent', self.name + ' - starting')
python
def start(self): """ Starts an agent with standard logging """ self.running = True self.status = 'RUNNING' self.mylog.record_process('agent', self.name + ' - starting')
[ "def", "start", "(", "self", ")", ":", "self", ".", "running", "=", "True", "self", ".", "status", "=", "'RUNNING'", "self", ".", "mylog", ".", "record_process", "(", "'agent'", ",", "self", ".", "name", "+", "' - starting'", ")" ]
Starts an agent with standard logging
[ "Starts", "an", "agent", "with", "standard", "logging" ]
fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/agents/agent.py#L96-L102
train
acutesoftware/AIKIF
aikif/agents/agent.py
Agent.set_coords
def set_coords(self, x=0, y=0, z=0, t=0): """ set coords of agent in an arbitrary world """ self.coords = {} self.coords['x'] = x self.coords['y'] = y self.coords['z'] = z self.coords['t'] = t
python
def set_coords(self, x=0, y=0, z=0, t=0): """ set coords of agent in an arbitrary world """ self.coords = {} self.coords['x'] = x self.coords['y'] = y self.coords['z'] = z self.coords['t'] = t
[ "def", "set_coords", "(", "self", ",", "x", "=", "0", ",", "y", "=", "0", ",", "z", "=", "0", ",", "t", "=", "0", ")", ":", "self", ".", "coords", "=", "{", "}", "self", ".", "coords", "[", "'x'", "]", "=", "x", "self", ".", "coords", "[", "'y'", "]", "=", "y", "self", ".", "coords", "[", "'z'", "]", "=", "z", "self", ".", "coords", "[", "'t'", "]", "=", "t" ]
set coords of agent in an arbitrary world
[ "set", "coords", "of", "agent", "in", "an", "arbitrary", "world" ]
fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/agents/agent.py#L104-L112
train
OpenHydrology/floodestimation
floodestimation/loaders.py
from_file
def from_file(file_path, incl_pot=True): """ Load catchment object from a ``.CD3`` or ``.xml`` file. If there is also a corresponding ``.AM`` file (annual maximum flow data) or a ``.PT`` file (peaks over threshold data) in the same folder as the CD3 file, these datasets will also be loaded. :param file_path: Location of CD3 or xml file :type file_path: str :return: Catchment object with the :attr:`amax_records` and :attr:`pot_dataset` attributes set (if data available). :rtype: :class:`.entities.Catchment` :param incl_pot: Whether to load the POT (peaks-over-threshold) data. Default: ``True``. :type incl_pot: bool """ filename, ext = os.path.splitext(file_path) am_file_path = filename + '.AM' pot_file_path = filename + '.PT' parser_by_ext = { '.cd3': parsers.Cd3Parser, '.xml': parsers.XmlCatchmentParser } catchment = parser_by_ext[ext.lower()]().parse(file_path) # AMAX records try: catchment.amax_records = parsers.AmaxParser().parse(am_file_path) except FileNotFoundError: catchment.amax_records = [] # POT records if incl_pot: try: catchment.pot_dataset = parsers.PotParser().parse(pot_file_path) except FileNotFoundError: pass return catchment
python
def from_file(file_path, incl_pot=True): """ Load catchment object from a ``.CD3`` or ``.xml`` file. If there is also a corresponding ``.AM`` file (annual maximum flow data) or a ``.PT`` file (peaks over threshold data) in the same folder as the CD3 file, these datasets will also be loaded. :param file_path: Location of CD3 or xml file :type file_path: str :return: Catchment object with the :attr:`amax_records` and :attr:`pot_dataset` attributes set (if data available). :rtype: :class:`.entities.Catchment` :param incl_pot: Whether to load the POT (peaks-over-threshold) data. Default: ``True``. :type incl_pot: bool """ filename, ext = os.path.splitext(file_path) am_file_path = filename + '.AM' pot_file_path = filename + '.PT' parser_by_ext = { '.cd3': parsers.Cd3Parser, '.xml': parsers.XmlCatchmentParser } catchment = parser_by_ext[ext.lower()]().parse(file_path) # AMAX records try: catchment.amax_records = parsers.AmaxParser().parse(am_file_path) except FileNotFoundError: catchment.amax_records = [] # POT records if incl_pot: try: catchment.pot_dataset = parsers.PotParser().parse(pot_file_path) except FileNotFoundError: pass return catchment
[ "def", "from_file", "(", "file_path", ",", "incl_pot", "=", "True", ")", ":", "filename", ",", "ext", "=", "os", ".", "path", ".", "splitext", "(", "file_path", ")", "am_file_path", "=", "filename", "+", "'.AM'", "pot_file_path", "=", "filename", "+", "'.PT'", "parser_by_ext", "=", "{", "'.cd3'", ":", "parsers", ".", "Cd3Parser", ",", "'.xml'", ":", "parsers", ".", "XmlCatchmentParser", "}", "catchment", "=", "parser_by_ext", "[", "ext", ".", "lower", "(", ")", "]", "(", ")", ".", "parse", "(", "file_path", ")", "try", ":", "catchment", ".", "amax_records", "=", "parsers", ".", "AmaxParser", "(", ")", ".", "parse", "(", "am_file_path", ")", "except", "FileNotFoundError", ":", "catchment", ".", "amax_records", "=", "[", "]", "if", "incl_pot", ":", "try", ":", "catchment", ".", "pot_dataset", "=", "parsers", ".", "PotParser", "(", ")", ".", "parse", "(", "pot_file_path", ")", "except", "FileNotFoundError", ":", "pass", "return", "catchment" ]
Load catchment object from a ``.CD3`` or ``.xml`` file. If there is also a corresponding ``.AM`` file (annual maximum flow data) or a ``.PT`` file (peaks over threshold data) in the same folder as the CD3 file, these datasets will also be loaded. :param file_path: Location of CD3 or xml file :type file_path: str :return: Catchment object with the :attr:`amax_records` and :attr:`pot_dataset` attributes set (if data available). :rtype: :class:`.entities.Catchment` :param incl_pot: Whether to load the POT (peaks-over-threshold) data. Default: ``True``. :type incl_pot: bool
[ "Load", "catchment", "object", "from", "a", ".", "CD3", "or", ".", "xml", "file", "." ]
782da7c5abd1348923129efe89fb70003ebb088c
https://github.com/OpenHydrology/floodestimation/blob/782da7c5abd1348923129efe89fb70003ebb088c/floodestimation/loaders.py#L30-L66
train
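A usage sketch for loaders.from_file with a hypothetical CD3 file name; matching .AM and .PT files in the same folder are picked up automatically when they exist:

from floodestimation import loaders  # module path taken from the record

# '8002.CD3' is a hypothetical catchment descriptor file name.
catchment = loaders.from_file('8002.CD3', incl_pot=False)
print(len(catchment.amax_records))  # empty list when no .AM file was found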
OpenHydrology/floodestimation
floodestimation/loaders.py
to_db
def to_db(catchment, session, method='create', autocommit=False): """ Load catchment object into the database. A catchment/station number (:attr:`catchment.id`) must be provided. If :attr:`method` is set to `update`, any existing catchment in the database with the same catchment number will be updated. :param catchment: New catchment object to replace any existing catchment in the database :type catchment: :class:`.entities.Catchment` :param session: Database session to use, typically `floodestimation.db.Session()` :type session: :class:`sqlalchemy.orm.session.Session` :param method: - ``create``: only new catchments will be loaded, it must not already exist in the database. - ``update``: any existing catchment in the database will be updated. Otherwise it will be created. :type method: str :param autocommit: Whether to commit the database session immediately. Default: ``False``. :type autocommit: bool """ if not catchment.id: raise ValueError("Catchment/station number (`catchment.id`) must be set.") if method == 'create': session.add(catchment) elif method == 'update': session.merge(catchment) else: raise ValueError("Method `{}` invalid. Use either `create` or `update`.") if autocommit: session.commit()
python
def to_db(catchment, session, method='create', autocommit=False): """ Load catchment object into the database. A catchment/station number (:attr:`catchment.id`) must be provided. If :attr:`method` is set to `update`, any existing catchment in the database with the same catchment number will be updated. :param catchment: New catchment object to replace any existing catchment in the database :type catchment: :class:`.entities.Catchment` :param session: Database session to use, typically `floodestimation.db.Session()` :type session: :class:`sqlalchemy.orm.session.Session` :param method: - ``create``: only new catchments will be loaded, it must not already exist in the database. - ``update``: any existing catchment in the database will be updated. Otherwise it will be created. :type method: str :param autocommit: Whether to commit the database session immediately. Default: ``False``. :type autocommit: bool """ if not catchment.id: raise ValueError("Catchment/station number (`catchment.id`) must be set.") if method == 'create': session.add(catchment) elif method == 'update': session.merge(catchment) else: raise ValueError("Method `{}` invalid. Use either `create` or `update`.") if autocommit: session.commit()
[ "def", "to_db", "(", "catchment", ",", "session", ",", "method", "=", "'create'", ",", "autocommit", "=", "False", ")", ":", "if", "not", "catchment", ".", "id", ":", "raise", "ValueError", "(", "\"Catchment/station number (`catchment.id`) must be set.\"", ")", "if", "method", "==", "'create'", ":", "session", ".", "add", "(", "catchment", ")", "elif", "method", "==", "'update'", ":", "session", ".", "merge", "(", "catchment", ")", "else", ":", "raise", "ValueError", "(", "\"Method `{}` invalid. Use either `create` or `update`.\"", ")", "if", "autocommit", ":", "session", ".", "commit", "(", ")" ]
Load catchment object into the database. A catchment/station number (:attr:`catchment.id`) must be provided. If :attr:`method` is set to `update`, any existing catchment in the database with the same catchment number will be updated. :param catchment: New catchment object to replace any existing catchment in the database :type catchment: :class:`.entities.Catchment` :param session: Database session to use, typically `floodestimation.db.Session()` :type session: :class:`sqlalchemy.orm.session.Session` :param method: - ``create``: only new catchments will be loaded, it must not already exist in the database. - ``update``: any existing catchment in the database will be updated. Otherwise it will be created. :type method: str :param autocommit: Whether to commit the database session immediately. Default: ``False``. :type autocommit: bool
[ "Load", "catchment", "object", "into", "the", "database", "." ]
782da7c5abd1348923129efe89fb70003ebb088c
https://github.com/OpenHydrology/floodestimation/blob/782da7c5abd1348923129efe89fb70003ebb088c/floodestimation/loaders.py#L69-L96
train
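A usage sketch for loaders.to_db, using floodestimation.db.Session() as suggested by the docstring; the catchment comes from a prior from_file call and the file name is hypothetical:

from floodestimation import db, loaders  # module paths assumed from the record and its docstring

session = db.Session()
catchment = loaders.from_file('8002.CD3')  # hypothetical file, as above
# 'update' merges into an existing row when the catchment id already exists.
loaders.to_db(catchment, session, method='update', autocommit=True)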
OpenHydrology/floodestimation
floodestimation/loaders.py
userdata_to_db
def userdata_to_db(session, method='update', autocommit=False): """ Add catchments from a user folder to the database. The user folder is specified in the ``config.ini`` file like this:: [import] folder = path/to/import/folder If this configuration key does not exist this will be silently ignored. :param session: database session to use, typically `floodestimation.db.Session()` :type session: :class:`sqlalchemy.orm.session.Session` :param method: - ``create``: only new catchments will be loaded, it must not already exist in the database. - ``update``: any existing catchment in the database will be updated. Otherwise it will be created. :type method: str :param autocommit: Whether to commit the database session immediately. Default: ``False``. :type autocommit: bool """ try: folder = config['import']['folder'] except KeyError: return if folder: folder_to_db(folder, session, method=method, autocommit=autocommit)
python
def userdata_to_db(session, method='update', autocommit=False): """ Add catchments from a user folder to the database. The user folder is specified in the ``config.ini`` file like this:: [import] folder = path/to/import/folder If this configuration key does not exist this will be silently ignored. :param session: database session to use, typically `floodestimation.db.Session()` :type session: :class:`sqlalchemy.orm.session.Session` :param method: - ``create``: only new catchments will be loaded, it must not already exist in the database. - ``update``: any existing catchment in the database will be updated. Otherwise it will be created. :type method: str :param autocommit: Whether to commit the database session immediately. Default: ``False``. :type autocommit: bool """ try: folder = config['import']['folder'] except KeyError: return if folder: folder_to_db(folder, session, method=method, autocommit=autocommit)
[ "def", "userdata_to_db", "(", "session", ",", "method", "=", "'update'", ",", "autocommit", "=", "False", ")", ":", "try", ":", "folder", "=", "config", "[", "'import'", "]", "[", "'folder'", "]", "except", "KeyError", ":", "return", "if", "folder", ":", "folder_to_db", "(", "folder", ",", "session", ",", "method", "=", "method", ",", "autocommit", "=", "autocommit", ")" ]
Add catchments from a user folder to the database. The user folder is specified in the ``config.ini`` file like this:: [import] folder = path/to/import/folder If this configuration key does not exist this will be silently ignored. :param session: database session to use, typically `floodestimation.db.Session()` :type session: :class:`sqlalchemy.orm.session.Session` :param method: - ``create``: only new catchments will be loaded, it must not already exist in the database. - ``update``: any existing catchment in the database will be updated. Otherwise it will be created. :type method: str :param autocommit: Whether to commit the database session immediately. Default: ``False``. :type autocommit: bool
[ "Add", "catchments", "from", "a", "user", "folder", "to", "the", "database", "." ]
782da7c5abd1348923129efe89fb70003ebb088c
https://github.com/OpenHydrology/floodestimation/blob/782da7c5abd1348923129efe89fb70003ebb088c/floodestimation/loaders.py#L151-L176
train
acutesoftware/AIKIF
aikif/toolbox/interface_windows_tools.py
send_text
def send_text(hwnd, txt): """ sends the text 'txt' to the window handle hwnd using SendMessage """ try: for c in txt: if c == '\n': win32api.SendMessage(hwnd, win32con.WM_KEYDOWN, win32con.VK_RETURN, 0) win32api.SendMessage(hwnd, win32con.WM_KEYUP, win32con.VK_RETURN, 0) else: win32api.SendMessage(hwnd, win32con.WM_CHAR, ord(c), 0) except Exception as ex: print('error calling SendMessage ' + str(ex))
python
def send_text(hwnd, txt): """ sends the text 'txt' to the window handle hwnd using SendMessage """ try: for c in txt: if c == '\n': win32api.SendMessage(hwnd, win32con.WM_KEYDOWN, win32con.VK_RETURN, 0) win32api.SendMessage(hwnd, win32con.WM_KEYUP, win32con.VK_RETURN, 0) else: win32api.SendMessage(hwnd, win32con.WM_CHAR, ord(c), 0) except Exception as ex: print('error calling SendMessage ' + str(ex))
[ "def", "send_text", "(", "hwnd", ",", "txt", ")", ":", "try", ":", "for", "c", "in", "txt", ":", "if", "c", "==", "'\\n'", ":", "win32api", ".", "SendMessage", "(", "hwnd", ",", "win32con", ".", "WM_KEYDOWN", ",", "win32con", ".", "VK_RETURN", ",", "0", ")", "win32api", ".", "SendMessage", "(", "hwnd", ",", "win32con", ".", "WM_KEYUP", ",", "win32con", ".", "VK_RETURN", ",", "0", ")", "else", ":", "win32api", ".", "SendMessage", "(", "hwnd", ",", "win32con", ".", "WM_CHAR", ",", "ord", "(", "c", ")", ",", "0", ")", "except", "Exception", "as", "ex", ":", "print", "(", "'error calling SendMessage '", "+", "str", "(", "ex", ")", ")" ]
sends the text 'txt' to the window handle hwnd using SendMessage
[ "sends", "the", "text", "txt", "to", "the", "window", "handle", "hwnd", "using", "SendMessage" ]
fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/toolbox/interface_windows_tools.py#L43-L55
train
acutesoftware/AIKIF
aikif/toolbox/interface_windows_tools.py
launch_app
def launch_app(app_path, params=[], time_before_kill_app=15): """ start an app """ import subprocess try: res = subprocess.call([app_path, params], timeout=time_before_kill_app, shell=True) print('res = ', res) if res == 0: return True else: return False except Exception as ex: print('error launching app ' + str(app_path) + ' with params ' + str(params) + '\n' + str(ex)) return False
python
def launch_app(app_path, params=[], time_before_kill_app=15): """ start an app """ import subprocess try: res = subprocess.call([app_path, params], timeout=time_before_kill_app, shell=True) print('res = ', res) if res == 0: return True else: return False except Exception as ex: print('error launching app ' + str(app_path) + ' with params ' + str(params) + '\n' + str(ex)) return False
[ "def", "launch_app", "(", "app_path", ",", "params", "=", "[", "]", ",", "time_before_kill_app", "=", "15", ")", ":", "import", "subprocess", "try", ":", "res", "=", "subprocess", ".", "call", "(", "[", "app_path", ",", "params", "]", ",", "timeout", "=", "time_before_kill_app", ",", "shell", "=", "True", ")", "print", "(", "'res = '", ",", "res", ")", "if", "res", "==", "0", ":", "return", "True", "else", ":", "return", "False", "except", "Exception", "as", "ex", ":", "print", "(", "'error launching app '", "+", "str", "(", "app_path", ")", "+", "' with params '", "+", "str", "(", "params", ")", "+", "'\\n'", "+", "str", "(", "ex", ")", ")", "return", "False" ]
start an app
[ "start", "an", "app" ]
fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/toolbox/interface_windows_tools.py#L57-L71
train
acutesoftware/AIKIF
aikif/toolbox/interface_windows_tools.py
app_activate
def app_activate(caption): """ use shell to bring the application with caption to front """ try: shell = win32com.client.Dispatch("WScript.Shell") shell.AppActivate(caption) except Exception as ex: print('error calling win32com.client.Dispatch (AppActivate)')
python
def app_activate(caption): """ use shell to bring the application with caption to front """ try: shell = win32com.client.Dispatch("WScript.Shell") shell.AppActivate(caption) except Exception as ex: print('error calling win32com.client.Dispatch (AppActivate)')
[ "def", "app_activate", "(", "caption", ")", ":", "try", ":", "shell", "=", "win32com", ".", "client", ".", "Dispatch", "(", "\"WScript.Shell\"", ")", "shell", ".", "AppActivate", "(", "caption", ")", "except", "Exception", "as", "ex", ":", "print", "(", "'error calling win32com.client.Dispatch (AppActivate)'", ")" ]
use shell to bring the application with caption to front
[ "use", "shell", "to", "bring", "the", "application", "with", "caption", "to", "front" ]
fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/toolbox/interface_windows_tools.py#L74-L82
train
OpenHydrology/floodestimation
floodestimation/collections.py
CatchmentCollections.most_similar_catchments
def most_similar_catchments(self, subject_catchment, similarity_dist_function, records_limit=500, include_subject_catchment='auto'): """ Return a list of catchments sorted by hydrological similarity defined by `similarity_distance_function` :param subject_catchment: subject catchment to find similar catchments for :type subject_catchment: :class:`floodestimation.entities.Catchment` :param similarity_dist_function: a method returning a similarity distance measure with 2 arguments, both :class:`floodestimation.entities.Catchment` objects :param include_subject_catchment: - `auto`: include subject catchment if suitable for pooling and if urbext < 0.03 - `force`: always include subject catchment having at least 10 years of data - `exclude`: do not include the subject catchment :type include_subject_catchment: str :return: list of catchments sorted by similarity :type: list of :class:`floodestimation.entities.Catchment` """ if include_subject_catchment not in ['auto', 'force', 'exclude']: raise ValueError("Parameter `include_subject_catchment={}` invalid.".format(include_subject_catchment) + "Must be one of `auto`, `force` or `exclude`.") query = (self.db_session.query(Catchment). join(Catchment.descriptors). join(Catchment.amax_records). filter(Catchment.id != subject_catchment.id, Catchment.is_suitable_for_pooling, or_(Descriptors.urbext2000 < 0.03, Descriptors.urbext2000 == None), AmaxRecord.flag == 0). group_by(Catchment). having(func.count(AmaxRecord.catchment_id) >= 10)) # At least 10 AMAX records catchments = query.all() # Add subject catchment if required (may not exist in database, so add after querying db if include_subject_catchment == 'force': if len(subject_catchment.amax_records) >= 10: # Never include short-record catchments catchments.append(subject_catchment) elif include_subject_catchment == 'auto': if len(subject_catchment.amax_records) >= 10 and subject_catchment.is_suitable_for_pooling and \ (subject_catchment.descriptors.urbext2000 < 0.03 or subject_catchment.descriptors.urbext2000 is None): catchments.append(subject_catchment) # Store the similarity distance as an additional attribute for each catchment for catchment in catchments: catchment.similarity_dist = similarity_dist_function(subject_catchment, catchment) # Then simply sort by this attribute catchments.sort(key=attrgetter('similarity_dist')) # Limit catchments until total amax_records counts is at least `records_limit`, default 500 amax_records_count = 0 catchments_limited = [] for catchment in catchments: catchments_limited.append(catchment) amax_records_count += catchment.record_length if amax_records_count >= records_limit: break return catchments_limited
python
def most_similar_catchments(self, subject_catchment, similarity_dist_function, records_limit=500, include_subject_catchment='auto'): """ Return a list of catchments sorted by hydrological similarity defined by `similarity_distance_function` :param subject_catchment: subject catchment to find similar catchments for :type subject_catchment: :class:`floodestimation.entities.Catchment` :param similarity_dist_function: a method returning a similarity distance measure with 2 arguments, both :class:`floodestimation.entities.Catchment` objects :param include_subject_catchment: - `auto`: include subject catchment if suitable for pooling and if urbext < 0.03 - `force`: always include subject catchment having at least 10 years of data - `exclude`: do not include the subject catchment :type include_subject_catchment: str :return: list of catchments sorted by similarity :type: list of :class:`floodestimation.entities.Catchment` """ if include_subject_catchment not in ['auto', 'force', 'exclude']: raise ValueError("Parameter `include_subject_catchment={}` invalid.".format(include_subject_catchment) + "Must be one of `auto`, `force` or `exclude`.") query = (self.db_session.query(Catchment). join(Catchment.descriptors). join(Catchment.amax_records). filter(Catchment.id != subject_catchment.id, Catchment.is_suitable_for_pooling, or_(Descriptors.urbext2000 < 0.03, Descriptors.urbext2000 == None), AmaxRecord.flag == 0). group_by(Catchment). having(func.count(AmaxRecord.catchment_id) >= 10)) # At least 10 AMAX records catchments = query.all() # Add subject catchment if required (may not exist in database, so add after querying db if include_subject_catchment == 'force': if len(subject_catchment.amax_records) >= 10: # Never include short-record catchments catchments.append(subject_catchment) elif include_subject_catchment == 'auto': if len(subject_catchment.amax_records) >= 10 and subject_catchment.is_suitable_for_pooling and \ (subject_catchment.descriptors.urbext2000 < 0.03 or subject_catchment.descriptors.urbext2000 is None): catchments.append(subject_catchment) # Store the similarity distance as an additional attribute for each catchment for catchment in catchments: catchment.similarity_dist = similarity_dist_function(subject_catchment, catchment) # Then simply sort by this attribute catchments.sort(key=attrgetter('similarity_dist')) # Limit catchments until total amax_records counts is at least `records_limit`, default 500 amax_records_count = 0 catchments_limited = [] for catchment in catchments: catchments_limited.append(catchment) amax_records_count += catchment.record_length if amax_records_count >= records_limit: break return catchments_limited
[ "def", "most_similar_catchments", "(", "self", ",", "subject_catchment", ",", "similarity_dist_function", ",", "records_limit", "=", "500", ",", "include_subject_catchment", "=", "'auto'", ")", ":", "if", "include_subject_catchment", "not", "in", "[", "'auto'", ",", "'force'", ",", "'exclude'", "]", ":", "raise", "ValueError", "(", "\"Parameter `include_subject_catchment={}` invalid.\"", ".", "format", "(", "include_subject_catchment", ")", "+", "\"Must be one of `auto`, `force` or `exclude`.\"", ")", "query", "=", "(", "self", ".", "db_session", ".", "query", "(", "Catchment", ")", ".", "join", "(", "Catchment", ".", "descriptors", ")", ".", "join", "(", "Catchment", ".", "amax_records", ")", ".", "filter", "(", "Catchment", ".", "id", "!=", "subject_catchment", ".", "id", ",", "Catchment", ".", "is_suitable_for_pooling", ",", "or_", "(", "Descriptors", ".", "urbext2000", "<", "0.03", ",", "Descriptors", ".", "urbext2000", "==", "None", ")", ",", "AmaxRecord", ".", "flag", "==", "0", ")", ".", "group_by", "(", "Catchment", ")", ".", "having", "(", "func", ".", "count", "(", "AmaxRecord", ".", "catchment_id", ")", ">=", "10", ")", ")", "catchments", "=", "query", ".", "all", "(", ")", "if", "include_subject_catchment", "==", "'force'", ":", "if", "len", "(", "subject_catchment", ".", "amax_records", ")", ">=", "10", ":", "catchments", ".", "append", "(", "subject_catchment", ")", "elif", "include_subject_catchment", "==", "'auto'", ":", "if", "len", "(", "subject_catchment", ".", "amax_records", ")", ">=", "10", "and", "subject_catchment", ".", "is_suitable_for_pooling", "and", "(", "subject_catchment", ".", "descriptors", ".", "urbext2000", "<", "0.03", "or", "subject_catchment", ".", "descriptors", ".", "urbext2000", "is", "None", ")", ":", "catchments", ".", "append", "(", "subject_catchment", ")", "for", "catchment", "in", "catchments", ":", "catchment", ".", "similarity_dist", "=", "similarity_dist_function", "(", "subject_catchment", ",", "catchment", ")", "catchments", ".", "sort", "(", "key", "=", "attrgetter", "(", "'similarity_dist'", ")", ")", "amax_records_count", "=", "0", "catchments_limited", "=", "[", "]", "for", "catchment", "in", "catchments", ":", "catchments_limited", ".", "append", "(", "catchment", ")", "amax_records_count", "+=", "catchment", ".", "record_length", "if", "amax_records_count", ">=", "records_limit", ":", "break", "return", "catchments_limited" ]
Return a list of catchments sorted by hydrological similarity defined by `similarity_distance_function` :param subject_catchment: subject catchment to find similar catchments for :type subject_catchment: :class:`floodestimation.entities.Catchment` :param similarity_dist_function: a method returning a similarity distance measure with 2 arguments, both :class:`floodestimation.entities.Catchment` objects :param include_subject_catchment: - `auto`: include subject catchment if suitable for pooling and if urbext < 0.03 - `force`: always include subject catchment having at least 10 years of data - `exclude`: do not include the subject catchment :type include_subject_catchment: str :return: list of catchments sorted by similarity :type: list of :class:`floodestimation.entities.Catchment`
[ "Return", "a", "list", "of", "catchments", "sorted", "by", "hydrological", "similarity", "defined", "by", "similarity_distance_function" ]
782da7c5abd1348923129efe89fb70003ebb088c
https://github.com/OpenHydrology/floodestimation/blob/782da7c5abd1348923129efe89fb70003ebb088c/floodestimation/collections.py#L118-L173
train
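A hedged sketch for most_similar_catchments; the CatchmentCollections constructor is assumed to take the database session, the CD3 file name is hypothetical, and the distance function below is a deliberately trivial placeholder rather than a real hydrological similarity measure:

from floodestimation import db, loaders
from floodestimation.collections import CatchmentCollections  # constructor signature assumed

def trivial_distance(subject, candidate):
    # placeholder similarity distance; a real one would compare catchment descriptors
    return 0.0

session = db.Session()
subject = loaders.from_file('8002.CD3')  # hypothetical file
collection = CatchmentCollections(session)
similar = collection.most_similar_catchments(subject, trivial_distance, records_limit=500)
print(len(similar))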
mpg-age-bioinformatics/AGEpy
AGEpy/sam.py
readSAM
def readSAM(SAMfile,header=False): """ Reads and parses a sam file. :param SAMfile: /path/to/file.sam :param header: logical, if True, reads the header information :returns: a pandas dataframe with the respective SAM columns: 'QNAME','FLAG','RNAME','POS','MAPQ','CIGAR','RNEXT','PNEXT','TLEN','SEQ','QUAL' and a list of the headers if header=True """ if header==True: f=open(SAMfile,"r+") head=[] for line in f.readlines(): if line[0]=="@": head.append(line) else: continue f.close() sam=pd.read_table(SAMfile,sep="this_gives_one_column",comment="@",header=None) sam=pd.DataFrame(sam[0].str.split("\t").tolist()) acols=[0,1,2,3,4,5,6,7,8,9] sam_=sam[acols] samcols=sam.columns.tolist() bcols=[ s for s in samcols if s not in acols ] sam_[10]=sam[bcols[0]] if len(bcols) > 1: for c in bcols[1:]: sam_[10]=sam_[10].astype(str) sam[c]=sam[c].astype(str) sam_[10]=sam_[10]+"\t"+sam[c] sam_.columns=['QNAME','FLAG','RNAME','POS','MAPQ','CIGAR','RNEXT','PNEXT','TLEN','SEQ','QUAL'] if header==True: return sam_, head else: return sam_
python
def readSAM(SAMfile,header=False): """ Reads and parses a sam file. :param SAMfile: /path/to/file.sam :param header: logical, if True, reads the header information :returns: a pandas dataframe with the respective SAM columns: 'QNAME','FLAG','RNAME','POS','MAPQ','CIGAR','RNEXT','PNEXT','TLEN','SEQ','QUAL' and a list of the headers if header=True """ if header==True: f=open(SAMfile,"r+") head=[] for line in f.readlines(): if line[0]=="@": head.append(line) else: continue f.close() sam=pd.read_table(SAMfile,sep="this_gives_one_column",comment="@",header=None) sam=pd.DataFrame(sam[0].str.split("\t").tolist()) acols=[0,1,2,3,4,5,6,7,8,9] sam_=sam[acols] samcols=sam.columns.tolist() bcols=[ s for s in samcols if s not in acols ] sam_[10]=sam[bcols[0]] if len(bcols) > 1: for c in bcols[1:]: sam_[10]=sam_[10].astype(str) sam[c]=sam[c].astype(str) sam_[10]=sam_[10]+"\t"+sam[c] sam_.columns=['QNAME','FLAG','RNAME','POS','MAPQ','CIGAR','RNEXT','PNEXT','TLEN','SEQ','QUAL'] if header==True: return sam_, head else: return sam_
[ "def", "readSAM", "(", "SAMfile", ",", "header", "=", "False", ")", ":", "if", "header", "==", "True", ":", "f", "=", "open", "(", "SAMfile", ",", "\"r+\"", ")", "head", "=", "[", "]", "for", "line", "in", "f", ".", "readlines", "(", ")", ":", "if", "line", "[", "0", "]", "==", "\"@\"", ":", "head", ".", "append", "(", "line", ")", "else", ":", "continue", "f", ".", "close", "(", ")", "sam", "=", "pd", ".", "read_table", "(", "SAMfile", ",", "sep", "=", "\"this_gives_one_column\"", ",", "comment", "=", "\"@\"", ",", "header", "=", "None", ")", "sam", "=", "pd", ".", "DataFrame", "(", "sam", "[", "0", "]", ".", "str", ".", "split", "(", "\"\\t\"", ")", ".", "tolist", "(", ")", ")", "acols", "=", "[", "0", ",", "1", ",", "2", ",", "3", ",", "4", ",", "5", ",", "6", ",", "7", ",", "8", ",", "9", "]", "sam_", "=", "sam", "[", "acols", "]", "samcols", "=", "sam", ".", "columns", ".", "tolist", "(", ")", "bcols", "=", "[", "s", "for", "s", "in", "samcols", "if", "s", "not", "in", "acols", "]", "sam_", "[", "10", "]", "=", "sam", "[", "bcols", "[", "0", "]", "]", "if", "len", "(", "bcols", ")", ">", "1", ":", "for", "c", "in", "bcols", "[", "1", ":", "]", ":", "sam_", "[", "10", "]", "=", "sam_", "[", "10", "]", ".", "astype", "(", "str", ")", "sam", "[", "c", "]", "=", "sam", "[", "c", "]", ".", "astype", "(", "str", ")", "sam_", "[", "10", "]", "=", "sam_", "[", "10", "]", "+", "\"\\t\"", "+", "sam", "[", "c", "]", "sam_", ".", "columns", "=", "[", "'QNAME'", ",", "'FLAG'", ",", "'RNAME'", ",", "'POS'", ",", "'MAPQ'", ",", "'CIGAR'", ",", "'RNEXT'", ",", "'PNEXT'", ",", "'TLEN'", ",", "'SEQ'", ",", "'QUAL'", "]", "if", "header", "==", "True", ":", "return", "sam_", ",", "head", "else", ":", "return", "sam_" ]
Reads and parses a sam file. :param SAMfile: /path/to/file.sam :param header: logical, if True, reads the header information :returns: a pandas dataframe with the respective SAM columns: 'QNAME','FLAG','RNAME','POS','MAPQ','CIGAR','RNEXT','PNEXT','TLEN','SEQ','QUAL' and a list of the headers if header=True
[ "Reads", "and", "parses", "a", "sam", "file", "." ]
887808a7a2c1504f39ce8d8cb36c15c1721cd29f
https://github.com/mpg-age-bioinformatics/AGEpy/blob/887808a7a2c1504f39ce8d8cb36c15c1721cd29f/AGEpy/sam.py#L4-L42
train
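A usage sketch for readSAM with a hypothetical SAM file name; header=True also returns the '@'-prefixed header lines, and the column names come straight from the record's code:

from AGEpy.sam import readSAM  # import path assumed from the record

sam_df, header_lines = readSAM("alignments.sam", header=True)  # hypothetical file
print(header_lines[:2])
print(sam_df[["QNAME", "FLAG", "RNAME", "POS"]].head())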
mpg-age-bioinformatics/AGEpy
AGEpy/sam.py
SAMflags
def SAMflags(x): """ Explains a SAM flag. :param x: flag :returns: complete SAM flag explanaition """ flags=[] if x & 1: l="1: Read paired" else: l="0: Read unpaired" flags.append(l) if x & 2 : l="1: Read mapped in proper pair" else: l="0: Read not mapped in proper pair" flags.append(l) if x & 4 : l="1: Read unmapped" else: l="0: Read mapped" flags.append(l) if x & 8 : l="1: Mate unmapped" else: l="0: Mate mapped" flags.append(l) if x & 16 : l="1: Read reverse strand" else: l="0: Read direct strand" flags.append(l) if x & 32 : l="1: Mate reverse strand" else: l="0: Mate direct strand" flags.append(l) if x & 64 : l="1: First in pair" else: l="0: Second in pair" flags.append(l) if x & 128 : l="1: Second in pair" else: l="0: First in pair" flags.append(l) if x & 256 : l="1: Not primary alignment" else: l="0: Primary alignment" flags.append(l) if x & 512 : l="1: Read fails platform/vendor quality checks" else: l="0: Read passes platform/vendor quality checks" flags.append(l) if x & 1024 : l="1: Read is PCR or optical duplicate" else: l="0: Read is not PCR or optical duplicate" flags.append(l) if x & 2048 : l="1: Supplementary alignment" else: l="0: Not supplementary alignment" flags.append(l) return flags
python
def SAMflags(x): """ Explains a SAM flag. :param x: flag :returns: complete SAM flag explanaition """ flags=[] if x & 1: l="1: Read paired" else: l="0: Read unpaired" flags.append(l) if x & 2 : l="1: Read mapped in proper pair" else: l="0: Read not mapped in proper pair" flags.append(l) if x & 4 : l="1: Read unmapped" else: l="0: Read mapped" flags.append(l) if x & 8 : l="1: Mate unmapped" else: l="0: Mate mapped" flags.append(l) if x & 16 : l="1: Read reverse strand" else: l="0: Read direct strand" flags.append(l) if x & 32 : l="1: Mate reverse strand" else: l="0: Mate direct strand" flags.append(l) if x & 64 : l="1: First in pair" else: l="0: Second in pair" flags.append(l) if x & 128 : l="1: Second in pair" else: l="0: First in pair" flags.append(l) if x & 256 : l="1: Not primary alignment" else: l="0: Primary alignment" flags.append(l) if x & 512 : l="1: Read fails platform/vendor quality checks" else: l="0: Read passes platform/vendor quality checks" flags.append(l) if x & 1024 : l="1: Read is PCR or optical duplicate" else: l="0: Read is not PCR or optical duplicate" flags.append(l) if x & 2048 : l="1: Supplementary alignment" else: l="0: Not supplementary alignment" flags.append(l) return flags
[ "def", "SAMflags", "(", "x", ")", ":", "flags", "=", "[", "]", "if", "x", "&", "1", ":", "l", "=", "\"1: Read paired\"", "else", ":", "l", "=", "\"0: Read unpaired\"", "flags", ".", "append", "(", "l", ")", "if", "x", "&", "2", ":", "l", "=", "\"1: Read mapped in proper pair\"", "else", ":", "l", "=", "\"0: Read not mapped in proper pair\"", "flags", ".", "append", "(", "l", ")", "if", "x", "&", "4", ":", "l", "=", "\"1: Read unmapped\"", "else", ":", "l", "=", "\"0: Read mapped\"", "flags", ".", "append", "(", "l", ")", "if", "x", "&", "8", ":", "l", "=", "\"1: Mate unmapped\"", "else", ":", "l", "=", "\"0: Mate mapped\"", "flags", ".", "append", "(", "l", ")", "if", "x", "&", "16", ":", "l", "=", "\"1: Read reverse strand\"", "else", ":", "l", "=", "\"0: Read direct strand\"", "flags", ".", "append", "(", "l", ")", "if", "x", "&", "32", ":", "l", "=", "\"1: Mate reverse strand\"", "else", ":", "l", "=", "\"0: Mate direct strand\"", "flags", ".", "append", "(", "l", ")", "if", "x", "&", "64", ":", "l", "=", "\"1: First in pair\"", "else", ":", "l", "=", "\"0: Second in pair\"", "flags", ".", "append", "(", "l", ")", "if", "x", "&", "128", ":", "l", "=", "\"1: Second in pair\"", "else", ":", "l", "=", "\"0: First in pair\"", "flags", ".", "append", "(", "l", ")", "if", "x", "&", "256", ":", "l", "=", "\"1: Not primary alignment\"", "else", ":", "l", "=", "\"0: Primary alignment\"", "flags", ".", "append", "(", "l", ")", "if", "x", "&", "512", ":", "l", "=", "\"1: Read fails platform/vendor quality checks\"", "else", ":", "l", "=", "\"0: Read passes platform/vendor quality checks\"", "flags", ".", "append", "(", "l", ")", "if", "x", "&", "1024", ":", "l", "=", "\"1: Read is PCR or optical duplicate\"", "else", ":", "l", "=", "\"0: Read is not PCR or optical duplicate\"", "flags", ".", "append", "(", "l", ")", "if", "x", "&", "2048", ":", "l", "=", "\"1: Supplementary alignment\"", "else", ":", "l", "=", "\"0: Not supplementary alignment\"", "flags", ".", "append", "(", "l", ")", "return", "flags" ]
Explains a SAM flag. :param x: flag :returns: complete SAM flag explanation
[ "Explains", "a", "SAM", "flag", "." ]
887808a7a2c1504f39ce8d8cb36c15c1721cd29f
https://github.com/mpg-age-bioinformatics/AGEpy/blob/887808a7a2c1504f39ce8d8cb36c15c1721cd29f/AGEpy/sam.py#L83-L165
train
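A usage sketch for SAMflags, decoding a common paired-end flag value; import path assumed as above:

from AGEpy.sam import SAMflags  # import path assumed from the record

# 163 = 1 + 2 + 32 + 128: paired, proper pair, mate on reverse strand, second in pair.
for description in SAMflags(163):
    print(description)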
acutesoftware/AIKIF
aikif/bias.py
Bias.get_bias_details
def get_bias_details(self): """ returns a string representation of the bias details """ res = 'Bias File Details\n' for b in self.bias_details: if len(b) > 2: res += b[0].ljust(35) res += b[1].ljust(35) res += b[2].ljust(9) res += '\n' return res
python
def get_bias_details(self): """ returns a string representation of the bias details """ res = 'Bias File Details\n' for b in self.bias_details: if len(b) > 2: res += b[0].ljust(35) res += b[1].ljust(35) res += b[2].ljust(9) res += '\n' return res
[ "def", "get_bias_details", "(", "self", ")", ":", "res", "=", "'Bias File Details\\n'", "for", "b", "in", "self", ".", "bias_details", ":", "if", "len", "(", "b", ")", ">", "2", ":", "res", "+=", "b", "[", "0", "]", ".", "ljust", "(", "35", ")", "res", "+=", "b", "[", "1", "]", ".", "ljust", "(", "35", ")", "res", "+=", "b", "[", "2", "]", ".", "ljust", "(", "9", ")", "res", "+=", "'\\n'", "return", "res" ]
returns a string representation of the bias details
[ "returns", "a", "string", "representation", "of", "the", "bias", "details" ]
fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/bias.py#L85-L96
train
acutesoftware/AIKIF
aikif/bias.py
Bias._read_bias_rating
def _read_bias_rating(self, short_filename): """ read the bias file based on the short_filename and return as a dictionary """ res = {} full_name = os.path.join(root_fldr, 'aikif', 'data', 'ref', short_filename) lg.record_process('bias.py','reading ' + full_name) with open(full_name, 'r') as f: for line in f: if line.strip('') == '': break bias_line = [] cols = line.split(',') bias_line.extend([short_filename]) for col in cols: bias_line.extend([col.strip('"').strip('\n')]) self.bias_details.append(bias_line)
python
def _read_bias_rating(self, short_filename): """ read the bias file based on the short_filename and return as a dictionary """ res = {} full_name = os.path.join(root_fldr, 'aikif', 'data', 'ref', short_filename) lg.record_process('bias.py','reading ' + full_name) with open(full_name, 'r') as f: for line in f: if line.strip('') == '': break bias_line = [] cols = line.split(',') bias_line.extend([short_filename]) for col in cols: bias_line.extend([col.strip('"').strip('\n')]) self.bias_details.append(bias_line)
[ "def", "_read_bias_rating", "(", "self", ",", "short_filename", ")", ":", "res", "=", "{", "}", "full_name", "=", "os", ".", "path", ".", "join", "(", "root_fldr", ",", "'aikif'", ",", "'data'", ",", "'ref'", ",", "short_filename", ")", "lg", ".", "record_process", "(", "'bias.py'", ",", "'reading '", "+", "full_name", ")", "with", "open", "(", "full_name", ",", "'r'", ")", "as", "f", ":", "for", "line", "in", "f", ":", "if", "line", ".", "strip", "(", "''", ")", "==", "''", ":", "break", "bias_line", "=", "[", "]", "cols", "=", "line", ".", "split", "(", "','", ")", "bias_line", ".", "extend", "(", "[", "short_filename", "]", ")", "for", "col", "in", "cols", ":", "bias_line", ".", "extend", "(", "[", "col", ".", "strip", "(", "'\"'", ")", ".", "strip", "(", "'\\n'", ")", "]", ")", "self", ".", "bias_details", ".", "append", "(", "bias_line", ")" ]
read the bias file based on the short_filename and return as a dictionary
[ "read", "the", "bias", "file", "based", "on", "the", "short_filename", "and", "return", "as", "a", "dictionary" ]
fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/bias.py#L119-L137
train
acutesoftware/AIKIF
aikif/config.py
get_root_folder
def get_root_folder(): """ returns the home folder and program root depending on OS """ locations = { 'linux':{'hme':'/home/duncan/', 'core_folder':'/home/duncan/dev/src/python/AIKIF'}, 'win32':{'hme':'T:\\user\\', 'core_folder':'T:\\user\\dev\\src\\python\\AIKIF'}, 'cygwin':{'hme':os.getcwd() + os.sep, 'core_folder':os.getcwd()}, 'darwin':{'hme':os.getcwd() + os.sep, 'core_folder':os.getcwd()} } hme = locations[sys.platform]['hme'] core_folder = locations[sys.platform]['core_folder'] if not os.path.exists(core_folder): hme = os.getcwd() core_folder = os.getcwd() print('config.py : running on CI build (or you need to modify the paths in config.py)') return hme, core_folder
python
def get_root_folder(): """ returns the home folder and program root depending on OS """ locations = { 'linux':{'hme':'/home/duncan/', 'core_folder':'/home/duncan/dev/src/python/AIKIF'}, 'win32':{'hme':'T:\\user\\', 'core_folder':'T:\\user\\dev\\src\\python\\AIKIF'}, 'cygwin':{'hme':os.getcwd() + os.sep, 'core_folder':os.getcwd()}, 'darwin':{'hme':os.getcwd() + os.sep, 'core_folder':os.getcwd()} } hme = locations[sys.platform]['hme'] core_folder = locations[sys.platform]['core_folder'] if not os.path.exists(core_folder): hme = os.getcwd() core_folder = os.getcwd() print('config.py : running on CI build (or you need to modify the paths in config.py)') return hme, core_folder
[ "def", "get_root_folder", "(", ")", ":", "locations", "=", "{", "'linux'", ":", "{", "'hme'", ":", "'/home/duncan/'", ",", "'core_folder'", ":", "'/home/duncan/dev/src/python/AIKIF'", "}", ",", "'win32'", ":", "{", "'hme'", ":", "'T:\\\\user\\\\'", ",", "'core_folder'", ":", "'T:\\\\user\\\\dev\\\\src\\\\python\\\\AIKIF'", "}", ",", "'cygwin'", ":", "{", "'hme'", ":", "os", ".", "getcwd", "(", ")", "+", "os", ".", "sep", ",", "'core_folder'", ":", "os", ".", "getcwd", "(", ")", "}", ",", "'darwin'", ":", "{", "'hme'", ":", "os", ".", "getcwd", "(", ")", "+", "os", ".", "sep", ",", "'core_folder'", ":", "os", ".", "getcwd", "(", ")", "}", "}", "hme", "=", "locations", "[", "sys", ".", "platform", "]", "[", "'hme'", "]", "core_folder", "=", "locations", "[", "sys", ".", "platform", "]", "[", "'core_folder'", "]", "if", "not", "os", ".", "path", ".", "exists", "(", "core_folder", ")", ":", "hme", "=", "os", ".", "getcwd", "(", ")", "core_folder", "=", "os", ".", "getcwd", "(", ")", "print", "(", "'config.py : running on CI build (or you need to modify the paths in config.py)'", ")", "return", "hme", ",", "core_folder" ]
returns the home folder and program root depending on OS
[ "returns", "the", "home", "folder", "and", "program", "root", "depending", "on", "OS" ]
fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/config.py#L28-L45
train
acutesoftware/AIKIF
aikif/config.py
read_credentials
def read_credentials(fname): """ read a simple text file from a private location to get username and password """ with open(fname, 'r') as f: username = f.readline().strip('\n') password = f.readline().strip('\n') return username, password
python
def read_credentials(fname): """ read a simple text file from a private location to get username and password """ with open(fname, 'r') as f: username = f.readline().strip('\n') password = f.readline().strip('\n') return username, password
[ "def", "read_credentials", "(", "fname", ")", ":", "with", "open", "(", "fname", ",", "'r'", ")", "as", "f", ":", "username", "=", "f", ".", "readline", "(", ")", ".", "strip", "(", "'\\n'", ")", "password", "=", "f", ".", "readline", "(", ")", ".", "strip", "(", "'\\n'", ")", "return", "username", ",", "password" ]
read a simple text file from a private location to get username and password
[ "read", "a", "simple", "text", "file", "from", "a", "private", "location", "to", "get", "username", "and", "password" ]
fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/config.py#L84-L92
train
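A usage sketch for read_credentials; the file path is hypothetical and, per the code, the file holds the username on the first line and the password on the second:

from aikif.config import read_credentials  # import path assumed from the record

username, password = read_credentials('/home/duncan/private/creds.txt')  # hypothetical path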
acutesoftware/AIKIF
aikif/config.py
show_config
def show_config(): """ module intended to be imported in most AIKIF utils to manage folder paths, user settings, etc. Modify the parameters at the top of this file to suit """ res = '' res += '\n---------- Folder Locations ---------\n' for k,v in fldrs.items(): res += str(k) + ' = ' + str(v) + '\n' res += '\n---------- Logfiles ---------\n' for k,v in logs.items(): res += str(k) + ' = ' + str(v) + '\n' res += '\n---------- Parameters ---------\n' for k,v in params.items(): res += str(k) + ' = ' + str(v) + '\n' print("\nusage from other programs - returns " + fldr_root()) return res
python
def show_config(): """ module intended to be imported in most AIKIF utils to manage folder paths, user settings, etc. Modify the parameters at the top of this file to suit """ res = '' res += '\n---------- Folder Locations ---------\n' for k,v in fldrs.items(): res += str(k) + ' = ' + str(v) + '\n' res += '\n---------- Logfiles ---------\n' for k,v in logs.items(): res += str(k) + ' = ' + str(v) + '\n' res += '\n---------- Parameters ---------\n' for k,v in params.items(): res += str(k) + ' = ' + str(v) + '\n' print("\nusage from other programs - returns " + fldr_root()) return res
[ "def", "show_config", "(", ")", ":", "res", "=", "''", "res", "+=", "'\\n---------- Folder Locations ---------\\n'", "for", "k", ",", "v", "in", "fldrs", ".", "items", "(", ")", ":", "res", "+=", "str", "(", "k", ")", "+", "' = '", "+", "str", "(", "v", ")", "+", "'\\n'", "res", "+=", "'\\n---------- Logfiles ---------\\n'", "for", "k", ",", "v", "in", "logs", ".", "items", "(", ")", ":", "res", "+=", "str", "(", "k", ")", "+", "' = '", "+", "str", "(", "v", ")", "+", "'\\n'", "res", "+=", "'\\n---------- Parameters ---------\\n'", "for", "k", ",", "v", "in", "params", ".", "items", "(", ")", ":", "res", "+=", "str", "(", "k", ")", "+", "' = '", "+", "str", "(", "v", ")", "+", "'\\n'", "print", "(", "\"\\nusage from other programs - returns \"", "+", "fldr_root", "(", ")", ")", "return", "res" ]
module intended to be imported in most AIKIF utils to manage folder paths, user settings, etc. Modify the parameters at the top of this file to suit
[ "module", "intended", "to", "be", "imported", "in", "most", "AIKIF", "utils", "to", "manage", "folder", "paths", "user", "settings", "etc", ".", "Modify", "the", "parameters", "at", "the", "top", "of", "this", "file", "to", "suit" ]
fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/config.py#L95-L114
train
mpg-age-bioinformatics/AGEpy
AGEpy/meme.py
filterMotifs
def filterMotifs(memeFile,outFile, minSites): """ Selectes motifs from a meme file based on the number of sites. :param memeFile: MEME file to be read :param outFile: MEME file to be written :param minSites: minimum number of sites each motif needs to have to be valid :returns: nothing """ with open(memeFile, "r") as mF: oldMEME=mF.readlines() newMEME=oldMEME[:7] i=7 while i < len(oldMEME): if oldMEME[i].split(" ")[0] == "MOTIF": print(oldMEME[i].split("\n")[0], int(oldMEME[i+2].split("nsites= ")[1].split(" ")[0])) sys.stdout.flush() if int(oldMEME[i+2].split("nsites= ")[1].split(" ")[0]) > minSites: newMEME.append(oldMEME[i]) f=i+1 while oldMEME[f].split(" ")[0] != "MOTIF": newMEME.append(oldMEME[f]) f=f+1 i=i+1 else: i=i+1 else: i=i+1 with open(outFile, "w+") as out: out.write("".join(newMEME) ) return newMEME
python
def filterMotifs(memeFile,outFile, minSites): """ Selectes motifs from a meme file based on the number of sites. :param memeFile: MEME file to be read :param outFile: MEME file to be written :param minSites: minimum number of sites each motif needs to have to be valid :returns: nothing """ with open(memeFile, "r") as mF: oldMEME=mF.readlines() newMEME=oldMEME[:7] i=7 while i < len(oldMEME): if oldMEME[i].split(" ")[0] == "MOTIF": print(oldMEME[i].split("\n")[0], int(oldMEME[i+2].split("nsites= ")[1].split(" ")[0])) sys.stdout.flush() if int(oldMEME[i+2].split("nsites= ")[1].split(" ")[0]) > minSites: newMEME.append(oldMEME[i]) f=i+1 while oldMEME[f].split(" ")[0] != "MOTIF": newMEME.append(oldMEME[f]) f=f+1 i=i+1 else: i=i+1 else: i=i+1 with open(outFile, "w+") as out: out.write("".join(newMEME) ) return newMEME
[ "def", "filterMotifs", "(", "memeFile", ",", "outFile", ",", "minSites", ")", ":", "with", "open", "(", "memeFile", ",", "\"r\"", ")", "as", "mF", ":", "oldMEME", "=", "mF", ".", "readlines", "(", ")", "newMEME", "=", "oldMEME", "[", ":", "7", "]", "i", "=", "7", "while", "i", "<", "len", "(", "oldMEME", ")", ":", "if", "oldMEME", "[", "i", "]", ".", "split", "(", "\" \"", ")", "[", "0", "]", "==", "\"MOTIF\"", ":", "print", "(", "oldMEME", "[", "i", "]", ".", "split", "(", "\"\\n\"", ")", "[", "0", "]", ",", "int", "(", "oldMEME", "[", "i", "+", "2", "]", ".", "split", "(", "\"nsites= \"", ")", "[", "1", "]", ".", "split", "(", "\" \"", ")", "[", "0", "]", ")", ")", "sys", ".", "stdout", ".", "flush", "(", ")", "if", "int", "(", "oldMEME", "[", "i", "+", "2", "]", ".", "split", "(", "\"nsites= \"", ")", "[", "1", "]", ".", "split", "(", "\" \"", ")", "[", "0", "]", ")", ">", "minSites", ":", "newMEME", ".", "append", "(", "oldMEME", "[", "i", "]", ")", "f", "=", "i", "+", "1", "while", "oldMEME", "[", "f", "]", ".", "split", "(", "\" \"", ")", "[", "0", "]", "!=", "\"MOTIF\"", ":", "newMEME", ".", "append", "(", "oldMEME", "[", "f", "]", ")", "f", "=", "f", "+", "1", "i", "=", "i", "+", "1", "else", ":", "i", "=", "i", "+", "1", "else", ":", "i", "=", "i", "+", "1", "with", "open", "(", "outFile", ",", "\"w+\"", ")", "as", "out", ":", "out", ".", "write", "(", "\"\"", ".", "join", "(", "newMEME", ")", ")", "return", "newMEME" ]
Selects motifs from a meme file based on the number of sites. :param memeFile: MEME file to be read :param outFile: MEME file to be written :param minSites: minimum number of sites each motif needs to have to be valid :returns: nothing
[ "Selects", "motifs", "from", "a", "meme", "file", "based", "on", "the", "number", "of", "sites", "." ]
887808a7a2c1504f39ce8d8cb36c15c1721cd29f
https://github.com/mpg-age-bioinformatics/AGEpy/blob/887808a7a2c1504f39ce8d8cb36c15c1721cd29f/AGEpy/meme.py#L2-L35
train
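A usage sketch for filterMotifs with hypothetical MEME file names; motifs whose nsites count exceeds 50 are kept and written to the output file:

from AGEpy.meme import filterMotifs  # import path assumed from the record

kept_lines = filterMotifs("motifs.meme", "motifs.filtered.meme", 50)  # hypothetical file names
print(len(kept_lines), "lines retained")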
acutesoftware/AIKIF
aikif/toolbox/parse_desc.py
Parse._read_file
def _read_file(self): """ reads the file and cleans into standard text ready for parsing """ self.raw = [] with open(self.fname, 'r') as f: for line in f: #print(line) if line.startswith('#'): pass # comment elif line.strip('\n') == '': pass # space else: self.raw.append(line.strip('\n'))
python
def _read_file(self): """ reads the file and cleans into standard text ready for parsing """ self.raw = [] with open(self.fname, 'r') as f: for line in f: #print(line) if line.startswith('#'): pass # comment elif line.strip('\n') == '': pass # space else: self.raw.append(line.strip('\n'))
[ "def", "_read_file", "(", "self", ")", ":", "self", ".", "raw", "=", "[", "]", "with", "open", "(", "self", ".", "fname", ",", "'r'", ")", "as", "f", ":", "for", "line", "in", "f", ":", "if", "line", ".", "startswith", "(", "'#'", ")", ":", "pass", "elif", "line", ".", "strip", "(", "'\\n'", ")", "==", "''", ":", "pass", "else", ":", "self", ".", "raw", ".", "append", "(", "line", ".", "strip", "(", "'\\n'", ")", ")" ]
reads the file and cleans into standard text ready for parsing
[ "reads", "the", "file", "and", "cleans", "into", "standard", "text", "ready", "for", "parsing" ]
fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/toolbox/parse_desc.py#L46-L59
train
OpenHydrology/floodestimation
floodestimation/settings.py
Config.reset
def reset(self): """ Restore the default configuration and remove the user's config file. """ # Delete user config file try: os.remove(self._user_config_file) except FileNotFoundError: pass # Empty and refill the config object for section_name in self.sections(): self.remove_section(section_name) self.read_defaults()
python
def reset(self): """ Restore the default configuration and remove the user's config file. """ # Delete user config file try: os.remove(self._user_config_file) except FileNotFoundError: pass # Empty and refill the config object for section_name in self.sections(): self.remove_section(section_name) self.read_defaults()
[ "def", "reset", "(", "self", ")", ":", "try", ":", "os", ".", "remove", "(", "self", ".", "_user_config_file", ")", "except", "FileNotFoundError", ":", "pass", "for", "section_name", "in", "self", ".", "sections", "(", ")", ":", "self", ".", "remove_section", "(", "section_name", ")", "self", ".", "read_defaults", "(", ")" ]
Restore the default configuration and remove the user's config file.
[ "Restore", "the", "default", "configuration", "and", "remove", "the", "user", "s", "config", "file", "." ]
782da7c5abd1348923129efe89fb70003ebb088c
https://github.com/OpenHydrology/floodestimation/blob/782da7c5abd1348923129efe89fb70003ebb088c/floodestimation/settings.py#L49-L63
train
OpenHydrology/floodestimation
floodestimation/settings.py
Config.save
def save(self): """ Write data to user config file. """ with open(self._user_config_file, 'w', encoding='utf-8') as f: self.write(f)
python
def save(self): """ Write data to user config file. """ with open(self._user_config_file, 'w', encoding='utf-8') as f: self.write(f)
[ "def", "save", "(", "self", ")", ":", "with", "open", "(", "self", ".", "_user_config_file", ",", "'w'", ",", "encoding", "=", "'utf-8'", ")", "as", "f", ":", "self", ".", "write", "(", "f", ")" ]
Write data to user config file.
[ "Write", "data", "to", "user", "config", "file", "." ]
782da7c5abd1348923129efe89fb70003ebb088c
https://github.com/OpenHydrology/floodestimation/blob/782da7c5abd1348923129efe89fb70003ebb088c/floodestimation/settings.py#L87-L92
train
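The reset and save records above suggest a simple load-modify-save-reset cycle for the floodestimation Config object. A minimal sketch follows, assuming Config can be constructed without arguments and behaves like a standard ConfigParser; the 'general'/'cache_folder' names are hypothetical.

from floodestimation.settings import Config  # import path assumed from the record's path field

config = Config()                                      # defaults plus any existing user config
config['general'] = {'cache_folder': '/tmp/fehdata'}   # hypothetical section and option
config.save()                                          # write the user's config file
config.reset()                                         # delete that file and restore defaults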
cdgriffith/puremagic
puremagic/main.py
_magic_data
def _magic_data(filename=os.path.join(here, 'magic_data.json')): """ Read the magic file""" with open(filename) as f: data = json.load(f) headers = [_create_puremagic(x) for x in data['headers']] footers = [_create_puremagic(x) for x in data['footers']] return headers, footers
python
def _magic_data(filename=os.path.join(here, 'magic_data.json')): """ Read the magic file""" with open(filename) as f: data = json.load(f) headers = [_create_puremagic(x) for x in data['headers']] footers = [_create_puremagic(x) for x in data['footers']] return headers, footers
[ "def", "_magic_data", "(", "filename", "=", "os", ".", "path", ".", "join", "(", "here", ",", "'magic_data.json'", ")", ")", ":", "with", "open", "(", "filename", ")", "as", "f", ":", "data", "=", "json", ".", "load", "(", "f", ")", "headers", "=", "[", "_create_puremagic", "(", "x", ")", "for", "x", "in", "data", "[", "'headers'", "]", "]", "footers", "=", "[", "_create_puremagic", "(", "x", ")", "for", "x", "in", "data", "[", "'footers'", "]", "]", "return", "headers", ",", "footers" ]
Read the magic file
[ "Read", "the", "magic", "file" ]
ae2c4c400930b8a19519e787f61dd779db7e415b
https://github.com/cdgriffith/puremagic/blob/ae2c4c400930b8a19519e787f61dd779db7e415b/puremagic/main.py#L39-L45
train
cdgriffith/puremagic
puremagic/main.py
_max_lengths
def _max_lengths(): """ The length of the largest magic string + its offset""" max_header_length = max([len(x.byte_match) + x.offset for x in magic_header_array]) max_footer_length = max([len(x.byte_match) + abs(x.offset) for x in magic_footer_array]) return max_header_length, max_footer_length
python
def _max_lengths(): """ The length of the largest magic string + its offset""" max_header_length = max([len(x.byte_match) + x.offset for x in magic_header_array]) max_footer_length = max([len(x.byte_match) + abs(x.offset) for x in magic_footer_array]) return max_header_length, max_footer_length
[ "def", "_max_lengths", "(", ")", ":", "max_header_length", "=", "max", "(", "[", "len", "(", "x", ".", "byte_match", ")", "+", "x", ".", "offset", "for", "x", "in", "magic_header_array", "]", ")", "max_footer_length", "=", "max", "(", "[", "len", "(", "x", ".", "byte_match", ")", "+", "abs", "(", "x", ".", "offset", ")", "for", "x", "in", "magic_footer_array", "]", ")", "return", "max_header_length", ",", "max_footer_length" ]
The length of the largest magic string + its offset
[ "The", "length", "of", "the", "largest", "magic", "string", "+", "its", "offset" ]
ae2c4c400930b8a19519e787f61dd779db7e415b
https://github.com/cdgriffith/puremagic/blob/ae2c4c400930b8a19519e787f61dd779db7e415b/puremagic/main.py#L59-L65
train
cdgriffith/puremagic
puremagic/main.py
_confidence
def _confidence(matches, ext=None): """ Rough confidence based on string length and file extension""" results = [] for match in matches: con = (0.8 if len(match.extension) > 9 else float("0.{0}".format(len(match.extension)))) if ext == match.extension: con = 0.9 results.append( PureMagicWithConfidence(confidence=con, **match._asdict())) return sorted(results, key=lambda x: x.confidence, reverse=True)
python
def _confidence(matches, ext=None): """ Rough confidence based on string length and file extension""" results = [] for match in matches: con = (0.8 if len(match.extension) > 9 else float("0.{0}".format(len(match.extension)))) if ext == match.extension: con = 0.9 results.append( PureMagicWithConfidence(confidence=con, **match._asdict())) return sorted(results, key=lambda x: x.confidence, reverse=True)
[ "def", "_confidence", "(", "matches", ",", "ext", "=", "None", ")", ":", "results", "=", "[", "]", "for", "match", "in", "matches", ":", "con", "=", "(", "0.8", "if", "len", "(", "match", ".", "extension", ")", ">", "9", "else", "float", "(", "\"0.{0}\"", ".", "format", "(", "len", "(", "match", ".", "extension", ")", ")", ")", ")", "if", "ext", "==", "match", ".", "extension", ":", "con", "=", "0.9", "results", ".", "append", "(", "PureMagicWithConfidence", "(", "confidence", "=", "con", ",", "**", "match", ".", "_asdict", "(", ")", ")", ")", "return", "sorted", "(", "results", ",", "key", "=", "lambda", "x", ":", "x", ".", "confidence", ",", "reverse", "=", "True", ")" ]
Rough confidence based on string length and file extension
[ "Rough", "confidence", "based", "on", "string", "length", "and", "file", "extension" ]
ae2c4c400930b8a19519e787f61dd779db7e415b
https://github.com/cdgriffith/puremagic/blob/ae2c4c400930b8a19519e787f61dd779db7e415b/puremagic/main.py#L68-L78
train
cdgriffith/puremagic
puremagic/main.py
_identify_all
def _identify_all(header, footer, ext=None): """ Attempt to identify 'data' by its magic numbers""" # Capture the length of the data # That way we do not try to identify bytes that don't exist matches = list() for magic_row in magic_header_array: start = magic_row.offset end = magic_row.offset + len(magic_row.byte_match) if end > len(header): continue if header[start:end] == magic_row.byte_match: matches.append(magic_row) for magic_row in magic_footer_array: start = magic_row.offset if footer[start:] == magic_row.byte_match: matches.append(magic_row) if not matches: raise PureError("Could not identify file") return _confidence(matches, ext)
python
def _identify_all(header, footer, ext=None): """ Attempt to identify 'data' by its magic numbers""" # Capture the length of the data # That way we do not try to identify bytes that don't exist matches = list() for magic_row in magic_header_array: start = magic_row.offset end = magic_row.offset + len(magic_row.byte_match) if end > len(header): continue if header[start:end] == magic_row.byte_match: matches.append(magic_row) for magic_row in magic_footer_array: start = magic_row.offset if footer[start:] == magic_row.byte_match: matches.append(magic_row) if not matches: raise PureError("Could not identify file") return _confidence(matches, ext)
[ "def", "_identify_all", "(", "header", ",", "footer", ",", "ext", "=", "None", ")", ":", "matches", "=", "list", "(", ")", "for", "magic_row", "in", "magic_header_array", ":", "start", "=", "magic_row", ".", "offset", "end", "=", "magic_row", ".", "offset", "+", "len", "(", "magic_row", ".", "byte_match", ")", "if", "end", ">", "len", "(", "header", ")", ":", "continue", "if", "header", "[", "start", ":", "end", "]", "==", "magic_row", ".", "byte_match", ":", "matches", ".", "append", "(", "magic_row", ")", "for", "magic_row", "in", "magic_footer_array", ":", "start", "=", "magic_row", ".", "offset", "if", "footer", "[", "start", ":", "]", "==", "magic_row", ".", "byte_match", ":", "matches", ".", "append", "(", "magic_row", ")", "if", "not", "matches", ":", "raise", "PureError", "(", "\"Could not identify file\"", ")", "return", "_confidence", "(", "matches", ",", "ext", ")" ]
Attempt to identify 'data' by its magic numbers
[ "Attempt", "to", "identify", "data", "by", "its", "magic", "numbers" ]
ae2c4c400930b8a19519e787f61dd779db7e415b
https://github.com/cdgriffith/puremagic/blob/ae2c4c400930b8a19519e787f61dd779db7e415b/puremagic/main.py#L81-L102
train
cdgriffith/puremagic
puremagic/main.py
_magic
def _magic(header, footer, mime, ext=None): """ Discover what type of file it is based on the incoming string """ if not header: raise ValueError("Input was empty") info = _identify_all(header, footer, ext)[0] if mime: return info.mime_type return info.extension if not \ isinstance(info.extension, list) else info[0].extension
python
def _magic(header, footer, mime, ext=None): """ Discover what type of file it is based on the incoming string """ if not header: raise ValueError("Input was empty") info = _identify_all(header, footer, ext)[0] if mime: return info.mime_type return info.extension if not \ isinstance(info.extension, list) else info[0].extension
[ "def", "_magic", "(", "header", ",", "footer", ",", "mime", ",", "ext", "=", "None", ")", ":", "if", "not", "header", ":", "raise", "ValueError", "(", "\"Input was empty\"", ")", "info", "=", "_identify_all", "(", "header", ",", "footer", ",", "ext", ")", "[", "0", "]", "if", "mime", ":", "return", "info", ".", "mime_type", "return", "info", ".", "extension", "if", "not", "isinstance", "(", "info", ".", "extension", ",", "list", ")", "else", "info", "[", "0", "]", ".", "extension" ]
Discover what type of file it is based on the incoming string
[ "Discover", "what", "type", "of", "file", "it", "is", "based", "on", "the", "incoming", "string" ]
ae2c4c400930b8a19519e787f61dd779db7e415b
https://github.com/cdgriffith/puremagic/blob/ae2c4c400930b8a19519e787f61dd779db7e415b/puremagic/main.py#L105-L113
train
cdgriffith/puremagic
puremagic/main.py
_file_details
def _file_details(filename): """ Grab the start and end of the file""" max_head, max_foot = _max_lengths() with open(filename, "rb") as fin: head = fin.read(max_head) try: fin.seek(-max_foot, os.SEEK_END) except IOError: fin.seek(0) foot = fin.read() return head, foot
python
def _file_details(filename): """ Grab the start and end of the file""" max_head, max_foot = _max_lengths() with open(filename, "rb") as fin: head = fin.read(max_head) try: fin.seek(-max_foot, os.SEEK_END) except IOError: fin.seek(0) foot = fin.read() return head, foot
[ "def", "_file_details", "(", "filename", ")", ":", "max_head", ",", "max_foot", "=", "_max_lengths", "(", ")", "with", "open", "(", "filename", ",", "\"rb\"", ")", "as", "fin", ":", "head", "=", "fin", ".", "read", "(", "max_head", ")", "try", ":", "fin", ".", "seek", "(", "-", "max_foot", ",", "os", ".", "SEEK_END", ")", "except", "IOError", ":", "fin", ".", "seek", "(", "0", ")", "foot", "=", "fin", ".", "read", "(", ")", "return", "head", ",", "foot" ]
Grab the start and end of the file
[ "Grab", "the", "start", "and", "end", "of", "the", "file" ]
ae2c4c400930b8a19519e787f61dd779db7e415b
https://github.com/cdgriffith/puremagic/blob/ae2c4c400930b8a19519e787f61dd779db7e415b/puremagic/main.py#L116-L126
train
cdgriffith/puremagic
puremagic/main.py
ext_from_filename
def ext_from_filename(filename): """ Scan a filename for it's extension. :param filename: string of the filename :return: the extension off the end (empty string if it can't find one) """ try: base, ext = filename.lower().rsplit(".", 1) except ValueError: return '' ext = ".{0}".format(ext) all_exts = [x.extension for x in chain(magic_header_array, magic_footer_array)] if base[-4:].startswith("."): # For double extensions like like .tar.gz long_ext = base[-4:] + ext if long_ext in all_exts: return long_ext return ext
python
def ext_from_filename(filename): """ Scan a filename for it's extension. :param filename: string of the filename :return: the extension off the end (empty string if it can't find one) """ try: base, ext = filename.lower().rsplit(".", 1) except ValueError: return '' ext = ".{0}".format(ext) all_exts = [x.extension for x in chain(magic_header_array, magic_footer_array)] if base[-4:].startswith("."): # For double extensions like like .tar.gz long_ext = base[-4:] + ext if long_ext in all_exts: return long_ext return ext
[ "def", "ext_from_filename", "(", "filename", ")", ":", "try", ":", "base", ",", "ext", "=", "filename", ".", "lower", "(", ")", ".", "rsplit", "(", "\".\"", ",", "1", ")", "except", "ValueError", ":", "return", "''", "ext", "=", "\".{0}\"", ".", "format", "(", "ext", ")", "all_exts", "=", "[", "x", ".", "extension", "for", "x", "in", "chain", "(", "magic_header_array", ",", "magic_footer_array", ")", "]", "if", "base", "[", "-", "4", ":", "]", ".", "startswith", "(", "\".\"", ")", ":", "long_ext", "=", "base", "[", "-", "4", ":", "]", "+", "ext", "if", "long_ext", "in", "all_exts", ":", "return", "long_ext", "return", "ext" ]
Scan a filename for its extension. :param filename: string of the filename :return: the extension off the end (empty string if it can't find one)
[ "Scan", "a", "filename", "for", "it", "s", "extension", "." ]
ae2c4c400930b8a19519e787f61dd779db7e415b
https://github.com/cdgriffith/puremagic/blob/ae2c4c400930b8a19519e787f61dd779db7e415b/puremagic/main.py#L135-L154
train
cdgriffith/puremagic
puremagic/main.py
from_file
def from_file(filename, mime=False): """ Opens file, attempts to identify content based off magic number and will return the file extension. If mime is True it will return the mime type instead. :param filename: path to file :param mime: Return mime, not extension :return: guessed extension or mime """ head, foot = _file_details(filename) return _magic(head, foot, mime, ext_from_filename(filename))
python
def from_file(filename, mime=False): """ Opens file, attempts to identify content based off magic number and will return the file extension. If mime is True it will return the mime type instead. :param filename: path to file :param mime: Return mime, not extension :return: guessed extension or mime """ head, foot = _file_details(filename) return _magic(head, foot, mime, ext_from_filename(filename))
[ "def", "from_file", "(", "filename", ",", "mime", "=", "False", ")", ":", "head", ",", "foot", "=", "_file_details", "(", "filename", ")", "return", "_magic", "(", "head", ",", "foot", ",", "mime", ",", "ext_from_filename", "(", "filename", ")", ")" ]
Opens the file, attempts to identify the content based on the magic number and will return the file extension. If mime is True it will return the mime type instead. :param filename: path to file :param mime: Return mime, not extension :return: guessed extension or mime
[ "Opens", "file", "attempts", "to", "identify", "content", "based", "off", "magic", "number", "and", "will", "return", "the", "file", "extension", ".", "If", "mime", "is", "True", "it", "will", "return", "the", "mime", "type", "instead", "." ]
ae2c4c400930b8a19519e787f61dd779db7e415b
https://github.com/cdgriffith/puremagic/blob/ae2c4c400930b8a19519e787f61dd779db7e415b/puremagic/main.py#L157-L168
train
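Typical use of from_file as described in the record above; the file name is a placeholder, and PureError is assumed to be importable from the package since the _identify_all record raises it.

import puremagic

try:
    ext = puremagic.from_file("example.pdf")              # guessed extension, e.g. ".pdf"
    mime = puremagic.from_file("example.pdf", mime=True)  # or the mime type instead
    print(ext, mime)
except puremagic.PureError:
    print("file type could not be identified")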
cdgriffith/puremagic
puremagic/main.py
from_string
def from_string(string, mime=False, filename=None): """ Reads in string, attempts to identify content based off magic number and will return the file extension. If mime is True it will return the mime type instead. If filename is provided it will be used in the computation. :param string: string representation to check :param mime: Return mime, not extension :param filename: original filename :return: guessed extension or mime """ head, foot = _string_details(string) ext = ext_from_filename(filename) if filename else None return _magic(head, foot, mime, ext)
python
def from_string(string, mime=False, filename=None): """ Reads in string, attempts to identify content based off magic number and will return the file extension. If mime is True it will return the mime type instead. If filename is provided it will be used in the computation. :param string: string representation to check :param mime: Return mime, not extension :param filename: original filename :return: guessed extension or mime """ head, foot = _string_details(string) ext = ext_from_filename(filename) if filename else None return _magic(head, foot, mime, ext)
[ "def", "from_string", "(", "string", ",", "mime", "=", "False", ",", "filename", "=", "None", ")", ":", "head", ",", "foot", "=", "_string_details", "(", "string", ")", "ext", "=", "ext_from_filename", "(", "filename", ")", "if", "filename", "else", "None", "return", "_magic", "(", "head", ",", "foot", ",", "mime", ",", "ext", ")" ]
Reads in a string, attempts to identify the content based on the magic number and will return the file extension. If mime is True it will return the mime type instead. If filename is provided it will be used in the computation. :param string: string representation to check :param mime: Return mime, not extension :param filename: original filename :return: guessed extension or mime
[ "Reads", "in", "string", "attempts", "to", "identify", "content", "based", "off", "magic", "number", "and", "will", "return", "the", "file", "extension", ".", "If", "mime", "is", "True", "it", "will", "return", "the", "mime", "type", "instead", ".", "If", "filename", "is", "provided", "it", "will", "be", "used", "in", "the", "computation", "." ]
ae2c4c400930b8a19519e787f61dd779db7e415b
https://github.com/cdgriffith/puremagic/blob/ae2c4c400930b8a19519e787f61dd779db7e415b/puremagic/main.py#L171-L184
train
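from_string works on in-memory bytes rather than a path. A small sketch with a PNG-like header; the optional filename argument would feed the extension bonus described in the _confidence record.

import puremagic

png_bytes = b"\x89PNG\r\n\x1a\n" + b"\x00" * 32       # minimal PNG-style byte string
print(puremagic.from_string(png_bytes))               # expected: ".png"
print(puremagic.from_string(png_bytes, mime=True))    # expected: "image/png"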
mpg-age-bioinformatics/AGEpy
AGEpy/gtf.py
retrieve_GTF_field
def retrieve_GTF_field(field,gtf): """ Returns a field of choice from the attribute column of the GTF :param field: field to be retrieved :returns: a Pandas dataframe with one columns containing the field of choice """ inGTF=gtf.copy() def splits(x): l=x.split(";") l=[ s.split(" ") for s in l] res=np.nan for s in l: if field in s: if '"' in s[-1]: res=s[-1][1:-1] else: res=s[-1] return res inGTF[field]=inGTF['attribute'].apply(lambda x: splits(x)) return inGTF[[field]]
python
def retrieve_GTF_field(field,gtf): """ Returns a field of choice from the attribute column of the GTF :param field: field to be retrieved :returns: a Pandas dataframe with one columns containing the field of choice """ inGTF=gtf.copy() def splits(x): l=x.split(";") l=[ s.split(" ") for s in l] res=np.nan for s in l: if field in s: if '"' in s[-1]: res=s[-1][1:-1] else: res=s[-1] return res inGTF[field]=inGTF['attribute'].apply(lambda x: splits(x)) return inGTF[[field]]
[ "def", "retrieve_GTF_field", "(", "field", ",", "gtf", ")", ":", "inGTF", "=", "gtf", ".", "copy", "(", ")", "def", "splits", "(", "x", ")", ":", "l", "=", "x", ".", "split", "(", "\";\"", ")", "l", "=", "[", "s", ".", "split", "(", "\" \"", ")", "for", "s", "in", "l", "]", "res", "=", "np", ".", "nan", "for", "s", "in", "l", ":", "if", "field", "in", "s", ":", "if", "'\"'", "in", "s", "[", "-", "1", "]", ":", "res", "=", "s", "[", "-", "1", "]", "[", "1", ":", "-", "1", "]", "else", ":", "res", "=", "s", "[", "-", "1", "]", "return", "res", "inGTF", "[", "field", "]", "=", "inGTF", "[", "'attribute'", "]", ".", "apply", "(", "lambda", "x", ":", "splits", "(", "x", ")", ")", "return", "inGTF", "[", "[", "field", "]", "]" ]
Returns a field of choice from the attribute column of the GTF :param field: field to be retrieved :returns: a Pandas dataframe with one column containing the field of choice
[ "Returns", "a", "field", "of", "choice", "from", "the", "attribute", "column", "of", "the", "GTF" ]
887808a7a2c1504f39ce8d8cb36c15c1721cd29f
https://github.com/mpg-age-bioinformatics/AGEpy/blob/887808a7a2c1504f39ce8d8cb36c15c1721cd29f/AGEpy/gtf.py#L18-L40
train
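A hedged sketch of retrieve_GTF_field on a tiny hand-built dataframe; the import path follows the record's path field and the attribute string is fabricated.

import pandas as pd
from AGEpy.gtf import retrieve_GTF_field  # import path assumed from the record's path field

gtf = pd.DataFrame({"attribute": ['gene_id "ENSG0001"; gene_name "TP53";']})
# Returns a one-column dataframe whose 'gene_name' entry is "TP53".
print(retrieve_GTF_field("gene_name", gtf))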
mpg-age-bioinformatics/AGEpy
AGEpy/gtf.py
attributesGTF
def attributesGTF(inGTF): """ List the type of attributes in a the attribute section of a GTF file :param inGTF: GTF dataframe to be analysed :returns: a list of attributes present in the attribute section """ df=pd.DataFrame(inGTF['attribute'].str.split(";").tolist()) desc=[] for i in df.columns.tolist(): val=df[[i]].dropna() val=pd.DataFrame(val[i].str.split(' "').tolist())[0] val=list(set(val)) for v in val: if len(v) > 0: l=v.split(" ") if len(l)>1: l=l[1] else: l=l[0] desc.append(l) desc=list(set(desc)) finaldesc=[] for d in desc: if len(d) > 0: finaldesc.append(d) return finaldesc
python
def attributesGTF(inGTF): """ List the type of attributes in a the attribute section of a GTF file :param inGTF: GTF dataframe to be analysed :returns: a list of attributes present in the attribute section """ df=pd.DataFrame(inGTF['attribute'].str.split(";").tolist()) desc=[] for i in df.columns.tolist(): val=df[[i]].dropna() val=pd.DataFrame(val[i].str.split(' "').tolist())[0] val=list(set(val)) for v in val: if len(v) > 0: l=v.split(" ") if len(l)>1: l=l[1] else: l=l[0] desc.append(l) desc=list(set(desc)) finaldesc=[] for d in desc: if len(d) > 0: finaldesc.append(d) return finaldesc
[ "def", "attributesGTF", "(", "inGTF", ")", ":", "df", "=", "pd", ".", "DataFrame", "(", "inGTF", "[", "'attribute'", "]", ".", "str", ".", "split", "(", "\";\"", ")", ".", "tolist", "(", ")", ")", "desc", "=", "[", "]", "for", "i", "in", "df", ".", "columns", ".", "tolist", "(", ")", ":", "val", "=", "df", "[", "[", "i", "]", "]", ".", "dropna", "(", ")", "val", "=", "pd", ".", "DataFrame", "(", "val", "[", "i", "]", ".", "str", ".", "split", "(", "' \"'", ")", ".", "tolist", "(", ")", ")", "[", "0", "]", "val", "=", "list", "(", "set", "(", "val", ")", ")", "for", "v", "in", "val", ":", "if", "len", "(", "v", ")", ">", "0", ":", "l", "=", "v", ".", "split", "(", "\" \"", ")", "if", "len", "(", "l", ")", ">", "1", ":", "l", "=", "l", "[", "1", "]", "else", ":", "l", "=", "l", "[", "0", "]", "desc", ".", "append", "(", "l", ")", "desc", "=", "list", "(", "set", "(", "desc", ")", ")", "finaldesc", "=", "[", "]", "for", "d", "in", "desc", ":", "if", "len", "(", "d", ")", ">", "0", ":", "finaldesc", ".", "append", "(", "d", ")", "return", "finaldesc" ]
List the types of attributes in the attribute section of a GTF file :param inGTF: GTF dataframe to be analysed :returns: a list of attributes present in the attribute section
[ "List", "the", "type", "of", "attributes", "in", "a", "the", "attribute", "section", "of", "a", "GTF", "file" ]
887808a7a2c1504f39ce8d8cb36c15c1721cd29f
https://github.com/mpg-age-bioinformatics/AGEpy/blob/887808a7a2c1504f39ce8d8cb36c15c1721cd29f/AGEpy/gtf.py#L42-L69
train
mpg-age-bioinformatics/AGEpy
AGEpy/gtf.py
parseGTF
def parseGTF(inGTF): """ Reads an extracts all attributes in the attributes section of a GTF and constructs a new dataframe wiht one collumn per attribute instead of the attributes column :param inGTF: GTF dataframe to be parsed :returns: a dataframe of the orignal input GTF with attributes parsed. """ desc=attributesGTF(inGTF) ref=inGTF.copy() ref.reset_index(inplace=True, drop=True) df=ref.drop(['attribute'],axis=1).copy() for d in desc: field=retrieve_GTF_field(d,ref) df=pd.concat([df,field],axis=1) return df
python
def parseGTF(inGTF): """ Reads an extracts all attributes in the attributes section of a GTF and constructs a new dataframe wiht one collumn per attribute instead of the attributes column :param inGTF: GTF dataframe to be parsed :returns: a dataframe of the orignal input GTF with attributes parsed. """ desc=attributesGTF(inGTF) ref=inGTF.copy() ref.reset_index(inplace=True, drop=True) df=ref.drop(['attribute'],axis=1).copy() for d in desc: field=retrieve_GTF_field(d,ref) df=pd.concat([df,field],axis=1) return df
[ "def", "parseGTF", "(", "inGTF", ")", ":", "desc", "=", "attributesGTF", "(", "inGTF", ")", "ref", "=", "inGTF", ".", "copy", "(", ")", "ref", ".", "reset_index", "(", "inplace", "=", "True", ",", "drop", "=", "True", ")", "df", "=", "ref", ".", "drop", "(", "[", "'attribute'", "]", ",", "axis", "=", "1", ")", ".", "copy", "(", ")", "for", "d", "in", "desc", ":", "field", "=", "retrieve_GTF_field", "(", "d", ",", "ref", ")", "df", "=", "pd", ".", "concat", "(", "[", "df", ",", "field", "]", ",", "axis", "=", "1", ")", "return", "df" ]
Reads and extracts all attributes in the attributes section of a GTF and constructs a new dataframe with one column per attribute instead of the attributes column :param inGTF: GTF dataframe to be parsed :returns: a dataframe of the original input GTF with attributes parsed.
[ "Reads", "an", "extracts", "all", "attributes", "in", "the", "attributes", "section", "of", "a", "GTF", "and", "constructs", "a", "new", "dataframe", "wiht", "one", "collumn", "per", "attribute", "instead", "of", "the", "attributes", "column" ]
887808a7a2c1504f39ce8d8cb36c15c1721cd29f
https://github.com/mpg-age-bioinformatics/AGEpy/blob/887808a7a2c1504f39ce8d8cb36c15c1721cd29f/AGEpy/gtf.py#L71-L87
train
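parseGTF ties together attributesGTF and retrieve_GTF_field from the records above: every attribute becomes its own column. A self-contained sketch on a one-row GTF-like dataframe with fabricated values.

import pandas as pd
from AGEpy.gtf import parseGTF  # import path assumed from the record's path field

gtf = pd.DataFrame([{
    "seqname": "1", "source": "ensembl", "feature": "gene",
    "start": 100, "end": 500, "score": ".", "strand": "+", "frame": ".",
    "attribute": 'gene_id "ENSG0001"; gene_name "TP53";',
}])
parsed = parseGTF(gtf)                 # gene_id and gene_name become separate columns
print(parsed.columns.tolist())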
mpg-age-bioinformatics/AGEpy
AGEpy/gtf.py
writeGTF
def writeGTF(inGTF,file_path): """ Write a GTF dataframe into a file :param inGTF: GTF dataframe to be written. It should either have 9 columns with the last one being the "attributes" section or more than 9 columns where all columns after the 8th will be colapsed into one. :param file_path: path/to/the/file.gtf :returns: nothing """ cols=inGTF.columns.tolist() if len(cols) == 9: if 'attribute' in cols: df=inGTF else: df=inGTF[cols[:8]] df['attribute']="" for c in cols[8:]: if c == cols[len(cols)-1]: df['attribute']=df['attribute']+c+' "'+inGTF[c].astype(str)+'";' else: df['attribute']=df['attribute']+c+' "'+inGTF[c].astype(str)+'"; ' df.to_csv(file_path, sep="\t",header=None,index=None,quoting=csv.QUOTE_NONE)
python
def writeGTF(inGTF,file_path): """ Write a GTF dataframe into a file :param inGTF: GTF dataframe to be written. It should either have 9 columns with the last one being the "attributes" section or more than 9 columns where all columns after the 8th will be colapsed into one. :param file_path: path/to/the/file.gtf :returns: nothing """ cols=inGTF.columns.tolist() if len(cols) == 9: if 'attribute' in cols: df=inGTF else: df=inGTF[cols[:8]] df['attribute']="" for c in cols[8:]: if c == cols[len(cols)-1]: df['attribute']=df['attribute']+c+' "'+inGTF[c].astype(str)+'";' else: df['attribute']=df['attribute']+c+' "'+inGTF[c].astype(str)+'"; ' df.to_csv(file_path, sep="\t",header=None,index=None,quoting=csv.QUOTE_NONE)
[ "def", "writeGTF", "(", "inGTF", ",", "file_path", ")", ":", "cols", "=", "inGTF", ".", "columns", ".", "tolist", "(", ")", "if", "len", "(", "cols", ")", "==", "9", ":", "if", "'attribute'", "in", "cols", ":", "df", "=", "inGTF", "else", ":", "df", "=", "inGTF", "[", "cols", "[", ":", "8", "]", "]", "df", "[", "'attribute'", "]", "=", "\"\"", "for", "c", "in", "cols", "[", "8", ":", "]", ":", "if", "c", "==", "cols", "[", "len", "(", "cols", ")", "-", "1", "]", ":", "df", "[", "'attribute'", "]", "=", "df", "[", "'attribute'", "]", "+", "c", "+", "' \"'", "+", "inGTF", "[", "c", "]", ".", "astype", "(", "str", ")", "+", "'\";'", "else", ":", "df", "[", "'attribute'", "]", "=", "df", "[", "'attribute'", "]", "+", "c", "+", "' \"'", "+", "inGTF", "[", "c", "]", ".", "astype", "(", "str", ")", "+", "'\"; '", "df", ".", "to_csv", "(", "file_path", ",", "sep", "=", "\"\\t\"", ",", "header", "=", "None", ",", "index", "=", "None", ",", "quoting", "=", "csv", ".", "QUOTE_NONE", ")" ]
Write a GTF dataframe into a file :param inGTF: GTF dataframe to be written. It should either have 9 columns with the last one being the "attributes" section or more than 9 columns where all columns after the 8th will be collapsed into one. :param file_path: path/to/the/file.gtf :returns: nothing
[ "Write", "a", "GTF", "dataframe", "into", "a", "file" ]
887808a7a2c1504f39ce8d8cb36c15c1721cd29f
https://github.com/mpg-age-bioinformatics/AGEpy/blob/887808a7a2c1504f39ce8d8cb36c15c1721cd29f/AGEpy/gtf.py#L89-L109
train
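writeGTF is the inverse step: a dataframe with more than nine columns has everything after the eighth column folded back into an attribute string, as the record's docstring states. A sketch with fabricated values and an illustrative output path.

import pandas as pd
from AGEpy.gtf import writeGTF  # import path assumed from the record's path field

df = pd.DataFrame([{
    "seqname": "1", "source": "ensembl", "feature": "gene",
    "start": 100, "end": 500, "score": ".", "strand": "+", "frame": ".",
    "gene_id": "ENSG0001", "gene_name": "TP53",
}])
writeGTF(df, "tiny.gtf")   # gene_id and gene_name are collapsed into the attribute column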
mpg-age-bioinformatics/AGEpy
AGEpy/gtf.py
GTFtoBED
def GTFtoBED(inGTF,name): """ Transform a GTF dataframe into a bed dataframe :param inGTF: GTF dataframe for transformation :param name: field of the GTF data frame to be use for the bed 'name' positon returns: a bed dataframe with the corresponding bed fiels: 'chrom','chromStart','chromEnd','name','score','strand' """ bed=inGTF.copy() bed.reset_index(inplace=True, drop=True) if name not in bed.columns.tolist(): field=retrieve_GTF_field(name, bed) bed=pd.concat([bed,field],axis=1) bed=bed[['seqname','start','end',name,'score','strand']] bed.columns=['chrom','chromStart','chromEnd','name','score','strand'] bed.drop_duplicates(inplace=True) bed.reset_index(inplace=True,drop=True) return bed
python
def GTFtoBED(inGTF,name): """ Transform a GTF dataframe into a bed dataframe :param inGTF: GTF dataframe for transformation :param name: field of the GTF data frame to be use for the bed 'name' positon returns: a bed dataframe with the corresponding bed fiels: 'chrom','chromStart','chromEnd','name','score','strand' """ bed=inGTF.copy() bed.reset_index(inplace=True, drop=True) if name not in bed.columns.tolist(): field=retrieve_GTF_field(name, bed) bed=pd.concat([bed,field],axis=1) bed=bed[['seqname','start','end',name,'score','strand']] bed.columns=['chrom','chromStart','chromEnd','name','score','strand'] bed.drop_duplicates(inplace=True) bed.reset_index(inplace=True,drop=True) return bed
[ "def", "GTFtoBED", "(", "inGTF", ",", "name", ")", ":", "bed", "=", "inGTF", ".", "copy", "(", ")", "bed", ".", "reset_index", "(", "inplace", "=", "True", ",", "drop", "=", "True", ")", "if", "name", "not", "in", "bed", ".", "columns", ".", "tolist", "(", ")", ":", "field", "=", "retrieve_GTF_field", "(", "name", ",", "bed", ")", "bed", "=", "pd", ".", "concat", "(", "[", "bed", ",", "field", "]", ",", "axis", "=", "1", ")", "bed", "=", "bed", "[", "[", "'seqname'", ",", "'start'", ",", "'end'", ",", "name", ",", "'score'", ",", "'strand'", "]", "]", "bed", ".", "columns", "=", "[", "'chrom'", ",", "'chromStart'", ",", "'chromEnd'", ",", "'name'", ",", "'score'", ",", "'strand'", "]", "bed", ".", "drop_duplicates", "(", "inplace", "=", "True", ")", "bed", ".", "reset_index", "(", "inplace", "=", "True", ",", "drop", "=", "True", ")", "return", "bed" ]
Transform a GTF dataframe into a bed dataframe :param inGTF: GTF dataframe for transformation :param name: field of the GTF data frame to be used for the bed 'name' position :returns: a bed dataframe with the corresponding bed fields: 'chrom','chromStart','chromEnd','name','score','strand'
[ "Transform", "a", "GTF", "dataframe", "into", "a", "bed", "dataframe" ]
887808a7a2c1504f39ce8d8cb36c15c1721cd29f
https://github.com/mpg-age-bioinformatics/AGEpy/blob/887808a7a2c1504f39ce8d8cb36c15c1721cd29f/AGEpy/gtf.py#L111-L130
train
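GTFtoBED needs the coordinate columns plus the attribute field it pulls the name from; a hedged sketch converting a tiny fabricated dataframe to BED.

import pandas as pd
from AGEpy.gtf import GTFtoBED  # import path assumed from the record's path field

gtf = pd.DataFrame([{
    "seqname": "1", "start": 100, "end": 500, "score": ".", "strand": "+",
    "attribute": 'gene_id "ENSG0001";',
}])
bed = GTFtoBED(gtf, "gene_id")   # gene_id is extracted from 'attribute' for the BED name column
print(bed)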
mpg-age-bioinformatics/AGEpy
AGEpy/gtf.py
MAPGenoToTrans
def MAPGenoToTrans(parsedGTF,feature): """ Gets all positions of all bases in an exon :param df: a Pandas dataframe with 'start','end', and 'strand' information for each entry. df must contain 'seqname','feature','start','end','strand','frame','gene_id', 'transcript_id','exon_id','exon_number'] :param feature: feature upon wich to generate the map, eg. 'exon' or 'transcript' :returns: a string with the comma separated positions of all bases in the exon """ GenTransMap=parsedGTF[parsedGTF["feature"]==feature] def getExonsPositions(df): start=int(df["start"]) stop=int(df["end"]) strand=df["strand"] r=range(start,stop+1) if strand=="-": r.sort(reverse=True) r=[ str(s) for s in r] return ",".join(r) GenTransMap["feature_bases"]=GenTransMap.apply(getExonsPositions, axis=1) GenTransMap=GenTransMap.sort_values(by=["transcript_id","exon_number"],ascending=True) def CombineExons(df): return pd.Series(dict( feature_bases = ','.join(df['feature_bases']) ) ) GenTransMap=GenTransMap.groupby("transcript_id").apply(CombineExons) GenTransMap=GenTransMap.to_dict().get("feature_bases") return GenTransMap
python
def MAPGenoToTrans(parsedGTF,feature): """ Gets all positions of all bases in an exon :param df: a Pandas dataframe with 'start','end', and 'strand' information for each entry. df must contain 'seqname','feature','start','end','strand','frame','gene_id', 'transcript_id','exon_id','exon_number'] :param feature: feature upon wich to generate the map, eg. 'exon' or 'transcript' :returns: a string with the comma separated positions of all bases in the exon """ GenTransMap=parsedGTF[parsedGTF["feature"]==feature] def getExonsPositions(df): start=int(df["start"]) stop=int(df["end"]) strand=df["strand"] r=range(start,stop+1) if strand=="-": r.sort(reverse=True) r=[ str(s) for s in r] return ",".join(r) GenTransMap["feature_bases"]=GenTransMap.apply(getExonsPositions, axis=1) GenTransMap=GenTransMap.sort_values(by=["transcript_id","exon_number"],ascending=True) def CombineExons(df): return pd.Series(dict( feature_bases = ','.join(df['feature_bases']) ) ) GenTransMap=GenTransMap.groupby("transcript_id").apply(CombineExons) GenTransMap=GenTransMap.to_dict().get("feature_bases") return GenTransMap
[ "def", "MAPGenoToTrans", "(", "parsedGTF", ",", "feature", ")", ":", "GenTransMap", "=", "parsedGTF", "[", "parsedGTF", "[", "\"feature\"", "]", "==", "feature", "]", "def", "getExonsPositions", "(", "df", ")", ":", "start", "=", "int", "(", "df", "[", "\"start\"", "]", ")", "stop", "=", "int", "(", "df", "[", "\"end\"", "]", ")", "strand", "=", "df", "[", "\"strand\"", "]", "r", "=", "range", "(", "start", ",", "stop", "+", "1", ")", "if", "strand", "==", "\"-\"", ":", "r", ".", "sort", "(", "reverse", "=", "True", ")", "r", "=", "[", "str", "(", "s", ")", "for", "s", "in", "r", "]", "return", "\",\"", ".", "join", "(", "r", ")", "GenTransMap", "[", "\"feature_bases\"", "]", "=", "GenTransMap", ".", "apply", "(", "getExonsPositions", ",", "axis", "=", "1", ")", "GenTransMap", "=", "GenTransMap", ".", "sort_values", "(", "by", "=", "[", "\"transcript_id\"", ",", "\"exon_number\"", "]", ",", "ascending", "=", "True", ")", "def", "CombineExons", "(", "df", ")", ":", "return", "pd", ".", "Series", "(", "dict", "(", "feature_bases", "=", "','", ".", "join", "(", "df", "[", "'feature_bases'", "]", ")", ")", ")", "GenTransMap", "=", "GenTransMap", ".", "groupby", "(", "\"transcript_id\"", ")", ".", "apply", "(", "CombineExons", ")", "GenTransMap", "=", "GenTransMap", ".", "to_dict", "(", ")", ".", "get", "(", "\"feature_bases\"", ")", "return", "GenTransMap" ]
Gets all positions of all bases in an exon :param df: a Pandas dataframe with 'start','end', and 'strand' information for each entry. df must contain 'seqname','feature','start','end','strand','frame','gene_id', 'transcript_id','exon_id','exon_number' :param feature: feature upon which to generate the map, eg. 'exon' or 'transcript' :returns: a string with the comma separated positions of all bases in the exon
[ "Gets", "all", "positions", "of", "all", "bases", "in", "an", "exon" ]
887808a7a2c1504f39ce8d8cb36c15c1721cd29f
https://github.com/mpg-age-bioinformatics/AGEpy/blob/887808a7a2c1504f39ce8d8cb36c15c1721cd29f/AGEpy/gtf.py#L132-L161
train
mpg-age-bioinformatics/AGEpy
AGEpy/gtf.py
GetTransPosition
def GetTransPosition(df,field,dic,refCol="transcript_id"): """ Maps a genome position to transcript positon" :param df: a Pandas dataframe :param field: the head of the column containing the genomic position :param dic: a dictionary containing for each transcript the respective bases eg. {ENST23923910:'234,235,236,1021,..'} :param refCol: header of the reference column with IDs, eg. 'transcript_id' :returns: position on transcript """ try: gen=str(int(df[field])) transid=df[refCol] bases=dic.get(transid).split(",") bases=bases.index(str(gen))+1 except: bases=np.nan return bases
python
def GetTransPosition(df,field,dic,refCol="transcript_id"): """ Maps a genome position to transcript positon" :param df: a Pandas dataframe :param field: the head of the column containing the genomic position :param dic: a dictionary containing for each transcript the respective bases eg. {ENST23923910:'234,235,236,1021,..'} :param refCol: header of the reference column with IDs, eg. 'transcript_id' :returns: position on transcript """ try: gen=str(int(df[field])) transid=df[refCol] bases=dic.get(transid).split(",") bases=bases.index(str(gen))+1 except: bases=np.nan return bases
[ "def", "GetTransPosition", "(", "df", ",", "field", ",", "dic", ",", "refCol", "=", "\"transcript_id\"", ")", ":", "try", ":", "gen", "=", "str", "(", "int", "(", "df", "[", "field", "]", ")", ")", "transid", "=", "df", "[", "refCol", "]", "bases", "=", "dic", ".", "get", "(", "transid", ")", ".", "split", "(", "\",\"", ")", "bases", "=", "bases", ".", "index", "(", "str", "(", "gen", ")", ")", "+", "1", "except", ":", "bases", "=", "np", ".", "nan", "return", "bases" ]
Maps a genome position to a transcript position. :param df: a Pandas dataframe :param field: the head of the column containing the genomic position :param dic: a dictionary containing for each transcript the respective bases eg. {ENST23923910:'234,235,236,1021,..'} :param refCol: header of the reference column with IDs, eg. 'transcript_id' :returns: position on transcript
[ "Maps", "a", "genome", "position", "to", "transcript", "positon" ]
887808a7a2c1504f39ce8d8cb36c15c1721cd29f
https://github.com/mpg-age-bioinformatics/AGEpy/blob/887808a7a2c1504f39ce8d8cb36c15c1721cd29f/AGEpy/gtf.py#L163-L181
train
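MAPGenoToTrans and GetTransPosition are meant to be used together: the first builds a per-transcript string of genomic base positions, the second looks up where a genomic coordinate falls on the transcript. The sketch below uses a fabricated plus-strand transcript (the minus-strand branch of the record is not exercised).

import pandas as pd
from AGEpy.gtf import MAPGenoToTrans, GetTransPosition  # import paths assumed from the records

exons = pd.DataFrame([
    {"feature": "exon", "start": 100, "end": 104, "strand": "+",
     "transcript_id": "ENST0001", "exon_number": "1"},
    {"feature": "exon", "start": 200, "end": 204, "strand": "+",
     "transcript_id": "ENST0001", "exon_number": "2"},
])
base_map = MAPGenoToTrans(exons, "exon")                 # {"ENST0001": "100,101,...,204"}
row = pd.Series({"transcript_id": "ENST0001", "genome_pos": 201})
print(GetTransPosition(row, "genome_pos", base_map))     # 7: genomic base 201 is the 7th transcript base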
acutesoftware/AIKIF
aikif/toolbox/network_tools.py
get_protected_page
def get_protected_page(url, user, pwd, filename): """ having problems with urllib on a specific site so trying requests """ import requests r = requests.get(url, auth=(user, pwd)) print(r.status_code) if r.status_code == 200: print('success') with open(filename, 'wb') as fd: for chunk in r.iter_content(4096): fd.write(chunk) lg.record_result("Success - downloaded " + url) else: lg.record_result('network_tools.get_protected_page:Failed to downloaded ' + url + ' (status code = ' + str(r.status_code) + ')')
python
def get_protected_page(url, user, pwd, filename): """ having problems with urllib on a specific site so trying requests """ import requests r = requests.get(url, auth=(user, pwd)) print(r.status_code) if r.status_code == 200: print('success') with open(filename, 'wb') as fd: for chunk in r.iter_content(4096): fd.write(chunk) lg.record_result("Success - downloaded " + url) else: lg.record_result('network_tools.get_protected_page:Failed to downloaded ' + url + ' (status code = ' + str(r.status_code) + ')')
[ "def", "get_protected_page", "(", "url", ",", "user", ",", "pwd", ",", "filename", ")", ":", "import", "requests", "r", "=", "requests", ".", "get", "(", "url", ",", "auth", "=", "(", "user", ",", "pwd", ")", ")", "print", "(", "r", ".", "status_code", ")", "if", "r", ".", "status_code", "==", "200", ":", "print", "(", "'success'", ")", "with", "open", "(", "filename", ",", "'wb'", ")", "as", "fd", ":", "for", "chunk", "in", "r", ".", "iter_content", "(", "4096", ")", ":", "fd", ".", "write", "(", "chunk", ")", "lg", ".", "record_result", "(", "\"Success - downloaded \"", "+", "url", ")", "else", ":", "lg", ".", "record_result", "(", "'network_tools.get_protected_page:Failed to downloaded '", "+", "url", "+", "' (status code = '", "+", "str", "(", "r", ".", "status_code", ")", "+", "')'", ")" ]
having problems with urllib on a specific site so trying requests
[ "having", "problems", "with", "urllib", "on", "a", "specific", "site", "so", "trying", "requests" ]
fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/toolbox/network_tools.py#L67-L81
train
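A hedged call of the get_protected_page record with placeholder URL, credentials and output file; real credentials should of course never be hard-coded.

from aikif.toolbox import network_tools  # import path assumed from the record's path field

# Download a basic-auth protected page and save it to report.html (all values are placeholders).
network_tools.get_protected_page(
    "https://example.com/private/report",
    "username", "password",
    "report.html",
)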
cox-labs/perseuspy
perseuspy/io/maxquant.py
read_rawFilesTable
def read_rawFilesTable(filename): """parse the 'rawFilesTable.txt' file into a pandas dataframe""" exp = pd.read_table(filename) expected_columns = {'File', 'Exists', 'Size', 'Data format', 'Parameter group', 'Experiment', 'Fraction'} found_columns = set(exp.columns) if len(expected_columns - found_columns) > 0: message = '\n'.join(['The raw files table has the wrong format!', 'It should contain columns:', ', '.join(sorted(expected_columns)), 'Found columns:', ', '.join(sorted(found_columns))]) raise ValueError(message) exp['Raw file'] = exp['File'].apply(path.basename).apply(path.splitext).str.get(0) exp['Experiment'] = exp['Experiment'].astype(str) return exp
python
def read_rawFilesTable(filename): """parse the 'rawFilesTable.txt' file into a pandas dataframe""" exp = pd.read_table(filename) expected_columns = {'File', 'Exists', 'Size', 'Data format', 'Parameter group', 'Experiment', 'Fraction'} found_columns = set(exp.columns) if len(expected_columns - found_columns) > 0: message = '\n'.join(['The raw files table has the wrong format!', 'It should contain columns:', ', '.join(sorted(expected_columns)), 'Found columns:', ', '.join(sorted(found_columns))]) raise ValueError(message) exp['Raw file'] = exp['File'].apply(path.basename).apply(path.splitext).str.get(0) exp['Experiment'] = exp['Experiment'].astype(str) return exp
[ "def", "read_rawFilesTable", "(", "filename", ")", ":", "exp", "=", "pd", ".", "read_table", "(", "filename", ")", "expected_columns", "=", "{", "'File'", ",", "'Exists'", ",", "'Size'", ",", "'Data format'", ",", "'Parameter group'", ",", "'Experiment'", ",", "'Fraction'", "}", "found_columns", "=", "set", "(", "exp", ".", "columns", ")", "if", "len", "(", "expected_columns", "-", "found_columns", ")", ">", "0", ":", "message", "=", "'\\n'", ".", "join", "(", "[", "'The raw files table has the wrong format!'", ",", "'It should contain columns:'", ",", "', '", ".", "join", "(", "sorted", "(", "expected_columns", ")", ")", ",", "'Found columns:'", ",", "', '", ".", "join", "(", "sorted", "(", "found_columns", ")", ")", "]", ")", "raise", "ValueError", "(", "message", ")", "exp", "[", "'Raw file'", "]", "=", "exp", "[", "'File'", "]", ".", "apply", "(", "path", ".", "basename", ")", ".", "apply", "(", "path", ".", "splitext", ")", ".", "str", ".", "get", "(", "0", ")", "exp", "[", "'Experiment'", "]", "=", "exp", "[", "'Experiment'", "]", ".", "astype", "(", "str", ")", "return", "exp" ]
parse the 'rawFilesTable.txt' file into a pandas dataframe
[ "parse", "the", "rawFilesTable", ".", "txt", "file", "into", "a", "pandas", "dataframe" ]
3809c1bd46512605f9e7ca7f97e026e4940ed604
https://github.com/cox-labs/perseuspy/blob/3809c1bd46512605f9e7ca7f97e026e4940ed604/perseuspy/io/maxquant.py#L8-L22
train
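read_rawFilesTable reads a tab-separated MaxQuant raw-files table and raises ValueError when required columns are missing; a sketch with an illustrative path.

from perseuspy.io.maxquant import read_rawFilesTable  # import path assumed from the record's path field

try:
    raw_files = read_rawFilesTable("combined/experimentalDesign.txt")  # hypothetical file
    print(raw_files[["Raw file", "Experiment", "Fraction"]].head())
except ValueError as err:
    print("unexpected table format:", err)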
nocarryr/python-dispatch
pydispatch/utils.py
WeakMethodContainer.add_method
def add_method(self, m, **kwargs): """Add an instance method or function Args: m: The instance method or function to store """ if isinstance(m, types.FunctionType): self['function', id(m)] = m else: f, obj = get_method_vars(m) wrkey = (f, id(obj)) self[wrkey] = obj
python
def add_method(self, m, **kwargs): """Add an instance method or function Args: m: The instance method or function to store """ if isinstance(m, types.FunctionType): self['function', id(m)] = m else: f, obj = get_method_vars(m) wrkey = (f, id(obj)) self[wrkey] = obj
[ "def", "add_method", "(", "self", ",", "m", ",", "**", "kwargs", ")", ":", "if", "isinstance", "(", "m", ",", "types", ".", "FunctionType", ")", ":", "self", "[", "'function'", ",", "id", "(", "m", ")", "]", "=", "m", "else", ":", "f", ",", "obj", "=", "get_method_vars", "(", "m", ")", "wrkey", "=", "(", "f", ",", "id", "(", "obj", ")", ")", "self", "[", "wrkey", "]", "=", "obj" ]
Add an instance method or function Args: m: The instance method or function to store
[ "Add", "an", "instance", "method", "or", "function" ]
7c5ca03835c922cbfdfd62772c9e560062c954c7
https://github.com/nocarryr/python-dispatch/blob/7c5ca03835c922cbfdfd62772c9e560062c954c7/pydispatch/utils.py#L49-L60
train
nocarryr/python-dispatch
pydispatch/utils.py
WeakMethodContainer.del_method
def del_method(self, m): """Remove an instance method or function if it exists Args: m: The instance method or function to remove """ if isinstance(m, types.FunctionType) and not iscoroutinefunction(m): wrkey = ('function', id(m)) else: f, obj = get_method_vars(m) wrkey = (f, id(obj)) if wrkey in self: del self[wrkey]
python
def del_method(self, m): """Remove an instance method or function if it exists Args: m: The instance method or function to remove """ if isinstance(m, types.FunctionType) and not iscoroutinefunction(m): wrkey = ('function', id(m)) else: f, obj = get_method_vars(m) wrkey = (f, id(obj)) if wrkey in self: del self[wrkey]
[ "def", "del_method", "(", "self", ",", "m", ")", ":", "if", "isinstance", "(", "m", ",", "types", ".", "FunctionType", ")", "and", "not", "iscoroutinefunction", "(", "m", ")", ":", "wrkey", "=", "(", "'function'", ",", "id", "(", "m", ")", ")", "else", ":", "f", ",", "obj", "=", "get_method_vars", "(", "m", ")", "wrkey", "=", "(", "f", ",", "id", "(", "obj", ")", ")", "if", "wrkey", "in", "self", ":", "del", "self", "[", "wrkey", "]" ]
Remove an instance method or function if it exists Args: m: The instance method or function to remove
[ "Remove", "an", "instance", "method", "or", "function", "if", "it", "exists" ]
7c5ca03835c922cbfdfd62772c9e560062c954c7
https://github.com/nocarryr/python-dispatch/blob/7c5ca03835c922cbfdfd62772c9e560062c954c7/pydispatch/utils.py#L61-L73
train
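The two WeakMethodContainer records above register callbacks without keeping their owners alive; a minimal sketch of add_method and del_method with a throwaway listener instance.

from pydispatch.utils import WeakMethodContainer  # import path assumed from the records

class Listener:
    def on_event(self, *args):
        print("event:", args)

listener = Listener()
container = WeakMethodContainer()
container.add_method(listener.on_event)   # bound method stored under a (function, id(obj)) key
container.del_method(listener.on_event)   # removed again if still present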