Dataset schema (one record per extracted Python function):

| column | type |
|---|---|
| repo | string (7-55 chars) |
| path | string (4-127 chars) |
| func_name | string (1-88 chars) |
| original_string | string (75-19.8k chars) |
| language | string (1 class: `python`) |
| code | string (75-19.8k chars; duplicates `original_string`) |
| code_tokens | sequence (token split of `code`) |
| docstring | string (3-17.3k chars) |
| docstring_tokens | sequence (token split of `docstring`) |
| sha | string (40 chars) |
| url | string (87-242 chars) |
| partition | string (1 class: `train`) |
---

**repo:** acutesoftware/AIKIF | **path:** aikif/web_app/page_search.py | **func_name:** search_aikif | **language:** python | **partition:** train
**sha:** fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03 | **url:** https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/web_app/page_search.py#L23-L58

```python
def search_aikif(txt, formatHTML=True):
    """
    search for text - currently this looks in all folders in the
    root of AIKIF but that also contains binaries so will need to
    use the agent_filelist.py to specify the list of folders.
    NOTE - this needs to use indexes rather than full search each time
    """
    results = []
    num_found = 0
    import aikif.lib.cls_filelist as mod_fl
    my_files = mod_fl.FileList([aikif_folder], ['*.*'], ['*.pyc'])
    files = my_files.get_list()
    for f in files:
        try:
            num_found = 0
            with open(f, 'r') as cur:
                line_num = 0
                for line in cur:
                    line_num += 1
                    if txt in line:
                        num_found += 1
                        if formatHTML is True:
                            results.append(format_result(line, line_num, txt))
                        else:
                            results.append([f, line, line_num, txt])
            if num_found > 0:
                if formatHTML is True:
                    results.append('<h3>' + f + ' = ' + str(num_found) + ' results</h3>')
                else:
                    print(f + ' = ' + str(num_found) + '')
        except Exception:
            results.append('problem with file ' + f)
    if len(results) == 0:
        results.append("No results")
    return results
```
"""
search for text - currently this looks in all folders in the
root of AIKIF but that also contains binaries so will need to
use the agent_filelist.py to specify the list of folders.
NOTE - this needs to use indexes rather than full search each time
"""
results = []
num_found = 0
import aikif.lib.cls_filelist as mod_fl
my_files = mod_fl.FileList([aikif_folder ], ['*.*'], ['*.pyc'])
files = my_files.get_list()
for f in files:
try:
num_found = 0
with open(f, 'r') as cur:
line_num = 0
for line in cur:
line_num += 1
if txt in line:
num_found += 1
if formatHTML is True:
results.append(format_result(line, line_num, txt))
else:
results.append([f, line, line_num, txt])
if num_found > 0:
if formatHTML is True:
results.append('<h3>' + f + ' = ' + str(num_found) + ' results</h3>')
else:
print(f + ' = ' + str(num_found) + '')
except Exception:
results.append('problem with file ' + f)
if len(results) == 0:
results.append("No results")
return results | [
"def",
"search_aikif",
"(",
"txt",
",",
"formatHTML",
"=",
"True",
")",
":",
"results",
"=",
"[",
"]",
"num_found",
"=",
"0",
"import",
"aikif",
".",
"lib",
".",
"cls_filelist",
"as",
"mod_fl",
"my_files",
"=",
"mod_fl",
".",
"FileList",
"(",
"[",
"aikif_folder",
"]",
",",
"[",
"'*.*'",
"]",
",",
"[",
"'*.pyc'",
"]",
")",
"files",
"=",
"my_files",
".",
"get_list",
"(",
")",
"for",
"f",
"in",
"files",
":",
"try",
":",
"num_found",
"=",
"0",
"with",
"open",
"(",
"f",
",",
"'r'",
")",
"as",
"cur",
":",
"line_num",
"=",
"0",
"for",
"line",
"in",
"cur",
":",
"line_num",
"+=",
"1",
"if",
"txt",
"in",
"line",
":",
"num_found",
"+=",
"1",
"if",
"formatHTML",
"is",
"True",
":",
"results",
".",
"append",
"(",
"format_result",
"(",
"line",
",",
"line_num",
",",
"txt",
")",
")",
"else",
":",
"results",
".",
"append",
"(",
"[",
"f",
",",
"line",
",",
"line_num",
",",
"txt",
"]",
")",
"if",
"num_found",
">",
"0",
":",
"if",
"formatHTML",
"is",
"True",
":",
"results",
".",
"append",
"(",
"'<h3>'",
"+",
"f",
"+",
"' = '",
"+",
"str",
"(",
"num_found",
")",
"+",
"' results</h3>'",
")",
"else",
":",
"print",
"(",
"f",
"+",
"' = '",
"+",
"str",
"(",
"num_found",
")",
"+",
"''",
")",
"except",
"Exception",
":",
"results",
".",
"append",
"(",
"'problem with file '",
"+",
"f",
")",
"if",
"len",
"(",
"results",
")",
"==",
"0",
":",
"results",
".",
"append",
"(",
"\"No results\"",
")",
"return",
"results"
] | search for text - currently this looks in all folders in the
root of AIKIF but that also contains binaries so will need to
use the agent_filelist.py to specify the list of folders.
NOTE - this needs to use indexes rather than full search each time | [
"search",
"for",
"text",
"-",
"currently",
"this",
"looks",
"in",
"all",
"folders",
"in",
"the",
"root",
"of",
"AIKIF",
"but",
"that",
"also",
"contains",
"binaries",
"so",
"will",
"need",
"to",
"use",
"the",
"agent_filelist",
".",
"py",
"to",
"specify",
"the",
"list",
"of",
"folders",
".",
"NOTE",
"-",
"this",
"needs",
"to",
"use",
"indexes",
"rather",
"than",
"full",
"search",
"each",
"time"
] | fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03 | https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/web_app/page_search.py#L23-L58 | train |
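A minimal usage sketch for `search_aikif`. Note that the function reads `aikif_folder` from module scope rather than taking it as a parameter, so this assumes that variable is already defined when `page_search` is imported; the search text `'FileList'` is an arbitrary example:

```python
# hedged sketch: assumes the aikif package is installed and
# page_search.aikif_folder points at an AIKIF checkout
import aikif.web_app.page_search as ps

for hit in ps.search_aikif('FileList', formatHTML=False):
    # each non-HTML hit is [filename, matching line, line number, search text]
    print(hit)
```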
---

**repo:** acutesoftware/AIKIF | **path:** aikif/web_app/page_search.py | **func_name:** format_result | **language:** python | **partition:** train
**sha:** fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03 | **url:** https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/web_app/page_search.py#L60-L63

```python
def format_result(line, line_num, txt):
    """ highlight the search result """
    return ' ' + str(line_num) + ': ' + line.replace(txt, '<span style="background-color: #FFFF00">' + txt + '</span>')
```
""" highlight the search result """
return ' ' + str(line_num) + ': ' + line.replace(txt, '<span style="background-color: #FFFF00">' + txt + '</span>') | [
"def",
"format_result",
"(",
"line",
",",
"line_num",
",",
"txt",
")",
":",
"return",
"' '",
"+",
"str",
"(",
"line_num",
")",
"+",
"': '",
"+",
"line",
".",
"replace",
"(",
"txt",
",",
"'<span style=\"background-color: #FFFF00\">'",
"+",
"txt",
"+",
"'</span>'",
")"
] | highlight the search result | [
"highlight",
"the",
"search",
"result"
] | fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03 | https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/web_app/page_search.py#L60-L63 | train |
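`format_result` is a pure string transformation, so it is easy to check in isolation (assuming the function above is in scope):

```python
line = 'my_files = mod_fl.FileList([aikif_folder], ["*.*"], ["*.pyc"])\n'
print(format_result(line, 12, 'FileList'))
# -> ' 12: my_files = mod_fl.<span style="background-color: #FFFF00">FileList</span>([aikif_folder], ...'
```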
---

**repo:** acutesoftware/AIKIF | **path:** aikif/environments/happiness.py | **func_name:** TEST | **language:** python | **partition:** train
**sha:** fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03 | **url:** https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/environments/happiness.py#L3-L39

```python
def TEST():
    """
    Modules for testing happiness of 'persons' in 'worlds'
    based on simplistic preferences. Just a toy - dont take seriously
    ----- WORLD SUMMARY for : Mars -----
    population = 0
    tax_rate = 0.0
    tradition = 0.9
    equity = 0.0
    Preferences for Rover
    tax_min = 0.0
    equity = 0.0
    tax_max = 0.9
    tradition = 0.9
    Rover is Indifferent in Mars (0)
    DETAILS
    tax: Economic = 0.1 -> 0.3
    tradition: Personal = 0.3 -> 0.9
    equity: Personal = 0.1 -> 0.9
    growth: Economic = 0.01 -> 0.09
    """
    w = World('Mars', [0, 0.0, 0.9, 0.0])
    print(w)
    p = Person('Rover', {'tax_min': 0.0, 'tax_max': 0.9, 'tradition': 0.9, 'equity': 0.0})
    print(p)
    h = Happiness(p, w)
    #h.add_factor(HappinessFactors(name, type, min, max))
    h.add_factor(HappinessFactors('tax', 'Economic', 0.1, 0.3))
    h.add_factor(HappinessFactors('tradition', 'Personal', 0.3, 0.9))
    h.add_factor(HappinessFactors('equity', 'Personal', 0.1, 0.9))
    h.add_factor(HappinessFactors('growth', 'Economic', 0.01, 0.09))
    print(h.show_details())
```
"""
Modules for testing happiness of 'persons' in 'worlds'
based on simplistic preferences. Just a toy - dont take seriously
----- WORLD SUMMARY for : Mars -----
population = 0
tax_rate = 0.0
tradition = 0.9
equity = 0.0
Preferences for Rover
tax_min = 0.0
equity = 0.0
tax_max = 0.9
tradition = 0.9
Rover is Indifferent in Mars (0)
DETAILS
tax: Economic = 0.1 -> 0.3
tradition: Personal = 0.3 -> 0.9
equity: Personal = 0.1 -> 0.9
growth: Economic = 0.01 -> 0.09
"""
w = World('Mars', [0, 0.0, 0.9, 0.0])
print(w)
p = Person('Rover', {'tax_min':0.0, 'tax_max':0.9,'tradition':0.9, 'equity':0.0})
print(p)
h = Happiness(p,w)
#h.add_factor(HappinessFactors(name, type, min, max))
h.add_factor(HappinessFactors('tax', 'Economic', 0.1, 0.3))
h.add_factor(HappinessFactors('tradition', 'Personal', 0.3, 0.9))
h.add_factor(HappinessFactors('equity', 'Personal', 0.1, 0.9))
h.add_factor(HappinessFactors('growth', 'Economic', 0.01, 0.09))
print(h.show_details()) | [
"def",
"TEST",
"(",
")",
":",
"w",
"=",
"World",
"(",
"'Mars'",
",",
"[",
"0",
",",
"0.0",
",",
"0.9",
",",
"0.0",
"]",
")",
"print",
"(",
"w",
")",
"p",
"=",
"Person",
"(",
"'Rover'",
",",
"{",
"'tax_min'",
":",
"0.0",
",",
"'tax_max'",
":",
"0.9",
",",
"'tradition'",
":",
"0.9",
",",
"'equity'",
":",
"0.0",
"}",
")",
"print",
"(",
"p",
")",
"h",
"=",
"Happiness",
"(",
"p",
",",
"w",
")",
"h",
".",
"add_factor",
"(",
"HappinessFactors",
"(",
"'tax'",
",",
"'Economic'",
",",
"0.1",
",",
"0.3",
")",
")",
"h",
".",
"add_factor",
"(",
"HappinessFactors",
"(",
"'tradition'",
",",
"'Personal'",
",",
"0.3",
",",
"0.9",
")",
")",
"h",
".",
"add_factor",
"(",
"HappinessFactors",
"(",
"'equity'",
",",
"'Personal'",
",",
"0.1",
",",
"0.9",
")",
")",
"h",
".",
"add_factor",
"(",
"HappinessFactors",
"(",
"'growth'",
",",
"'Economic'",
",",
"0.01",
",",
"0.09",
")",
")",
"print",
"(",
"h",
".",
"show_details",
"(",
")",
")"
] | Modules for testing happiness of 'persons' in 'worlds'
based on simplistic preferences. Just a toy - dont take seriously
----- WORLD SUMMARY for : Mars -----
population = 0
tax_rate = 0.0
tradition = 0.9
equity = 0.0
Preferences for Rover
tax_min = 0.0
equity = 0.0
tax_max = 0.9
tradition = 0.9
Rover is Indifferent in Mars (0)
DETAILS
tax: Economic = 0.1 -> 0.3
tradition: Personal = 0.3 -> 0.9
equity: Personal = 0.1 -> 0.9
growth: Economic = 0.01 -> 0.09 | [
"Modules",
"for",
"testing",
"happiness",
"of",
"persons",
"in",
"worlds",
"based",
"on",
"simplistic",
"preferences",
".",
"Just",
"a",
"toy",
"-",
"dont",
"take",
"seriously"
] | fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03 | https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/environments/happiness.py#L3-L39 | train |
---

**repo:** acutesoftware/AIKIF | **path:** aikif/environments/happiness.py | **func_name:** WorldFinder.solve | **language:** python | **partition:** train
**sha:** fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03 | **url:** https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/environments/happiness.py#L129-L153

```python
def solve(self, max_worlds=10000, silent=False):
    """
    find the best world to make people happy
    """
    self.num_worlds = 0
    num_unhappy = 0
    for tax_rate in range(self.tax_range[0], self.tax_range[1]):
        for equity in range(self.equity_range[0], self.equity_range[1]):
            for tradition in range(self.tradition_range[0], self.tradition_range[1]):
                self.num_worlds += 1
                if self.num_worlds > max_worlds:
                    break
                w = World(str(self.num_worlds).zfill(6), [5000, tax_rate/10, tradition/10, equity/10])
                world_happiness = 0
                num_unhappy = 0
                for person in self.all_people:
                    wh = Happiness(person, w)
                    world_happiness += wh.rating
                    if wh.rating < 0:
                        num_unhappy += 1
                if world_happiness > self.net_happiness:
                    self.net_happiness = world_happiness
                    self.unhappy_people = num_unhappy
                    if not silent:
                        print('found better world - ' + w.nme + ' = ' + str(world_happiness) + ' - total unhappy_people = ' + str(self.unhappy_people))
```
"""
find the best world to make people happy
"""
self.num_worlds = 0
num_unhappy = 0
for tax_rate in range(self.tax_range[0],self.tax_range[1]):
for equity in range(self.equity_range[0],self.equity_range[1]):
for tradition in range(self.tradition_range[0],self.tradition_range[1]):
self.num_worlds += 1
if self.num_worlds > max_worlds:
break
w = World(str(self.num_worlds).zfill(6), [5000, tax_rate/10, tradition/10, equity/10])
world_happiness = 0
num_unhappy = 0
for person in self.all_people:
wh = Happiness(person, w)
world_happiness += wh.rating
if wh.rating < 0:
num_unhappy += 1
if world_happiness > self.net_happiness:
self.net_happiness = world_happiness
self.unhappy_people = num_unhappy
if not silent:
print('found better world - ' + w.nme + ' = ' + str(world_happiness) + ' - total unhappy_people = ' + str(self.unhappy_people)) | [
"def",
"solve",
"(",
"self",
",",
"max_worlds",
"=",
"10000",
",",
"silent",
"=",
"False",
")",
":",
"self",
".",
"num_worlds",
"=",
"0",
"num_unhappy",
"=",
"0",
"for",
"tax_rate",
"in",
"range",
"(",
"self",
".",
"tax_range",
"[",
"0",
"]",
",",
"self",
".",
"tax_range",
"[",
"1",
"]",
")",
":",
"for",
"equity",
"in",
"range",
"(",
"self",
".",
"equity_range",
"[",
"0",
"]",
",",
"self",
".",
"equity_range",
"[",
"1",
"]",
")",
":",
"for",
"tradition",
"in",
"range",
"(",
"self",
".",
"tradition_range",
"[",
"0",
"]",
",",
"self",
".",
"tradition_range",
"[",
"1",
"]",
")",
":",
"self",
".",
"num_worlds",
"+=",
"1",
"if",
"self",
".",
"num_worlds",
">",
"max_worlds",
":",
"break",
"w",
"=",
"World",
"(",
"str",
"(",
"self",
".",
"num_worlds",
")",
".",
"zfill",
"(",
"6",
")",
",",
"[",
"5000",
",",
"tax_rate",
"/",
"10",
",",
"tradition",
"/",
"10",
",",
"equity",
"/",
"10",
"]",
")",
"world_happiness",
"=",
"0",
"num_unhappy",
"=",
"0",
"for",
"person",
"in",
"self",
".",
"all_people",
":",
"wh",
"=",
"Happiness",
"(",
"person",
",",
"w",
")",
"world_happiness",
"+=",
"wh",
".",
"rating",
"if",
"wh",
".",
"rating",
"<",
"0",
":",
"num_unhappy",
"+=",
"1",
"if",
"world_happiness",
">",
"self",
".",
"net_happiness",
":",
"self",
".",
"net_happiness",
"=",
"world_happiness",
"self",
".",
"unhappy_people",
"=",
"num_unhappy",
"if",
"not",
"silent",
":",
"print",
"(",
"'found better world - '",
"+",
"w",
".",
"nme",
"+",
"' = '",
"+",
"str",
"(",
"world_happiness",
")",
"+",
"' - total unhappy_people = '",
"+",
"str",
"(",
"self",
".",
"unhappy_people",
")",
")"
] | find the best world to make people happy | [
"find",
"the",
"best",
"world",
"to",
"make",
"people",
"happy"
] | fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03 | https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/environments/happiness.py#L129-L153 | train |
---

**repo:** acutesoftware/AIKIF | **path:** aikif/environments/happiness.py | **func_name:** Happiness.show_details | **language:** python | **partition:** train
**sha:** fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03 | **url:** https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/environments/happiness.py#L222-L231

```python
def show_details(self):
    """
    extended print details of happiness parameters
    """
    res = str(self)
    res += '\nDETAILS\n'
    for f in self.factors:
        res += str(f)
    return res
```
"""
extended print details of happiness parameters
"""
res = str(self)
res += '\nDETAILS\n'
for f in self.factors:
res += str(f)
return res | [
"def",
"show_details",
"(",
"self",
")",
":",
"res",
"=",
"str",
"(",
"self",
")",
"res",
"+=",
"'\\nDETAILS\\n'",
"for",
"f",
"in",
"self",
".",
"factors",
":",
"res",
"+=",
"str",
"(",
"f",
")",
"return",
"res"
] | extended print details of happiness parameters | [
"extended",
"print",
"details",
"of",
"happiness",
"parameters"
] | fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03 | https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/environments/happiness.py#L222-L231 | train |
---

**repo:** acutesoftware/AIKIF | **path:** aikif/environments/happiness.py | **func_name:** Value.match_value_to_text | **language:** python | **partition:** train
**sha:** fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03 | **url:** https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/environments/happiness.py#L310-L329

```python
def match_value_to_text(self, text):
    """
    this is going to be the tricky bit - probably not possible
    to get the 'exact' rating for a value. Will need to do sentiment
    analysis of the text to see how it matches the rating. Even that
    sounds like it wont work - maybe a ML algorithm would do it, but
    that requires a large body of text already matched to values - and
    values aren't even defined as far as I have found.
    UPDATE - this could work if we assume values can be single words,
    eg tax=0.3, freedom=0.7, healthcare=0.3, welfare=0.3 etc
    """
    if self.nme in text:
        res = 0.8
    else:
        res = 0.2
    return self.nme + ' = ' + str(res) + ' match against ' + text
```
"""
this is going to be the tricky bit - probably not possible
to get the 'exact' rating for a value. Will need to do sentiment
analysis of the text to see how it matches the rating. Even that
sounds like it wont work - maybe a ML algorithm would do it, but
that requires a large body of text already matched to values - and
values aren't even defined as far as I have found.
UPDATE - this could work if we assume values can be single words,
eg tax=0.3, freedom=0.7, healthcare=0.3, welfare=0.3 etc
"""
if self.nme in text:
res = 0.8
else:
res = 0.2
return self.nme + ' = ' + str(res) + ' match against ' + text | [
"def",
"match_value_to_text",
"(",
"self",
",",
"text",
")",
":",
"if",
"self",
".",
"nme",
"in",
"text",
":",
"res",
"=",
"0.8",
"else",
":",
"res",
"=",
"0.2",
"return",
"self",
".",
"nme",
"+",
"' = '",
"+",
"str",
"(",
"res",
")",
"+",
"' match against '",
"+",
"text"
] | this is going to be the tricky bit - probably not possible
to get the 'exact' rating for a value. Will need to do sentiment
analysis of the text to see how it matches the rating. Even that
sounds like it wont work - maybe a ML algorithm would do it, but
that requires a large body of text already matched to values - and
values aren't even defined as far as I have found.
UPDATE - this could work if we assume values can be single words,
eg tax=0.3, freedom=0.7, healthcare=0.3, welfare=0.3 etc | [
"this",
"is",
"going",
"to",
"be",
"the",
"tricky",
"bit",
"-",
"probably",
"not",
"possible",
"to",
"get",
"the",
"exact",
"rating",
"for",
"a",
"value",
".",
"Will",
"need",
"to",
"do",
"sentiment",
"analysis",
"of",
"the",
"text",
"to",
"see",
"how",
"it",
"matches",
"the",
"rating",
".",
"Even",
"that",
"sounds",
"like",
"it",
"wont",
"work",
"-",
"maybe",
"a",
"ML",
"algorithm",
"would",
"do",
"it",
"but",
"that",
"requires",
"a",
"large",
"body",
"of",
"text",
"already",
"matched",
"to",
"values",
"-",
"and",
"values",
"aren",
"t",
"even",
"defined",
"as",
"far",
"as",
"I",
"have",
"found",
"."
] | fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03 | https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/environments/happiness.py#L310-L329 | train |
---

**repo:** acutesoftware/AIKIF | **path:** aikif/web_app/web_utils.py | **func_name:** list2html | **language:** python | **partition:** train
**sha:** fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03 | **url:** https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/web_app/web_utils.py#L10-L28

```python
def list2html(lst):
    """
    convert a list to html using table formatting
    """
    txt = '<TABLE width=100% border=0>'
    for l in lst:
        txt += '<TR>\n'
        if type(l) is str:
            txt += '<TD>' + l + '</TD>\n'
        elif type(l) is list:
            txt += '<TD>'
            for i in l:
                txt += i + ', '
            txt += '</TD>'
        else:
            txt += '<TD>' + str(l) + '</TD>\n'
        txt += '</TR>\n'
    txt += '</TABLE><BR>\n'
    return txt
```
"""
convert a list to html using table formatting
"""
txt = '<TABLE width=100% border=0>'
for l in lst:
txt += '<TR>\n'
if type(l) is str:
txt+= '<TD>' + l + '</TD>\n'
elif type(l) is list:
txt+= '<TD>'
for i in l:
txt += i + ', '
txt+= '</TD>'
else:
txt+= '<TD>' + str(l) + '</TD>\n'
txt += '</TR>\n'
txt += '</TABLE><BR>\n'
return txt | [
"def",
"list2html",
"(",
"lst",
")",
":",
"txt",
"=",
"'<TABLE width=100% border=0>'",
"for",
"l",
"in",
"lst",
":",
"txt",
"+=",
"'<TR>\\n'",
"if",
"type",
"(",
"l",
")",
"is",
"str",
":",
"txt",
"+=",
"'<TD>'",
"+",
"l",
"+",
"'</TD>\\n'",
"elif",
"type",
"(",
"l",
")",
"is",
"list",
":",
"txt",
"+=",
"'<TD>'",
"for",
"i",
"in",
"l",
":",
"txt",
"+=",
"i",
"+",
"', '",
"txt",
"+=",
"'</TD>'",
"else",
":",
"txt",
"+=",
"'<TD>'",
"+",
"str",
"(",
"l",
")",
"+",
"'</TD>\\n'",
"txt",
"+=",
"'</TR>\\n'",
"txt",
"+=",
"'</TABLE><BR>\\n'",
"return",
"txt"
] | convert a list to html using table formatting | [
"convert",
"a",
"list",
"to",
"html",
"using",
"table",
"formatting"
] | fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03 | https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/web_app/web_utils.py#L10-L28 | train |
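A quick demonstration of the three branches in `list2html` (plain string, nested list, anything else via `str()`); the sample rows are arbitrary:

```python
rows = [
    'single cell',      # str branch -> one <TD>
    ['a', 'b', 'c'],    # list branch -> items comma-joined inside one <TD>
    42,                 # fallback branch -> str() conversion
]
print(list2html(rows))
```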
---

**repo:** acutesoftware/AIKIF | **path:** aikif/web_app/web_utils.py | **func_name:** build_edit_form | **language:** python | **partition:** train
**sha:** fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03 | **url:** https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/web_app/web_utils.py#L67-L89

```python
def build_edit_form(title, id, cols, return_page):
    """
    returns the html for a simple edit form
    """
    txt = '<H3>' + title + '</H3>'   # closing tag fixed; the source emitted '<H3>' twice
    txt += '<form action="' + return_page + '" method="POST">\n'   # return_page = /agents
    txt += ' updating id:' + str(id) + '\n<BR>'
    txt += ' <input type="hidden" name="rec_id" readonly value="' + str(id) + '"> '
    txt += ' <TABLE width=80% valign=top border=1>'
    for col_num, col in enumerate(cols):
        txt += ' <TR>\n'
        txt += ' <TD><div id="form_label">' + col + '</div></TD>\n'
        txt += ' <TD><div id="form_input"><input type="text" name="col_' + str(col_num) + '"></div></TD>\n'
        txt += ' </TR>\n'
    txt += ' <TR><TD></TD>\n'
    txt += ' <TD>\n'
    txt += ' <input type="submit" name="update-form" value="Save Changes">\n'
    txt += ' <input type="submit" name="delete-form" value="Delete">\n'
    txt += ' <input type="submit" name="add-form" value="Add">\n'
    txt += ' </TD></TR></TABLE>'
    txt += '</form>\n'
    return txt
```
"""
returns the html for a simple edit form
"""
txt = '<H3>' + title + '<H3>'
txt += '<form action="' + return_page + '" method="POST">\n' # return_page = /agents
txt += ' updating id:' + str(id) + '\n<BR>'
txt += ' <input type="hidden" name="rec_id" readonly value="' + str(id) + '"> '
txt += ' <TABLE width=80% valign=top border=1>'
for col_num, col in enumerate(cols):
txt += ' <TR>\n'
txt += ' <TD><div id="form_label">' + col + '</div></TD>\n'
txt += ' <TD><div id="form_input"><input type="text" name="col_' + str(col_num) + '"></div></TD>\n'
txt += ' </TR>\n'
txt += ' <TR><TD></TD>\n'
txt += ' <TD>\n'
txt += ' <input type="submit" name="update-form" value="Save Changes">\n'
txt += ' <input type="submit" name="delete-form" value="Delete">\n'
txt += ' <input type="submit" name="add-form" value="Add">\n'
txt += ' </TD></TR></TABLE>'
txt += '</form>\n'
return txt | [
"def",
"build_edit_form",
"(",
"title",
",",
"id",
",",
"cols",
",",
"return_page",
")",
":",
"txt",
"=",
"'<H3>'",
"+",
"title",
"+",
"'<H3>'",
"txt",
"+=",
"'<form action=\"'",
"+",
"return_page",
"+",
"'\" method=\"POST\">\\n'",
"txt",
"+=",
"' updating id:'",
"+",
"str",
"(",
"id",
")",
"+",
"'\\n<BR>'",
"txt",
"+=",
"' <input type=\"hidden\" name=\"rec_id\" readonly value=\"'",
"+",
"str",
"(",
"id",
")",
"+",
"'\"> '",
"txt",
"+=",
"' <TABLE width=80% valign=top border=1>'",
"for",
"col_num",
",",
"col",
"in",
"enumerate",
"(",
"cols",
")",
":",
"txt",
"+=",
"' <TR>\\n'",
"txt",
"+=",
"' <TD><div id=\"form_label\">'",
"+",
"col",
"+",
"'</div></TD>\\n'",
"txt",
"+=",
"' <TD><div id=\"form_input\"><input type=\"text\" name=\"col_'",
"+",
"str",
"(",
"col_num",
")",
"+",
"'\"></div></TD>\\n'",
"txt",
"+=",
"' </TR>\\n'",
"txt",
"+=",
"' <TR><TD></TD>\\n'",
"txt",
"+=",
"' <TD>\\n'",
"txt",
"+=",
"' <input type=\"submit\" name=\"update-form\" value=\"Save Changes\">\\n'",
"txt",
"+=",
"' <input type=\"submit\" name=\"delete-form\" value=\"Delete\">\\n'",
"txt",
"+=",
"' <input type=\"submit\" name=\"add-form\" value=\"Add\">\\n'",
"txt",
"+=",
"' </TD></TR></TABLE>'",
"txt",
"+=",
"'</form>\\n'",
"return",
"txt"
] | returns the html for a simple edit form | [
"returns",
"the",
"html",
"for",
"a",
"simple",
"edit",
"form"
] | fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03 | https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/web_app/web_utils.py#L67-L89 | train |
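Because `build_edit_form` only assembles strings, it can be exercised without a running web app; the title, id, columns and POST target below are made up for illustration:

```python
html = build_edit_form('Edit agent', 7, ['name', 'status', 'folder'], '/agents')
print(html)   # hidden rec_id=7 plus one labelled text input per column
```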
---

**repo:** acutesoftware/AIKIF | **path:** aikif/web_app/web_utils.py | **func_name:** build_html_listbox | **language:** python | **partition:** train
**sha:** fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03 | **url:** https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/web_app/web_utils.py#L91-L100

```python
def build_html_listbox(lst, nme):
    """
    returns the html to display a listbox
    """
    res = '<select name="' + nme + '" multiple="multiple">\n'
    for l in lst:
        res += ' <option>' + str(l) + '</option>\n'
    res += '</select>\n'
    return res
```
"""
returns the html to display a listbox
"""
res = '<select name="' + nme + '" multiple="multiple">\n'
for l in lst:
res += ' <option>' + str(l) + '</option>\n'
res += '</select>\n'
return res | [
"def",
"build_html_listbox",
"(",
"lst",
",",
"nme",
")",
":",
"res",
"=",
"'<select name=\"'",
"+",
"nme",
"+",
"'\" multiple=\"multiple\">\\n'",
"for",
"l",
"in",
"lst",
":",
"res",
"+=",
"' <option>'",
"+",
"str",
"(",
"l",
")",
"+",
"'</option>\\n'",
"res",
"+=",
"'</select>\\n'",
"return",
"res"
] | returns the html to display a listbox | [
"returns",
"the",
"html",
"to",
"display",
"a",
"listbox"
] | fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03 | https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/web_app/web_utils.py#L91-L100 | train |
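Usage sketch; any values work because each entry goes through `str()`:

```python
print(build_html_listbox(['red', 'green', 'blue'], 'colours'))
# <select name="colours" multiple="multiple"> containing one <option> per item
```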
---

**repo:** acutesoftware/AIKIF | **path:** aikif/web_app/web_utils.py | **func_name:** build_data_list | **language:** python | **partition:** train
**sha:** fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03 | **url:** https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/web_app/web_utils.py#L102-L111

```python
def build_data_list(lst):
    """
    returns the html with supplied list as a HTML listbox
    """
    # the source referenced an undefined name 'List' and left both tags unclosed;
    # a literal heading and a closing '</UL>' are assumed here
    txt = '<H3>List</H3><UL>'
    for i in lst:
        txt += '<LI>' + i + '</LI>'
    txt += '</UL>'
    return txt
```
return txt | python | def build_data_list(lst):
"""
returns the html with supplied list as a HTML listbox
"""
txt = '<H3>' + List + '<H3><UL>'
for i in lst:
txt += '<LI>' + i + '</LI>'
txt += '<UL>'
return txt | [
"def",
"build_data_list",
"(",
"lst",
")",
":",
"txt",
"=",
"'<H3>'",
"+",
"List",
"+",
"'<H3><UL>'",
"for",
"i",
"in",
"lst",
":",
"txt",
"+=",
"'<LI>'",
"+",
"i",
"+",
"'</LI>'",
"txt",
"+=",
"'<UL>'",
"return",
"txt"
] | returns the html with supplied list as a HTML listbox | [
"returns",
"the",
"html",
"with",
"supplied",
"list",
"as",
"a",
"HTML",
"listbox"
] | fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03 | https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/web_app/web_utils.py#L102-L111 | train |
---

**repo:** acutesoftware/AIKIF | **path:** aikif/web_app/web_utils.py | **func_name:** filelist2html | **language:** python | **partition:** train
**sha:** fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03 | **url:** https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/web_app/web_utils.py#L114-L145

```python
def filelist2html(lst, fldr, hasHeader='N'):
    """
    formats a standard filelist to html using table formats
    """
    txt = '<TABLE width=100% border=0>'
    numRows = 1
    if lst:
        for l in lst:
            if hasHeader == 'Y':
                if numRows == 1:
                    td_begin = '<TH>'
                    td_end = '</TH>'
                else:
                    td_begin = '<TD>'
                    td_end = '</TD>'
            else:
                td_begin = '<TD>'
                td_end = '</TD>'
            numRows += 1
            txt += '<TR>'
            if type(l) is str:
                txt += td_begin + link_file(l, fldr) + td_end
            elif type(l) is list:
                txt += td_begin
                for i in l:
                    txt += link_file(i, fldr) + '; '
                txt += td_end
            else:
                txt += td_begin + str(l) + td_end
            txt += '</TR>\n'
    txt += '</TABLE><BR>\n'
    return txt
```
"""
formats a standard filelist to htmk using table formats
"""
txt = '<TABLE width=100% border=0>'
numRows = 1
if lst:
for l in lst:
if hasHeader == 'Y':
if numRows == 1:
td_begin = '<TH>'
td_end = '</TH>'
else:
td_begin = '<TD>'
td_end = '</TD>'
else:
td_begin = '<TD>'
td_end = '</TD>'
numRows += 1
txt += '<TR>'
if type(l) is str:
txt += td_begin + link_file(l, fldr) + td_end
elif type(l) is list:
txt += td_begin
for i in l:
txt+= link_file(i, fldr) + '; '
txt += td_end
else:
txt += td_begin + str(l) + td_end
txt += '</TR>\n'
txt += '</TABLE><BR>\n'
return txt | [
"def",
"filelist2html",
"(",
"lst",
",",
"fldr",
",",
"hasHeader",
"=",
"'N'",
")",
":",
"txt",
"=",
"'<TABLE width=100% border=0>'",
"numRows",
"=",
"1",
"if",
"lst",
":",
"for",
"l",
"in",
"lst",
":",
"if",
"hasHeader",
"==",
"'Y'",
":",
"if",
"numRows",
"==",
"1",
":",
"td_begin",
"=",
"'<TH>'",
"td_end",
"=",
"'</TH>'",
"else",
":",
"td_begin",
"=",
"'<TD>'",
"td_end",
"=",
"'</TD>'",
"else",
":",
"td_begin",
"=",
"'<TD>'",
"td_end",
"=",
"'</TD>'",
"numRows",
"+=",
"1",
"txt",
"+=",
"'<TR>'",
"if",
"type",
"(",
"l",
")",
"is",
"str",
":",
"txt",
"+=",
"td_begin",
"+",
"link_file",
"(",
"l",
",",
"fldr",
")",
"+",
"td_end",
"elif",
"type",
"(",
"l",
")",
"is",
"list",
":",
"txt",
"+=",
"td_begin",
"for",
"i",
"in",
"l",
":",
"txt",
"+=",
"link_file",
"(",
"i",
",",
"fldr",
")",
"+",
"'; '",
"txt",
"+=",
"td_end",
"else",
":",
"txt",
"+=",
"td_begin",
"+",
"str",
"(",
"l",
")",
"+",
"td_end",
"txt",
"+=",
"'</TR>\\n'",
"txt",
"+=",
"'</TABLE><BR>\\n'",
"return",
"txt"
] | formats a standard filelist to htmk using table formats | [
"formats",
"a",
"standard",
"filelist",
"to",
"htmk",
"using",
"table",
"formats"
] | fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03 | https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/web_app/web_utils.py#L114-L145 | train |
---

**repo:** acutesoftware/AIKIF | **path:** aikif/web_app/web_utils.py | **func_name:** link_file | **language:** python | **partition:** train
**sha:** fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03 | **url:** https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/web_app/web_utils.py#L147-L155

```python
def link_file(f, fldr):
    """
    creates a html link for a file using folder fldr
    """
    fname = os.path.join(fldr, f)
    if os.path.isfile(fname):
        return '<a href="/aikif/data/core/' + f + '">' + f + '</a>'
    else:
        return f
```
"""
creates a html link for a file using folder fldr
"""
fname = os.path.join(fldr,f)
if os.path.isfile(fname):
return '<a href="/aikif/data/core/' + f + '">' + f + '</a>'
else:
return f | [
"def",
"link_file",
"(",
"f",
",",
"fldr",
")",
":",
"fname",
"=",
"os",
".",
"path",
".",
"join",
"(",
"fldr",
",",
"f",
")",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"fname",
")",
":",
"return",
"'<a href=\"/aikif/data/core/'",
"+",
"f",
"+",
"'\">'",
"+",
"f",
"+",
"'</a>'",
"else",
":",
"return",
"f"
] | creates a html link for a file using folder fldr | [
"creates",
"a",
"html",
"link",
"for",
"a",
"file",
"using",
"folder",
"fldr"
] | fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03 | https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/web_app/web_utils.py#L147-L155 | train |
---

**repo:** acutesoftware/AIKIF | **path:** aikif/web_app/web_utils.py | **func_name:** dict_to_htmlrow | **language:** python | **partition:** train
**sha:** fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03 | **url:** https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/web_app/web_utils.py#L157-L168

```python
def dict_to_htmlrow(d):
    """
    converts a dictionary to a HTML table row
    """
    res = "<TR>\n"
    for k, v in d.items():
        if type(v) == str:
            res = res + '<TD><p>' + k + ':</p></TD><TD><p>' + v + '</p></TD>'
        else:
            res = res + '<TD><p>' + k + ':</p></TD><TD><p>' + str(v) + '</p></TD>'
    res += '</TR>\n'
    return res
```
"""
converts a dictionary to a HTML table row
"""
res = "<TR>\n"
for k, v in d.items():
if type(v) == str:
res = res + '<TD><p>' + k + ':</p></TD><TD><p>' + v + '</p></TD>'
else:
res = res + '<TD><p>' + k + ':</p></TD><TD><p>' + str(v) + '</p></TD>'
res += '</TR>\n'
return res | [
"def",
"dict_to_htmlrow",
"(",
"d",
")",
":",
"res",
"=",
"\"<TR>\\n\"",
"for",
"k",
",",
"v",
"in",
"d",
".",
"items",
"(",
")",
":",
"if",
"type",
"(",
"v",
")",
"==",
"str",
":",
"res",
"=",
"res",
"+",
"'<TD><p>'",
"+",
"k",
"+",
"':</p></TD><TD><p>'",
"+",
"v",
"+",
"'</p></TD>'",
"else",
":",
"res",
"=",
"res",
"+",
"'<TD><p>'",
"+",
"k",
"+",
"':</p></TD><TD><p>'",
"+",
"str",
"(",
"v",
")",
"+",
"'</p></TD>'",
"res",
"+=",
"'</TR>\\n'",
"return",
"res"
] | converts a dictionary to a HTML table row | [
"converts",
"a",
"dictionary",
"to",
"a",
"HTML",
"table",
"row"
] | fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03 | https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/web_app/web_utils.py#L157-L168 | train |
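A quick check of both branches of `dict_to_htmlrow` (string values are concatenated directly, everything else goes through `str()`); the row is a made-up example:

```python
row = {'name': 'search_aikif', 'hits': 3}
print(dict_to_htmlrow(row))
# one <TR> containing a key cell and a value cell per dictionary item
```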
---

**repo:** acutesoftware/AIKIF | **path:** aikif/web_app/web_utils.py | **func_name:** read_csv_to_html_table | **language:** python | **partition:** train
**sha:** fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03 | **url:** https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/web_app/web_utils.py#L170-L202

```python
def read_csv_to_html_table(csvFile, hasHeader='N'):
    """
    reads a CSV file and converts it to HTML
    """
    txt = '<table class="as-table as-table-zebra as-table-horizontal">'
    with open(csvFile, "r") as f:
        numRows = 1
        for row in f:
            if hasHeader == 'Y':
                if numRows == 1:
                    td_begin = '<TH>'
                    td_end = '</TH>'
                else:
                    td_begin = '<TD>'
                    td_end = '</TD>'
            else:
                td_begin = '<TD>'
                td_end = '</TD>'
            cols = row.split(',')
            numRows += 1
            txt += "<TR>"
            for col in cols:
                txt += td_begin
                try:
                    colString = col
                except Exception:
                    colString = '<font color=red>Error decoding column data</font>'
                txt += colString.strip('"')
                txt += td_end
            txt += "</TR>\n"
    txt += "</TABLE>\n\n"
    return txt
```
"""
reads a CSV file and converts it to HTML
"""
txt = '<table class="as-table as-table-zebra as-table-horizontal">'
with open(csvFile, "r") as f: #
numRows = 1
for row in f:
if hasHeader == 'Y':
if numRows == 1:
td_begin = '<TH>'
td_end = '</TH>'
else:
td_begin = '<TD>'
td_end = '</TD>'
else:
td_begin = '<TD>'
td_end = '</TD>'
cols = row.split(',')
numRows += 1
txt += "<TR>"
for col in cols:
txt += td_begin
try:
colString = col
except Exception:
colString = '<font color=red>Error decoding column data</font>'
txt += colString.strip('"')
txt += td_end
txt += "</TR>\n"
txt += "</TABLE>\n\n"
return txt | [
"def",
"read_csv_to_html_table",
"(",
"csvFile",
",",
"hasHeader",
"=",
"'N'",
")",
":",
"txt",
"=",
"'<table class=\"as-table as-table-zebra as-table-horizontal\">'",
"with",
"open",
"(",
"csvFile",
",",
"\"r\"",
")",
"as",
"f",
":",
"numRows",
"=",
"1",
"for",
"row",
"in",
"f",
":",
"if",
"hasHeader",
"==",
"'Y'",
":",
"if",
"numRows",
"==",
"1",
":",
"td_begin",
"=",
"'<TH>'",
"td_end",
"=",
"'</TH>'",
"else",
":",
"td_begin",
"=",
"'<TD>'",
"td_end",
"=",
"'</TD>'",
"else",
":",
"td_begin",
"=",
"'<TD>'",
"td_end",
"=",
"'</TD>'",
"cols",
"=",
"row",
".",
"split",
"(",
"','",
")",
"numRows",
"+=",
"1",
"txt",
"+=",
"\"<TR>\"",
"for",
"col",
"in",
"cols",
":",
"txt",
"+=",
"td_begin",
"try",
":",
"colString",
"=",
"col",
"except",
"Exception",
":",
"colString",
"=",
"'<font color=red>Error decoding column data</font>'",
"txt",
"+=",
"colString",
".",
"strip",
"(",
"'\"'",
")",
"txt",
"+=",
"td_end",
"txt",
"+=",
"\"</TR>\\n\"",
"txt",
"+=",
"\"</TABLE>\\n\\n\"",
"return",
"txt"
] | reads a CSV file and converts it to HTML | [
"reads",
"a",
"CSV",
"file",
"and",
"converts",
"it",
"to",
"HTML"
] | fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03 | https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/web_app/web_utils.py#L170-L202 | train |
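A sketch that writes a throwaway CSV and renders it with a header row; the file name and contents are arbitrary:

```python
import os
import tempfile

path = os.path.join(tempfile.gettempdir(), 'demo.csv')   # hypothetical sample file
with open(path, 'w') as f:
    f.write('name,hits\nsearch_aikif,3\n')

print(read_csv_to_html_table(path, hasHeader='Y'))   # first row becomes <TH> cells
```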
---

**repo:** acutesoftware/AIKIF | **path:** aikif/web_app/web_utils.py | **func_name:** read_csv_to_html_list | **language:** python | **partition:** train
**sha:** fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03 | **url:** https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/web_app/web_utils.py#L206-L222

```python
def read_csv_to_html_list(csvFile):
    """
    reads a CSV file and converts it to a HTML List
    """
    txt = ''
    with open(csvFile) as csv_file:
        for row in csv.reader(csv_file, delimiter=','):
            txt += '<div id="table_row">'
            for col in row:
                txt += " "
                try:
                    txt += col
                except Exception:
                    txt += 'Error'
                txt += " "
            txt += "</div>\n"
    return txt
```
"""
reads a CSV file and converts it to a HTML List
"""
txt = ''
with open(csvFile) as csv_file:
for row in csv.reader(csv_file, delimiter=','):
txt += '<div id="table_row">'
for col in row:
txt += " "
try:
txt += col
except Exception:
txt += 'Error'
txt += " "
txt += "</div>\n"
return txt | [
"def",
"read_csv_to_html_list",
"(",
"csvFile",
")",
":",
"txt",
"=",
"''",
"with",
"open",
"(",
"csvFile",
")",
"as",
"csv_file",
":",
"for",
"row",
"in",
"csv",
".",
"reader",
"(",
"csv_file",
",",
"delimiter",
"=",
"','",
")",
":",
"txt",
"+=",
"'<div id=\"table_row\">'",
"for",
"col",
"in",
"row",
":",
"txt",
"+=",
"\" \"",
"try",
":",
"txt",
"+=",
"col",
"except",
"Exception",
":",
"txt",
"+=",
"'Error'",
"txt",
"+=",
"\" \"",
"txt",
"+=",
"\"</div>\\n\"",
"return",
"txt"
] | reads a CSV file and converts it to a HTML List | [
"reads",
"a",
"CSV",
"file",
"and",
"converts",
"it",
"to",
"a",
"HTML",
"List"
] | fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03 | https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/web_app/web_utils.py#L206-L222 | train |
---

**repo:** acutesoftware/AIKIF | **path:** aikif/agents/explore/agent_explore_grid.py | **func_name:** ExploreAgent.do_your_job | **language:** python | **partition:** train
**sha:** fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03 | **url:** https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/agents/explore/agent_explore_grid.py#L43-L95

```python
def do_your_job(self):
    """
    the goal of the explore agent is to move to the
    target while avoiding blockages on the grid.
    This function is messy and needs to be looked at.
    It currently has a bug in that the backtrack oscillates
    so need a new method of doing this - probably checking if
    previously backtracked in that direction for those coords, ie
    keep track of cells visited and number of times visited?
    """
    y, x = self.get_intended_direction()   # first find out where we should go
    if self.target_x == self.current_x and self.target_y == self.current_y:
        #print(self.name + " : TARGET ACQUIRED")
        if len(self.results) == 0:
            self.results.append("TARGET ACQUIRED")
        self.lg_mv(2, self.name + ": TARGET ACQUIRED")
        return
    self.num_steps += 1
    # first try is to move on the x axis in a simple greedy search
    accessible = ['\\', '-', '|', '/', '.']
    # randomly move in Y direction instead of X if all paths clear
    if y != 0 and x != 0 and self.backtrack == [0, 0]:
        if random.randint(1, 10) > 6:
            if self.grd.get_tile(self.current_y + y, self.current_x) in accessible:
                self.current_y += y
                self.lg_mv(3, self.name + ": randomly moving Y axis " + str(self.num_steps))
                return
    if x == 1:
        if self.grd.get_tile(self.current_y, self.current_x + 1) in accessible:
            self.current_x += 1
            self.lg_mv(3, self.name + ": move# " + str(self.num_steps) + " - moving West")
            return
    elif x == -1:
        if self.grd.get_tile(self.current_y, self.current_x - 1) in accessible:
            self.current_x -= 1
            self.lg_mv(3, self.name + ": move# " + str(self.num_steps) + " - moving East")
            return
    elif y == 1:
        if self.grd.get_tile(self.current_y + 1, self.current_x) in accessible:
            self.current_y += 1
            self.lg_mv(3, self.name + ": move# " + str(self.num_steps) + " - moving South")
            return
    elif y == -1:
        if self.grd.get_tile(self.current_y - 1, self.current_x) in accessible:
            self.current_y -= 1
            self.lg_mv(3, self.name + ": move# " + str(self.num_steps) + " - moving North")
            return
    self.grd.set_tile(self.start_y, self.start_x, 'A')
    self.grd.save(os.path.join(os.getcwd(), 'agent.txt'))
```
"""
the goal of the explore agent is to move to the
target while avoiding blockages on the grid.
This function is messy and needs to be looked at.
It currently has a bug in that the backtrack oscillates
so need a new method of doing this - probably checking if
previously backtracked in that direction for those coords, ie
keep track of cells visited and number of times visited?
"""
y,x = self.get_intended_direction() # first find out where we should go
if self.target_x == self.current_x and self.target_y == self.current_y:
#print(self.name + " : TARGET ACQUIRED")
if len(self.results) == 0:
self.results.append("TARGET ACQUIRED")
self.lg_mv(2, self.name + ": TARGET ACQUIRED" )
return
self.num_steps += 1
# first try is to move on the x axis in a simple greedy search
accessible = ['\\', '-', '|', '/', '.']
# randomly move in Y direction instead of X if all paths clear
if y != 0 and x != 0 and self.backtrack == [0,0]:
if random.randint(1,10) > 6:
if self.grd.get_tile(self.current_y + y, self.current_x) in accessible:
self.current_y += y
self.lg_mv(3, self.name + ": randomly moving Y axis " + str(self.num_steps) )
return
if x == 1:
if self.grd.get_tile(self.current_y, self.current_x + 1) in accessible:
self.current_x += 1
self.lg_mv(3, self.name + ": move# " + str(self.num_steps) + " - moving West" )
return
elif x == -1:
if self.grd.get_tile(self.current_y, self.current_x - 1) in accessible:
self.current_x -= 1
self.lg_mv(3, self.name + ": move# " + str(self.num_steps) + " - moving East" )
return
elif y == 1:
if self.grd.get_tile(self.current_y + 1, self.current_x) in accessible:
self.current_y += 1
self.lg_mv(3, self.name + ": move# " + str(self.num_steps) + " - moving South" )
return
elif y == -1:
if self.grd.get_tile(self.current_y - 1, self.current_x) in accessible:
self.current_y -= 1
self.lg_mv(3, self.name + ": move# " + str(self.num_steps) + " - moving North")
return
self.grd.set_tile(self.start_y, self.start_x, 'A')
self.grd.save(os.path.join(os.getcwd(), 'agent.txt')) | [
"def",
"do_your_job",
"(",
"self",
")",
":",
"y",
",",
"x",
"=",
"self",
".",
"get_intended_direction",
"(",
")",
"if",
"self",
".",
"target_x",
"==",
"self",
".",
"current_x",
"and",
"self",
".",
"target_y",
"==",
"self",
".",
"current_y",
":",
"if",
"len",
"(",
"self",
".",
"results",
")",
"==",
"0",
":",
"self",
".",
"results",
".",
"append",
"(",
"\"TARGET ACQUIRED\"",
")",
"self",
".",
"lg_mv",
"(",
"2",
",",
"self",
".",
"name",
"+",
"\": TARGET ACQUIRED\"",
")",
"return",
"self",
".",
"num_steps",
"+=",
"1",
"accessible",
"=",
"[",
"'\\\\'",
",",
"'-'",
",",
"'|'",
",",
"'/'",
",",
"'.'",
"]",
"if",
"y",
"!=",
"0",
"and",
"x",
"!=",
"0",
"and",
"self",
".",
"backtrack",
"==",
"[",
"0",
",",
"0",
"]",
":",
"if",
"random",
".",
"randint",
"(",
"1",
",",
"10",
")",
">",
"6",
":",
"if",
"self",
".",
"grd",
".",
"get_tile",
"(",
"self",
".",
"current_y",
"+",
"y",
",",
"self",
".",
"current_x",
")",
"in",
"accessible",
":",
"self",
".",
"current_y",
"+=",
"y",
"self",
".",
"lg_mv",
"(",
"3",
",",
"self",
".",
"name",
"+",
"\": randomly moving Y axis \"",
"+",
"str",
"(",
"self",
".",
"num_steps",
")",
")",
"return",
"if",
"x",
"==",
"1",
":",
"if",
"self",
".",
"grd",
".",
"get_tile",
"(",
"self",
".",
"current_y",
",",
"self",
".",
"current_x",
"+",
"1",
")",
"in",
"accessible",
":",
"self",
".",
"current_x",
"+=",
"1",
"self",
".",
"lg_mv",
"(",
"3",
",",
"self",
".",
"name",
"+",
"\": move# \"",
"+",
"str",
"(",
"self",
".",
"num_steps",
")",
"+",
"\" - moving West\"",
")",
"return",
"elif",
"x",
"==",
"-",
"1",
":",
"if",
"self",
".",
"grd",
".",
"get_tile",
"(",
"self",
".",
"current_y",
",",
"self",
".",
"current_x",
"-",
"1",
")",
"in",
"accessible",
":",
"self",
".",
"current_x",
"-=",
"1",
"self",
".",
"lg_mv",
"(",
"3",
",",
"self",
".",
"name",
"+",
"\": move# \"",
"+",
"str",
"(",
"self",
".",
"num_steps",
")",
"+",
"\" - moving East\"",
")",
"return",
"elif",
"y",
"==",
"1",
":",
"if",
"self",
".",
"grd",
".",
"get_tile",
"(",
"self",
".",
"current_y",
"+",
"1",
",",
"self",
".",
"current_x",
")",
"in",
"accessible",
":",
"self",
".",
"current_y",
"+=",
"1",
"self",
".",
"lg_mv",
"(",
"3",
",",
"self",
".",
"name",
"+",
"\": move# \"",
"+",
"str",
"(",
"self",
".",
"num_steps",
")",
"+",
"\" - moving South\"",
")",
"return",
"elif",
"y",
"==",
"-",
"1",
":",
"if",
"self",
".",
"grd",
".",
"get_tile",
"(",
"self",
".",
"current_y",
"-",
"1",
",",
"self",
".",
"current_x",
")",
"in",
"accessible",
":",
"self",
".",
"current_y",
"-=",
"1",
"self",
".",
"lg_mv",
"(",
"3",
",",
"self",
".",
"name",
"+",
"\": move# \"",
"+",
"str",
"(",
"self",
".",
"num_steps",
")",
"+",
"\" - moving North\"",
")",
"return",
"self",
".",
"grd",
".",
"set_tile",
"(",
"self",
".",
"start_y",
",",
"self",
".",
"start_x",
",",
"'A'",
")",
"self",
".",
"grd",
".",
"save",
"(",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"getcwd",
"(",
")",
",",
"'agent.txt'",
")",
")"
] | the goal of the explore agent is to move to the
target while avoiding blockages on the grid.
This function is messy and needs to be looked at.
It currently has a bug in that the backtrack oscillates
so need a new method of doing this - probably checking if
previously backtracked in that direction for those coords, ie
keep track of cells visited and number of times visited? | [
"the",
"goal",
"of",
"the",
"explore",
"agent",
"is",
"to",
"move",
"to",
"the",
"target",
"while",
"avoiding",
"blockages",
"on",
"the",
"grid",
".",
"This",
"function",
"is",
"messy",
"and",
"needs",
"to",
"be",
"looked",
"at",
".",
"It",
"currently",
"has",
"a",
"bug",
"in",
"that",
"the",
"backtrack",
"oscillates",
"so",
"need",
"a",
"new",
"method",
"of",
"doing",
"this",
"-",
"probably",
"checking",
"if",
"previously",
"backtracked",
"in",
"that",
"direction",
"for",
"those",
"coords",
"ie",
"keep",
"track",
"of",
"cells",
"visited",
"and",
"number",
"of",
"times",
"visited?"
] | fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03 | https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/agents/explore/agent_explore_grid.py#L43-L95 | train |
---

**repo:** acutesoftware/AIKIF | **path:** aikif/agents/explore/agent_explore_grid.py | **func_name:** ExploreAgent.lg_mv | **language:** python | **partition:** train
**sha:** fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03 | **url:** https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/agents/explore/agent_explore_grid.py#L98-L103

```python
def lg_mv(self, log_lvl, txt):
    """
    wrapper for debugging print and log methods
    """
    if log_lvl <= self.LOG_LEVEL:
        print(txt + str(self.current_y) + "," + str(self.current_x))
```
"""
wrapper for debugging print and log methods
"""
if log_lvl <= self.LOG_LEVEL:
print(txt + str(self.current_y) + "," + str(self.current_x)) | [
"def",
"lg_mv",
"(",
"self",
",",
"log_lvl",
",",
"txt",
")",
":",
"if",
"log_lvl",
"<=",
"self",
".",
"LOG_LEVEL",
":",
"print",
"(",
"txt",
"+",
"str",
"(",
"self",
".",
"current_y",
")",
"+",
"\",\"",
"+",
"str",
"(",
"self",
".",
"current_x",
")",
")"
] | wrapper for debugging print and log methods | [
"wrapper",
"for",
"debugging",
"print",
"and",
"log",
"methods"
] | fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03 | https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/agents/explore/agent_explore_grid.py#L98-L103 | train |
---

**repo:** acutesoftware/AIKIF | **path:** aikif/agents/explore/agent_explore_grid.py | **func_name:** ExploreAgent.get_intended_direction | **language:** python | **partition:** train
**sha:** fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03 | **url:** https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/agents/explore/agent_explore_grid.py#L105-L122

```python
def get_intended_direction(self):
    """
    returns a Y,X value showing which direction the
    agent should move in order to get to the target
    """
    x = 0
    y = 0
    if self.target_x == self.current_x and self.target_y == self.current_y:
        return y, x   # target already acquired
    if self.target_y > self.current_y:
        y = 1
    elif self.target_y < self.current_y:
        y = -1
    if self.target_x > self.current_x:
        x = 1
    elif self.target_x < self.current_x:
        x = -1
    return y, x
```
"""
returns a Y,X value showing which direction the
agent should move in order to get to the target
"""
x = 0
y = 0
if self.target_x == self.current_x and self.target_y == self.current_y:
return y,x # target already acquired
if self.target_y > self.current_y:
y = 1
elif self.target_y < self.current_y:
y = -1
if self.target_x > self.current_x:
x = 1
elif self.target_x < self.current_x:
x = -1
return y,x | [
"def",
"get_intended_direction",
"(",
"self",
")",
":",
"x",
"=",
"0",
"y",
"=",
"0",
"if",
"self",
".",
"target_x",
"==",
"self",
".",
"current_x",
"and",
"self",
".",
"target_y",
"==",
"self",
".",
"current_y",
":",
"return",
"y",
",",
"x",
"if",
"self",
".",
"target_y",
">",
"self",
".",
"current_y",
":",
"y",
"=",
"1",
"elif",
"self",
".",
"target_y",
"<",
"self",
".",
"current_y",
":",
"y",
"=",
"-",
"1",
"if",
"self",
".",
"target_x",
">",
"self",
".",
"current_x",
":",
"x",
"=",
"1",
"elif",
"self",
".",
"target_x",
"<",
"self",
".",
"current_x",
":",
"x",
"=",
"-",
"1",
"return",
"y",
",",
"x"
] | returns a Y,X value showing which direction the
agent should move in order to get to the target | [
"returns",
"a",
"Y",
"X",
"value",
"showing",
"which",
"direction",
"the",
"agent",
"should",
"move",
"in",
"order",
"to",
"get",
"to",
"the",
"target"
] | fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03 | https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/agents/explore/agent_explore_grid.py#L105-L122 | train |
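The method boils down to taking the sign of the target offset on each axis. A standalone restatement of that logic (not part of the agent class, just a sketch that makes the contract easy to test):

```python
def intended_direction(current_y, current_x, target_y, target_x):
    """Standalone restatement of ExploreAgent.get_intended_direction."""
    sign = lambda d: (d > 0) - (d < 0)   # maps any offset to -1, 0 or 1
    return sign(target_y - current_y), sign(target_x - current_x)

assert intended_direction(2, 2, 5, 0) == (1, -1)
assert intended_direction(4, 4, 4, 4) == (0, 0)   # target already acquired
```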
---

**repo:** acutesoftware/AIKIF | **path:** aikif/agents/explore/agent_explore_grid.py | **func_name:** ExploreAgent.show_status | **language:** python | **partition:** train
**sha:** fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03 | **url:** https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/agents/explore/agent_explore_grid.py#L135-L150

```python
def show_status(self):
    """
    dumps the status of the agent
    """
    txt = 'Agent Status:\n'
    print(txt)
    txt += "start_x = " + str(self.start_x) + "\n"
    txt += "start_y = " + str(self.start_y) + "\n"
    txt += "target_x = " + str(self.target_x) + "\n"
    txt += "target_y = " + str(self.target_y) + "\n"
    txt += "current_x = " + str(self.current_x) + "\n"
    txt += "current_y = " + str(self.current_y) + "\n"
    print(self.grd)
    return txt
```
"""
dumps the status of the agent
"""
txt = 'Agent Status:\n'
print(txt)
txt += "start_x = " + str(self.start_x) + "\n"
txt += "start_y = " + str(self.start_y) + "\n"
txt += "target_x = " + str(self.target_x) + "\n"
txt += "target_y = " + str(self.target_y) + "\n"
txt += "current_x = " + str(self.current_x) + "\n"
txt += "current_y = " + str(self.current_y) + "\n"
print(self.grd)
return txt | [
"def",
"show_status",
"(",
"self",
")",
":",
"txt",
"=",
"'Agent Status:\\n'",
"print",
"(",
"txt",
")",
"txt",
"+=",
"\"start_x = \"",
"+",
"str",
"(",
"self",
".",
"start_x",
")",
"+",
"\"\\n\"",
"txt",
"+=",
"\"start_y = \"",
"+",
"str",
"(",
"self",
".",
"start_y",
")",
"+",
"\"\\n\"",
"txt",
"+=",
"\"target_x = \"",
"+",
"str",
"(",
"self",
".",
"target_x",
")",
"+",
"\"\\n\"",
"txt",
"+=",
"\"target_y = \"",
"+",
"str",
"(",
"self",
".",
"target_y",
")",
"+",
"\"\\n\"",
"txt",
"+=",
"\"current_x = \"",
"+",
"str",
"(",
"self",
".",
"current_x",
")",
"+",
"\"\\n\"",
"txt",
"+=",
"\"current_y = \"",
"+",
"str",
"(",
"self",
".",
"current_y",
")",
"+",
"\"\\n\"",
"print",
"(",
"self",
".",
"grd",
")",
"return",
"txt"
] | dumps the status of the agent | [
"dumps",
"the",
"status",
"of",
"the",
"agent"
] | fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03 | https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/agents/explore/agent_explore_grid.py#L135-L150 | train |
---

**repo:** acutesoftware/AIKIF | **path:** aikif/toolbox/audio_tools.py | **func_name:** get_audio_metadata_old | **language:** python | **partition:** train
**sha:** fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03 | **url:** https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/toolbox/audio_tools.py#L65-L100

```python
def get_audio_metadata_old(fname):
    """ retrieve the metadata from an MP3 file """
    audio_dict = {}
    print("IDv2 tag info for %s:" % fname)
    try:
        audio = mutagenx.id3.ID3(fname, translate=False)
    except StandardError as err:   # StandardError exists only on Python 2
        print("ERROR = " + str(err))
    #else:
    #    print(audio.pprint().encode("utf-8", "replace"))
    #    for frame in audio.values():
    #        print(repr(frame))
    try:
        audio_dict["title"] = audio["title"]
    except KeyError:
        print("No title")
    try:
        audio_dict["artist"] = audio["artist"]   # tags['TPE1']
    except KeyError:
        print("No artist")
    try:
        audio_dict["album"] = audio["album"]
    except KeyError:
        print("No album")
    try:
        audio_dict["length"] = audio["length"]
    except KeyError:
        print("No length")
    #pprint.pprint(audio.tags)
    return audio_dict
```
""" retrieve the metadata from an MP3 file """
audio_dict = {}
print("IDv2 tag info for %s:" % fname)
try:
audio = mutagenx.id3.ID3(fname, translate=False)
except StandardError as err:
print("ERROR = " + str(err))
#else:
#print(audio.pprint().encode("utf-8", "replace"))
#for frame in audio.values():
# print(repr(frame))
try:
audio_dict["title"] = audio["title"]
except KeyError:
print("No title")
try:
audio_dict["artist"] = audio["artist"] # tags['TPE1']
except KeyError:
print("No artist")
try:
audio_dict["album"] = audio["album"]
except KeyError:
print("No album")
try:
audio_dict["length"] = audio["length"]
except KeyError:
print("No length")
#pprint.pprint(audio.tags)
return audio_dict | [
"def",
"get_audio_metadata_old",
"(",
"fname",
")",
":",
"audio_dict",
"=",
"{",
"}",
"print",
"(",
"\"IDv2 tag info for %s:\"",
"%",
"fname",
")",
"try",
":",
"audio",
"=",
"mutagenx",
".",
"id3",
".",
"ID3",
"(",
"fname",
",",
"translate",
"=",
"False",
")",
"except",
"StandardError",
"as",
"err",
":",
"print",
"(",
"\"ERROR = \"",
"+",
"str",
"(",
"err",
")",
")",
"try",
":",
"audio_dict",
"[",
"\"title\"",
"]",
"=",
"audio",
"[",
"\"title\"",
"]",
"except",
"KeyError",
":",
"print",
"(",
"\"No title\"",
")",
"try",
":",
"audio_dict",
"[",
"\"artist\"",
"]",
"=",
"audio",
"[",
"\"artist\"",
"]",
"except",
"KeyError",
":",
"print",
"(",
"\"No artist\"",
")",
"try",
":",
"audio_dict",
"[",
"\"album\"",
"]",
"=",
"audio",
"[",
"\"album\"",
"]",
"except",
"KeyError",
":",
"print",
"(",
"\"No album\"",
")",
"try",
":",
"audio_dict",
"[",
"\"length\"",
"]",
"=",
"audio",
"[",
"\"length\"",
"]",
"except",
"KeyError",
":",
"print",
"(",
"\"No length\"",
")",
"return",
"audio_dict"
] | retrieve the metadata from an MP3 file | [
"retrieve",
"the",
"metadata",
"from",
"an",
"MP3",
"file"
] | fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03 | https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/toolbox/audio_tools.py#L65-L100 | train |
---

**repo:** Nachtfeuer/pipeline | **path:** spline/tools/table.py | **func_name:** calculate_columns | **language:** python | **partition:** train
**sha:** 04ca18c4e95e4349532bb45b768206393e1f2c13 | **url:** https://github.com/Nachtfeuer/pipeline/blob/04ca18c4e95e4349532bb45b768206393e1f2c13/spline/tools/table.py#L5-L26

```python
def calculate_columns(sequence):
    """
    Find all row names and the maximum column widths.

    Args:
        sequence (list): rows given as dictionaries (column name -> value).

    Returns:
        dict: column names (key) and widths (value).
    """
    columns = {}
    for row in sequence:
        for key in row.keys():
            if key not in columns:
                columns[key] = len(key)
            value_length = len(str(row[key]))
            if value_length > columns[key]:
                columns[key] = value_length
    return columns
```
"""
Find all row names and the maximum column widths.
Args:
columns (dict): the keys are the column name and the value the max length.
Returns:
dict: column names (key) and widths (value).
"""
columns = {}
for row in sequence:
for key in row.keys():
if key not in columns:
columns[key] = len(key)
value_length = len(str(row[key]))
if value_length > columns[key]:
columns[key] = value_length
return columns | [
"def",
"calculate_columns",
"(",
"sequence",
")",
":",
"columns",
"=",
"{",
"}",
"for",
"row",
"in",
"sequence",
":",
"for",
"key",
"in",
"row",
".",
"keys",
"(",
")",
":",
"if",
"key",
"not",
"in",
"columns",
":",
"columns",
"[",
"key",
"]",
"=",
"len",
"(",
"key",
")",
"value_length",
"=",
"len",
"(",
"str",
"(",
"row",
"[",
"key",
"]",
")",
")",
"if",
"value_length",
">",
"columns",
"[",
"key",
"]",
":",
"columns",
"[",
"key",
"]",
"=",
"value_length",
"return",
"columns"
] | Find all row names and the maximum column widths.
Args:
columns (dict): the keys are the column name and the value the max length.
Returns:
dict: column names (key) and widths (value). | [
"Find",
"all",
"row",
"names",
"and",
"the",
"maximum",
"column",
"widths",
"."
] | 04ca18c4e95e4349532bb45b768206393e1f2c13 | https://github.com/Nachtfeuer/pipeline/blob/04ca18c4e95e4349532bb45b768206393e1f2c13/spline/tools/table.py#L5-L26 | train |
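Example, assuming `calculate_columns` from the snippet above is in scope: each width is the maximum of the header length and the widest value:

```python
rows = [
    {'stage': 'build', 'duration': 12},
    {'stage': 'integration-test', 'duration': 347},
]
print(calculate_columns(rows))
# {'stage': 16, 'duration': 8}: 'integration-test' is 16 chars wide,
# and the 'duration' header itself (8 chars) beats both values
```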
---

**repo:** Nachtfeuer/pipeline | **path:** spline/tools/table.py | **func_name:** calculate_row_format | **language:** python | **partition:** train
**sha:** 04ca18c4e95e4349532bb45b768206393e1f2c13 | **url:** https://github.com/Nachtfeuer/pipeline/blob/04ca18c4e95e4349532bb45b768206393e1f2c13/spline/tools/table.py#L29-L51

```python
def calculate_row_format(columns, keys=None):
    """
    Calculate row format.

    Args:
        columns (dict): the keys are the column name and the value the max length.
        keys (list): optional list of keys to order columns as well as to filter for them.

    Returns:
        str: format for table row
    """
    row_format = ''
    if keys is None:
        keys = columns.keys()
    else:
        keys = [key for key in keys if key in columns]

    for key in keys:
        if len(row_format) > 0:
            row_format += "|"
        row_format += "%%(%s)-%ds" % (key, columns[key])
    return '|' + row_format + '|'
```
"""
Calculate row format.
Args:
columns (dict): the keys are the column name and the value the max length.
keys (list): optional list of keys to order columns as well as to filter for them.
Returns:
str: format for table row
"""
row_format = ''
if keys is None:
keys = columns.keys()
else:
keys = [key for key in keys if key in columns]
for key in keys:
if len(row_format) > 0:
row_format += "|"
row_format += "%%(%s)-%ds" % (key, columns[key])
return '|' + row_format + '|' | [
"def",
"calculate_row_format",
"(",
"columns",
",",
"keys",
"=",
"None",
")",
":",
"row_format",
"=",
"''",
"if",
"keys",
"is",
"None",
":",
"keys",
"=",
"columns",
".",
"keys",
"(",
")",
"else",
":",
"keys",
"=",
"[",
"key",
"for",
"key",
"in",
"keys",
"if",
"key",
"in",
"columns",
"]",
"for",
"key",
"in",
"keys",
":",
"if",
"len",
"(",
"row_format",
")",
">",
"0",
":",
"row_format",
"+=",
"\"|\"",
"row_format",
"+=",
"\"%%(%s)-%ds\"",
"%",
"(",
"key",
",",
"columns",
"[",
"key",
"]",
")",
"return",
"'|'",
"+",
"row_format",
"+",
"'|'"
] | Calculate row format.
Args:
columns (dict): the keys are the column name and the value the max length.
keys (list): optional list of keys to order columns as well as to filter for them.
Returns:
str: format for table row | [
"Calculate",
"row",
"format",
"."
] | 04ca18c4e95e4349532bb45b768206393e1f2c13 | https://github.com/Nachtfeuer/pipeline/blob/04ca18c4e95e4349532bb45b768206393e1f2c13/spline/tools/table.py#L29-L51 | train |
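The returned value is an old-style `%`-format string keyed by column name, so the same format can render headers, separators and data rows alike:

```python
fmt = calculate_row_format({'stage': 16, 'duration': 8}, keys=['stage', 'duration'])
print(fmt)                                        # |%(stage)-16s|%(duration)-8s|
print(fmt % {'stage': 'build', 'duration': 12})   # |build           |12      |
```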
---

**repo:** Nachtfeuer/pipeline | **path:** spline/tools/table.py | **func_name:** pprint | **language:** python | **partition:** train
**sha:** 04ca18c4e95e4349532bb45b768206393e1f2c13 | **url:** https://github.com/Nachtfeuer/pipeline/blob/04ca18c4e95e4349532bb45b768206393e1f2c13/spline/tools/table.py#L54-L75

```python
def pprint(sequence, keys=None):
    """
    Print sequence as ascii table to stdout.

    Args:
        sequence (list or tuple): a sequence with a dictionary each entry.
        keys (list): optional list of keys to order columns as well as to filter for them.
    """
    if len(sequence) > 0:
        columns = calculate_columns(sequence)
        row_format = calculate_row_format(columns, keys)
        header = row_format % dict([(key, key.title()) for key in columns])
        separator = row_format % dict([(key, '-' * columns[key]) for key in columns])
        print(separator)
        print(header)
        print(separator)
        for row in sequence:
            print(row_format % row)
        print(separator)
```
"""
Print sequence as ascii table to stdout.
Args:
sequence (list or tuple): a sequence with a dictionary each entry.
keys (list): optional list of keys to order columns as well as to filter for them.
"""
if len(sequence) > 0:
columns = calculate_columns(sequence)
row_format = calculate_row_format(columns, keys)
header = row_format % dict([(key, key.title()) for key in columns])
separator = row_format % dict([(key, '-' * columns[key]) for key in columns])
print(separator)
print(header)
print(separator)
for row in sequence:
print(row_format % row)
print(separator) | [
"def",
"pprint",
"(",
"sequence",
",",
"keys",
"=",
"None",
")",
":",
"if",
"len",
"(",
"sequence",
")",
">",
"0",
":",
"columns",
"=",
"calculate_columns",
"(",
"sequence",
")",
"row_format",
"=",
"calculate_row_format",
"(",
"columns",
",",
"keys",
")",
"header",
"=",
"row_format",
"%",
"dict",
"(",
"[",
"(",
"key",
",",
"key",
".",
"title",
"(",
")",
")",
"for",
"key",
"in",
"columns",
"]",
")",
"separator",
"=",
"row_format",
"%",
"dict",
"(",
"[",
"(",
"key",
",",
"'-'",
"*",
"columns",
"[",
"key",
"]",
")",
"for",
"key",
"in",
"columns",
"]",
")",
"print",
"(",
"separator",
")",
"print",
"(",
"header",
")",
"print",
"(",
"separator",
")",
"for",
"row",
"in",
"sequence",
":",
"print",
"(",
"row_format",
"%",
"row",
")",
"print",
"(",
"separator",
")"
] | Print sequence as ascii table to stdout.
Args:
sequence (list or tuple): a sequence with a dictionary each entry.
keys (list): optional list of keys to order columns as well as to filter for them. | [
"Print",
"sequence",
"as",
"ascii",
"table",
"to",
"stdout",
"."
] | 04ca18c4e95e4349532bb45b768206393e1f2c13 | https://github.com/Nachtfeuer/pipeline/blob/04ca18c4e95e4349532bb45b768206393e1f2c13/spline/tools/table.py#L54-L75 | train |
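Putting the three helpers together; `keys` both filters and orders the columns (here the made-up `internal_id` field is dropped from the output):

```python
jobs = [
    {'stage': 'build', 'duration': 12, 'internal_id': 'x1'},
    {'stage': 'integration-test', 'duration': 347, 'internal_id': 'x2'},
]
pprint(jobs, keys=['stage', 'duration'])
# |----------------|--------|
# |Stage           |Duration|
# |----------------|--------|
# |build           |12      |
# |integration-test|347     |
# |----------------|--------|
```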
---

**repo:** Nachtfeuer/pipeline | **path:** spline/matrix.py | **func_name:** matrix_worker | **language:** python | **partition:** train
**sha:** 04ca18c4e95e4349532bb45b768206393e1f2c13 | **url:** https://github.com/Nachtfeuer/pipeline/blob/04ca18c4e95e4349532bb45b768206393e1f2c13/spline/matrix.py#L32-L50

```python
def matrix_worker(data):
    """
    Run pipelines in parallel.

    Args:
        data(dict): parameters for the pipeline (model, options, ...).

    Returns:
        dict: with two fields: success True/False and captured output (list of str).
    """
    matrix = data['matrix']
    Logger.get_logger(__name__ + '.worker').info(
        "Processing pipeline for matrix entry '%s'", matrix['name'])

    env = matrix['env'].copy()
    env.update({'PIPELINE_MATRIX': matrix['name']})

    pipeline = Pipeline(model=data['model'], env=env, options=data['options'])
    pipeline.hooks = data['hooks']
    return pipeline.process(data['pipeline'])
```
"""
Run pipelines in parallel.
Args:
data(dict): parameters for the pipeline (model, options, ...).
Returns:
dict: with two fields: success True/False and captured output (list of str).
"""
matrix = data['matrix']
Logger.get_logger(__name__ + '.worker').info(
"Processing pipeline for matrix entry '%s'", matrix['name'])
env = matrix['env'].copy()
env.update({'PIPELINE_MATRIX': matrix['name']})
pipeline = Pipeline(model=data['model'], env=env, options=data['options'])
pipeline.hooks = data['hooks']
return pipeline.process(data['pipeline']) | [
"def",
"matrix_worker",
"(",
"data",
")",
":",
"matrix",
"=",
"data",
"[",
"'matrix'",
"]",
"Logger",
".",
"get_logger",
"(",
"__name__",
"+",
"'.worker'",
")",
".",
"info",
"(",
"\"Processing pipeline for matrix entry '%s'\"",
",",
"matrix",
"[",
"'name'",
"]",
")",
"env",
"=",
"matrix",
"[",
"'env'",
"]",
".",
"copy",
"(",
")",
"env",
".",
"update",
"(",
"{",
"'PIPELINE_MATRIX'",
":",
"matrix",
"[",
"'name'",
"]",
"}",
")",
"pipeline",
"=",
"Pipeline",
"(",
"model",
"=",
"data",
"[",
"'model'",
"]",
",",
"env",
"=",
"env",
",",
"options",
"=",
"data",
"[",
"'options'",
"]",
")",
"pipeline",
".",
"hooks",
"=",
"data",
"[",
"'hooks'",
"]",
"return",
"pipeline",
".",
"process",
"(",
"data",
"[",
"'pipeline'",
"]",
")"
] | Run pipelines in parallel.
Args:
data(dict): parameters for the pipeline (model, options, ...).
Returns:
dict: with two fields: success True/False and captured output (list of str). | [
"Run",
"pipelines",
"in",
"parallel",
"."
] | 04ca18c4e95e4349532bb45b768206393e1f2c13 | https://github.com/Nachtfeuer/pipeline/blob/04ca18c4e95e4349532bb45b768206393e1f2c13/spline/matrix.py#L32-L50 | train |
Nachtfeuer/pipeline | spline/matrix.py | Matrix.can_process_matrix | def can_process_matrix(entry, matrix_tags):
"""
Check given matrix tags to be in the given list of matrix tags.
Args:
entry (dict): matrix item (in yaml).
matrix_tags (list): represents --matrix-tags defined by user in command line.
Returns:
bool: True when matrix entry can be processed.
"""
if len(matrix_tags) == 0:
return True
count = 0
if 'tags' in entry:
for tag in matrix_tags:
if tag in entry['tags']:
count += 1
return count > 0 | python | def can_process_matrix(entry, matrix_tags):
"""
Check given matrix tags to be in the given list of matrix tags.
Args:
entry (dict): matrix item (in yaml).
matrix_tags (list): represents --matrix-tags defined by user in command line.
Returns:
bool: True when matrix entry can be processed.
"""
if len(matrix_tags) == 0:
return True
count = 0
if 'tags' in entry:
for tag in matrix_tags:
if tag in entry['tags']:
count += 1
return count > 0 | [
"def",
"can_process_matrix",
"(",
"entry",
",",
"matrix_tags",
")",
":",
"if",
"len",
"(",
"matrix_tags",
")",
"==",
"0",
":",
"return",
"True",
"count",
"=",
"0",
"if",
"'tags'",
"in",
"entry",
":",
"for",
"tag",
"in",
"matrix_tags",
":",
"if",
"tag",
"in",
"entry",
"[",
"'tags'",
"]",
":",
"count",
"+=",
"1",
"return",
"count",
">",
"0"
] | Check given matrix tags to be in the given list of matrix tags.
Args:
entry (dict): matrix item (in yaml).
matrix_tags (list): represents --matrix-tags defined by user in command line.
Returns:
bool: True when matrix entry can be processed. | [
"Check",
"given",
"matrix",
"tags",
"to",
"be",
"in",
"the",
"given",
"list",
"of",
"matric",
"tags",
"."
] | 04ca18c4e95e4349532bb45b768206393e1f2c13 | https://github.com/Nachtfeuer/pipeline/blob/04ca18c4e95e4349532bb45b768206393e1f2c13/spline/matrix.py#L115-L134 | train |
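can_process_matrix accepts an entry when no tags were requested at all, or when at least one requested tag appears in the entry's own tag list. A standalone sketch with the same semantics (the entry data is made up):

entry = {'name': 'py36', 'tags': ['python', 'unit-test']}

def matches(entry, matrix_tags):
    # empty filter accepts every entry, mirroring the static method above
    if not matrix_tags:
        return True
    return any(tag in entry.get('tags', []) for tag in matrix_tags)

assert matches(entry, [])                   # no filter -> accepted
assert matches(entry, ['python'])           # overlapping tag -> accepted
assert not matches(entry, ['integration'])  # no overlap -> rejected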
Nachtfeuer/pipeline | spline/matrix.py | Matrix.run_matrix_ordered | def run_matrix_ordered(self, process_data):
"""
Running pipelines one after the other.
Returns:
dict: with two fields: success True/False and captured output (list of str).
"""
output = []
for entry in self.matrix:
env = entry['env'].copy()
env.update({'PIPELINE_MATRIX': entry['name']})
if Matrix.can_process_matrix(entry, process_data.options.matrix_tags):
self.logger.info("Processing pipeline for matrix entry '%s'", entry['name'])
pipeline = Pipeline(model=process_data.model, env=env,
options=process_data.options)
pipeline.hooks = process_data.hooks
result = pipeline.process(process_data.pipeline)
output += result['output']
if not result['success']:
return {'success': False, 'output': output}
return {'success': True, 'output': output} | python | def run_matrix_ordered(self, process_data):
"""
Running pipelines one after the other.
Returns:
dict: with two fields: success True/False and captured output (list of str).
"""
output = []
for entry in self.matrix:
env = entry['env'].copy()
env.update({'PIPELINE_MATRIX': entry['name']})
if Matrix.can_process_matrix(entry, process_data.options.matrix_tags):
self.logger.info("Processing pipeline for matrix entry '%s'", entry['name'])
pipeline = Pipeline(model=process_data.model, env=env,
options=process_data.options)
pipeline.hooks = process_data.hooks
result = pipeline.process(process_data.pipeline)
output += result['output']
if not result['success']:
return {'success': False, 'output': output}
return {'success': True, 'output': output} | [
"def",
"run_matrix_ordered",
"(",
"self",
",",
"process_data",
")",
":",
"output",
"=",
"[",
"]",
"for",
"entry",
"in",
"self",
".",
"matrix",
":",
"env",
"=",
"entry",
"[",
"'env'",
"]",
".",
"copy",
"(",
")",
"env",
".",
"update",
"(",
"{",
"'PIPELINE_MATRIX'",
":",
"entry",
"[",
"'name'",
"]",
"}",
")",
"if",
"Matrix",
".",
"can_process_matrix",
"(",
"entry",
",",
"process_data",
".",
"options",
".",
"matrix_tags",
")",
":",
"self",
".",
"logger",
".",
"info",
"(",
"\"Processing pipeline for matrix entry '%s'\"",
",",
"entry",
"[",
"'name'",
"]",
")",
"pipeline",
"=",
"Pipeline",
"(",
"model",
"=",
"process_data",
".",
"model",
",",
"env",
"=",
"env",
",",
"options",
"=",
"process_data",
".",
"options",
")",
"pipeline",
".",
"hooks",
"=",
"process_data",
".",
"hooks",
"result",
"=",
"pipeline",
".",
"process",
"(",
"process_data",
".",
"pipeline",
")",
"output",
"+=",
"result",
"[",
"'output'",
"]",
"if",
"not",
"result",
"[",
"'success'",
"]",
":",
"return",
"{",
"'success'",
":",
"False",
",",
"'output'",
":",
"output",
"}",
"return",
"{",
"'success'",
":",
"True",
",",
"'output'",
":",
"output",
"}"
] | Running pipelines one after the other.
Returns:
dict: with two fields: success True/False and captured output (list of str). | [
"Running",
"pipelines",
"one",
"after",
"the",
"other",
"."
] | 04ca18c4e95e4349532bb45b768206393e1f2c13 | https://github.com/Nachtfeuer/pipeline/blob/04ca18c4e95e4349532bb45b768206393e1f2c13/spline/matrix.py#L136-L157 | train |
Nachtfeuer/pipeline | spline/matrix.py | Matrix.run_matrix_in_parallel | def run_matrix_in_parallel(self, process_data):
"""Running pipelines in parallel."""
worker_data = [{'matrix': entry, 'pipeline': process_data.pipeline,
'model': process_data.model, 'options': process_data.options,
'hooks': process_data.hooks} for entry in self.matrix
if Matrix.can_process_matrix(entry, process_data.options.matrix_tags)]
output = []
success = True
with closing(multiprocessing.Pool(multiprocessing.cpu_count())) as pool:
for result in pool.map(matrix_worker, worker_data):
output += result['output']
if not result['success']:
success = False
return {'success': success, 'output': output} | python | def run_matrix_in_parallel(self, process_data):
"""Running pipelines in parallel."""
worker_data = [{'matrix': entry, 'pipeline': process_data.pipeline,
'model': process_data.model, 'options': process_data.options,
'hooks': process_data.hooks} for entry in self.matrix
if Matrix.can_process_matrix(entry, process_data.options.matrix_tags)]
output = []
success = True
with closing(multiprocessing.Pool(multiprocessing.cpu_count())) as pool:
for result in pool.map(matrix_worker, worker_data):
output += result['output']
if not result['success']:
success = False
return {'success': success, 'output': output} | [
"def",
"run_matrix_in_parallel",
"(",
"self",
",",
"process_data",
")",
":",
"worker_data",
"=",
"[",
"{",
"'matrix'",
":",
"entry",
",",
"'pipeline'",
":",
"process_data",
".",
"pipeline",
",",
"'model'",
":",
"process_data",
".",
"model",
",",
"'options'",
":",
"process_data",
".",
"options",
",",
"'hooks'",
":",
"process_data",
".",
"hooks",
"}",
"for",
"entry",
"in",
"self",
".",
"matrix",
"if",
"Matrix",
".",
"can_process_matrix",
"(",
"entry",
",",
"process_data",
".",
"options",
".",
"matrix_tags",
")",
"]",
"output",
"=",
"[",
"]",
"success",
"=",
"True",
"with",
"closing",
"(",
"multiprocessing",
".",
"Pool",
"(",
"multiprocessing",
".",
"cpu_count",
"(",
")",
")",
")",
"as",
"pool",
":",
"for",
"result",
"in",
"pool",
".",
"map",
"(",
"matrix_worker",
",",
"worker_data",
")",
":",
"output",
"+=",
"result",
"[",
"'output'",
"]",
"if",
"not",
"result",
"[",
"'success'",
"]",
":",
"success",
"=",
"False",
"return",
"{",
"'success'",
":",
"success",
",",
"'output'",
":",
"output",
"}"
] | Running pipelines in parallel. | [
"Running",
"pipelines",
"in",
"parallel",
"."
] | 04ca18c4e95e4349532bb45b768206393e1f2c13 | https://github.com/Nachtfeuer/pipeline/blob/04ca18c4e95e4349532bb45b768206393e1f2c13/spline/matrix.py#L159-L172 | train |
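run_matrix_in_parallel wraps multiprocessing.Pool in contextlib.closing so the pool is closed even if a worker raises, then folds the per-entry results into one success flag plus a combined output list. A minimal stdlib-only sketch of the same fan-out/aggregate pattern (the worker is a stand-in for matrix_worker):

import multiprocessing
from contextlib import closing

def worker(item):
    # stand-in for matrix_worker; must be module-level so it can be pickled
    return {'success': item % 2 == 0, 'output': ['processed %d' % item]}

if __name__ == '__main__':
    output, success = [], True
    with closing(multiprocessing.Pool(multiprocessing.cpu_count())) as pool:
        for result in pool.map(worker, range(4)):
            output += result['output']
            success = success and result['success']
    print(success, output)  # False ['processed 0', ..., 'processed 3']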
Nachtfeuer/pipeline | spline/matrix.py | Matrix.process | def process(self, process_data):
"""Process the pipeline per matrix item."""
if self.parallel and not process_data.options.dry_run:
return self.run_matrix_in_parallel(process_data)
return self.run_matrix_ordered(process_data) | python | def process(self, process_data):
"""Process the pipeline per matrix item."""
if self.parallel and not process_data.options.dry_run:
return self.run_matrix_in_parallel(process_data)
return self.run_matrix_ordered(process_data) | [
"def",
"process",
"(",
"self",
",",
"process_data",
")",
":",
"if",
"self",
".",
"parallel",
"and",
"not",
"process_data",
".",
"options",
".",
"dry_run",
":",
"return",
"self",
".",
"run_matrix_in_parallel",
"(",
"process_data",
")",
"return",
"self",
".",
"run_matrix_ordered",
"(",
"process_data",
")"
] | Process the pipeline per matrix item. | [
"Process",
"the",
"pipeline",
"per",
"matrix",
"item",
"."
] | 04ca18c4e95e4349532bb45b768206393e1f2c13 | https://github.com/Nachtfeuer/pipeline/blob/04ca18c4e95e4349532bb45b768206393e1f2c13/spline/matrix.py#L174-L178 | train |
aartur/mschematool | mschematool/core.py | _sqlfile_to_statements | def _sqlfile_to_statements(sql):
"""
Takes a SQL string containing 0 or more statements and returns a
list of individual statements as strings. Comments and
empty statements are ignored.
"""
statements = (sqlparse.format(stmt, strip_comments=True).strip() for stmt in sqlparse.split(sql))
return [stmt for stmt in statements if stmt] | python | def _sqlfile_to_statements(sql):
"""
Takes a SQL string containing 0 or more statements and returns a
list of individual statements as strings. Comments and
empty statements are ignored.
"""
statements = (sqlparse.format(stmt, strip_comments=True).strip() for stmt in sqlparse.split(sql))
return [stmt for stmt in statements if stmt] | [
"def",
"_sqlfile_to_statements",
"(",
"sql",
")",
":",
"statements",
"=",
"(",
"sqlparse",
".",
"format",
"(",
"stmt",
",",
"strip_comments",
"=",
"True",
")",
".",
"strip",
"(",
")",
"for",
"stmt",
"in",
"sqlparse",
".",
"split",
"(",
"sql",
")",
")",
"return",
"[",
"stmt",
"for",
"stmt",
"in",
"statements",
"if",
"stmt",
"]"
] | Takes a SQL string containing 0 or more statements and returns a
list of individual statements as strings. Comments and
empty statements are ignored. | [
"Takes",
"a",
"SQL",
"string",
"containing",
"0",
"or",
"more",
"statements",
"and",
"returns",
"a",
"list",
"of",
"individual",
"statements",
"as",
"strings",
".",
"Comments",
"and",
"empty",
"statements",
"are",
"ignored",
"."
] | 57ec9541f80b44890294126eab92ce243c8833c4 | https://github.com/aartur/mschematool/blob/57ec9541f80b44890294126eab92ce243c8833c4/mschematool/core.py#L94-L101 | train |
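A usage sketch for _sqlfile_to_statements; sqlparse.split and sqlparse.format(..., strip_comments=True) are the actual sqlparse calls used above, while the SQL text here is made up:

import sqlparse  # third-party: pip install sqlparse

sql = '''
-- schema setup
CREATE TABLE t (id INT);

INSERT INTO t VALUES (1);
'''
statements = [sqlparse.format(stmt, strip_comments=True).strip()
              for stmt in sqlparse.split(sql)]
print([stmt for stmt in statements if stmt])
# roughly: ['CREATE TABLE t (id INT);', 'INSERT INTO t VALUES (1);']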
aartur/mschematool | mschematool/core.py | MigrationsRepository.generate_migration_name | def generate_migration_name(self, name, suffix):
"""Returns a name of a new migration. It will usually be a filename with
a valid and unique name.
:param name: human-readable name of a migration
:param suffix: file suffix (extension) - e.g. 'sql'
"""
return os.path.join(self.dir,
'm{datestr}_{name}.{suffix}'.format(
datestr=datetime.datetime.utcnow().strftime('%Y%m%d%H%M%S'),
name=name.replace(' ', '_'),
suffix=suffix)) | python | def generate_migration_name(self, name, suffix):
"""Returns a name of a new migration. It will usually be a filename with
a valid and unique name.
:param name: human-readable name of a migration
:param suffix: file suffix (extension) - e.g. 'sql'
"""
return os.path.join(self.dir,
'm{datestr}_{name}.{suffix}'.format(
datestr=datetime.datetime.utcnow().strftime('%Y%m%d%H%M%S'),
name=name.replace(' ', '_'),
suffix=suffix)) | [
"def",
"generate_migration_name",
"(",
"self",
",",
"name",
",",
"suffix",
")",
":",
"return",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"dir",
",",
"'m{datestr}_{name}.{suffix}'",
".",
"format",
"(",
"datestr",
"=",
"datetime",
".",
"datetime",
".",
"utcnow",
"(",
")",
".",
"strftime",
"(",
"'%Y%m%d%H%M%S'",
")",
",",
"name",
"=",
"name",
".",
"replace",
"(",
"' '",
",",
"'_'",
")",
",",
"suffix",
"=",
"suffix",
")",
")"
] | Returns a name of a new migration. It will usually be a filename with
a valid and unique name.
:param name: human-readable name of a migration
:param suffix: file suffix (extension) - e.g. 'sql' | [
"Returns",
"a",
"name",
"of",
"a",
"new",
"migration",
".",
"It",
"will",
"usually",
"be",
"a",
"filename",
"with",
"a",
"valid",
"and",
"unique",
"name",
"."
] | 57ec9541f80b44890294126eab92ce243c8833c4 | https://github.com/aartur/mschematool/blob/57ec9541f80b44890294126eab92ce243c8833c4/mschematool/core.py#L118-L129 | train |
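The naming scheme above is m<UTC timestamp>_<name with spaces replaced>.<suffix>. A self-contained sketch that reproduces the format without the repository class (the function name is hypothetical):

import datetime
import os

def sketch_migration_name(base_dir, name, suffix):
    # mirrors generate_migration_name: m<YYYYmmddHHMMSS>_<name>.<suffix>
    return os.path.join(base_dir, 'm{datestr}_{name}.{suffix}'.format(
        datestr=datetime.datetime.utcnow().strftime('%Y%m%d%H%M%S'),
        name=name.replace(' ', '_'),
        suffix=suffix))

print(sketch_migration_name('migrations', 'add users table', 'sql'))
# e.g. migrations/m20240101120000_add_users_table.sql (timestamp varies)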
aartur/mschematool | mschematool/core.py | MigrationsExecutor._call_migrate | def _call_migrate(self, module, connection_param):
"""Subclasses should call this method instead of `module.migrate` directly,
to support the optional `db_config` argument.
"""
args = [connection_param]
spec = inspect.getargspec(module.migrate)
if len(spec.args) == 2:
args.append(self.db_config)
return module.migrate(*args) | python | def _call_migrate(self, module, connection_param):
"""Subclasses should call this method instead of `module.migrate` directly,
to support the optional `db_config` argument.
"""
args = [connection_param]
spec = inspect.getargspec(module.migrate)
if len(spec.args) == 2:
args.append(self.db_config)
return module.migrate(*args) | [
"def",
"_call_migrate",
"(",
"self",
",",
"module",
",",
"connection_param",
")",
":",
"args",
"=",
"[",
"connection_param",
"]",
"spec",
"=",
"inspect",
".",
"getargspec",
"(",
"module",
".",
"migrate",
")",
"if",
"len",
"(",
"spec",
".",
"args",
")",
"==",
"2",
":",
"args",
".",
"append",
"(",
"self",
".",
"db_config",
")",
"return",
"module",
".",
"migrate",
"(",
"*",
"args",
")"
] | Subclasses should call this method instead of `module.migrate` directly,
to support the optional `db_config` argument. | [
"Subclasses",
"should",
"call",
"this",
"method",
"instead",
"of",
"module",
".",
"migrate",
"directly",
"to",
"support",
"db_config",
"optional",
"argument",
"."
] | 57ec9541f80b44890294126eab92ce243c8833c4 | https://github.com/aartur/mschematool/blob/57ec9541f80b44890294126eab92ce243c8833c4/mschematool/core.py#L232-L240 | train |
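Note that inspect.getargspec is deprecated and was removed in Python 3.11; a sketch of the same one-or-two-argument dispatch using inspect.signature instead (the migration functions below are hypothetical):

import inspect

def call_migrate(migrate, connection, db_config):
    # pass db_config only when the migration function declares a second parameter
    if len(inspect.signature(migrate).parameters) == 2:
        return migrate(connection, db_config)
    return migrate(connection)

def m_old(connection):
    return 'one-arg migration'

def m_new(connection, db_config):
    return 'two-arg migration with %r' % db_config

print(call_migrate(m_old, object(), {'host': 'db'}))
print(call_migrate(m_new, object(), {'host': 'db'}))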
acutesoftware/AIKIF | aikif/dataTools/cls_data.py | Data._identify_datatype | def _identify_datatype(self, input_data):
"""
uses the input data, which may be a string, list, number
or file to work out how to load the data (this can be
overridden by passing the data_type on the command line)
"""
if isinstance(input_data, (int, float)) :
self.data_type = 'number'
elif isinstance(input_data, (list)): #, set
self.data_type = 'list'
elif isinstance(input_data, dict):
self.data_type = 'dict'
elif type(input_data) is str:
if self.input_data[0:4] == 'http':
self.data_type = 'url'
elif os.path.exists(input_data):
self.data_type = 'file'
else:
self.data_type = 'str'
lg.record_result('_identify_datatype', self.name + ' is ' + self.data_type) | python | def _identify_datatype(self, input_data):
"""
uses the input data, which may be a string, list, number
or file to work out how to load the data (this can be
overridden by passing the data_type on the command line)
"""
if isinstance(input_data, (int, float)) :
self.data_type = 'number'
elif isinstance(input_data, (list)): #, set
self.data_type = 'list'
elif isinstance(input_data, dict):
self.data_type = 'dict'
elif type(input_data) is str:
if self.input_data[0:4] == 'http':
self.data_type = 'url'
elif os.path.exists(input_data):
self.data_type = 'file'
else:
self.data_type = 'str'
lg.record_result('_identify_datatype', self.name + ' is ' + self.data_type) | [
"def",
"_identify_datatype",
"(",
"self",
",",
"input_data",
")",
":",
"if",
"isinstance",
"(",
"input_data",
",",
"(",
"int",
",",
"float",
")",
")",
":",
"self",
".",
"data_type",
"=",
"'number'",
"elif",
"isinstance",
"(",
"input_data",
",",
"(",
"list",
")",
")",
":",
"self",
".",
"data_type",
"=",
"'list'",
"elif",
"isinstance",
"(",
"input_data",
",",
"dict",
")",
":",
"self",
".",
"data_type",
"=",
"'dict'",
"elif",
"type",
"(",
"input_data",
")",
"is",
"str",
":",
"if",
"self",
".",
"input_data",
"[",
"0",
":",
"4",
"]",
"==",
"'http'",
":",
"self",
".",
"data_type",
"=",
"'url'",
"elif",
"os",
".",
"path",
".",
"exists",
"(",
"input_data",
")",
":",
"self",
".",
"data_type",
"=",
"'file'",
"else",
":",
"self",
".",
"data_type",
"=",
"'str'",
"lg",
".",
"record_result",
"(",
"'_identify_datatype'",
",",
"self",
".",
"name",
"+",
"' is '",
"+",
"self",
".",
"data_type",
")"
] | uses the input data, which may be a string, list, number
or file to work out how to load the data (this can be
overridden by passing the data_type on the command line) | [
"uses",
"the",
"input",
"data",
"which",
"may",
"be",
"a",
"string",
"list",
"number",
"or",
"file",
"to",
"work",
"out",
"how",
"to",
"load",
"the",
"data",
"(",
"this",
"can",
"be",
"overridden",
"by",
"passing",
"the",
"data_type",
"on",
"the",
"command",
"line"
] | fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03 | https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/dataTools/cls_data.py#L51-L71 | train |
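The dispatch order above matters: numbers and containers are tested before strings, and strings are then split into URLs, existing file paths, and plain text. A condensed demonstration (the os.path.exists branch is omitted so the snippet has no filesystem dependency):

for value in (42, [1, 2], {'a': 1}, 'http://example.com', 'plain text'):
    if isinstance(value, (int, float)):
        kind = 'number'
    elif isinstance(value, list):
        kind = 'list'
    elif isinstance(value, dict):
        kind = 'dict'
    else:
        # the original also checks os.path.exists(value) to detect 'file'
        kind = 'url' if value[0:4] == 'http' else 'str'
    print(value, '->', kind)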
acutesoftware/AIKIF | aikif/dataTools/cls_data.py | Data._calc_size_stats | def _calc_size_stats(self):
"""
get the size in bytes and num records of the content
"""
self.total_records = 0
self.total_length = 0
self.total_nodes = 0
if type(self.content['data']) is dict:
self.total_length += len(str(self.content['data']))
self.total_records += 1
self.total_nodes = sum(len(x) for x in self.content['data'].values())
elif hasattr(self.content['data'], '__iter__') and type(self.content['data']) is not str:
self._get_size_recursive(self.content['data'])
else:
self.total_records += 1
self.total_length += len(str(self.content['data']))
return str(self.total_records) + ' records [or ' + str(self.total_nodes) + ' nodes], taking ' + str(self.total_length) + ' bytes' | python | def _calc_size_stats(self):
"""
get the size in bytes and num records of the content
"""
self.total_records = 0
self.total_length = 0
self.total_nodes = 0
if type(self.content['data']) is dict:
self.total_length += len(str(self.content['data']))
self.total_records += 1
self.total_nodes = sum(len(x) for x in self.content['data'].values())
elif hasattr(self.content['data'], '__iter__') and type(self.content['data']) is not str:
self._get_size_recursive(self.content['data'])
else:
self.total_records += 1
self.total_length += len(str(self.content['data']))
return str(self.total_records) + ' records [or ' + str(self.total_nodes) + ' nodes], taking ' + str(self.total_length) + ' bytes' | [
"def",
"_calc_size_stats",
"(",
"self",
")",
":",
"self",
".",
"total_records",
"=",
"0",
"self",
".",
"total_length",
"=",
"0",
"self",
".",
"total_nodes",
"=",
"0",
"if",
"type",
"(",
"self",
".",
"content",
"[",
"'data'",
"]",
")",
"is",
"dict",
":",
"self",
".",
"total_length",
"+=",
"len",
"(",
"str",
"(",
"self",
".",
"content",
"[",
"'data'",
"]",
")",
")",
"self",
".",
"total_records",
"+=",
"1",
"self",
".",
"total_nodes",
"=",
"sum",
"(",
"len",
"(",
"x",
")",
"for",
"x",
"in",
"self",
".",
"content",
"[",
"'data'",
"]",
".",
"values",
"(",
")",
")",
"elif",
"hasattr",
"(",
"self",
".",
"content",
"[",
"'data'",
"]",
",",
"'__iter__'",
")",
"and",
"type",
"(",
"self",
".",
"content",
"[",
"'data'",
"]",
")",
"is",
"not",
"str",
":",
"self",
".",
"_get_size_recursive",
"(",
"self",
".",
"content",
"[",
"'data'",
"]",
")",
"else",
":",
"self",
".",
"total_records",
"+=",
"1",
"self",
".",
"total_length",
"+=",
"len",
"(",
"str",
"(",
"self",
".",
"content",
"[",
"'data'",
"]",
")",
")",
"return",
"str",
"(",
"self",
".",
"total_records",
")",
"+",
"' records [or '",
"+",
"str",
"(",
"self",
".",
"total_nodes",
")",
"+",
"' nodes], taking '",
"+",
"str",
"(",
"self",
".",
"total_length",
")",
"+",
"' bytes'"
] | get the size in bytes and num records of the content | [
"get",
"the",
"size",
"in",
"bytes",
"and",
"num",
"records",
"of",
"the",
"content"
] | fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03 | https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/dataTools/cls_data.py#L120-L138 | train |
acutesoftware/AIKIF | aikif/dataTools/cls_data.py | Data._get_size_recursive | def _get_size_recursive(self, dat):
"""
recursively walk through a data set or json file
to get the total number of nodes
"""
self.total_records += 1
#self.total_nodes += 1
for rec in dat:
if hasattr(rec, '__iter__') and type(rec) is not str:
self._get_size_recursive(rec)
else:
self.total_nodes += 1
self.total_length += len(str(rec)) | python | def _get_size_recursive(self, dat):
"""
recursively walk through a data set or json file
to get the total number of nodes
"""
self.total_records += 1
#self.total_nodes += 1
for rec in dat:
if hasattr(rec, '__iter__') and type(rec) is not str:
self._get_size_recursive(rec)
else:
self.total_nodes += 1
self.total_length += len(str(rec)) | [
"def",
"_get_size_recursive",
"(",
"self",
",",
"dat",
")",
":",
"self",
".",
"total_records",
"+=",
"1",
"for",
"rec",
"in",
"dat",
":",
"if",
"hasattr",
"(",
"rec",
",",
"'__iter__'",
")",
"and",
"type",
"(",
"rec",
")",
"is",
"not",
"str",
":",
"self",
".",
"_get_size_recursive",
"(",
"rec",
")",
"else",
":",
"self",
".",
"total_nodes",
"+=",
"1",
"self",
".",
"total_length",
"+=",
"len",
"(",
"str",
"(",
"rec",
")",
")"
] | recursively walk through a data set or json file
to get the total number of nodes | [
"recursively",
"walk",
"through",
"a",
"data",
"set",
"or",
"json",
"file",
"to",
"get",
"the",
"total",
"number",
"of",
"nodes"
] | fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03 | https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/dataTools/cls_data.py#L140-L152 | train |
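A standalone sketch of the same walk with the counters passed explicitly instead of stored on self; every container visited counts as a record and every leaf as a node:

def count_nodes(data, totals=None):
    if totals is None:
        totals = {'records': 0, 'nodes': 0, 'length': 0}
    totals['records'] += 1
    for rec in data:
        if hasattr(rec, '__iter__') and not isinstance(rec, str):
            count_nodes(rec, totals)
        else:
            totals['nodes'] += 1
            totals['length'] += len(str(rec))
    return totals

print(count_nodes([[1, 2], [3, [4, 5]]]))
# {'records': 4, 'nodes': 5, 'length': 5}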
staticdev/django-pagination-bootstrap | pagination_bootstrap/version.py | _make_version | def _make_version(major, minor, micro, releaselevel, serial):
"""Create a readable version string from version_info tuple components."""
assert releaselevel in ['alpha', 'beta', 'candidate', 'final']
version = "%d.%d" % (major, minor)
if micro:
version += ".%d" % (micro,)
if releaselevel != 'final':
short = {'alpha': 'a', 'beta': 'b', 'candidate': 'rc'}[releaselevel]
version += "%s%d" % (short, serial)
return version | python | def _make_version(major, minor, micro, releaselevel, serial):
"""Create a readable version string from version_info tuple components."""
assert releaselevel in ['alpha', 'beta', 'candidate', 'final']
version = "%d.%d" % (major, minor)
if micro:
version += ".%d" % (micro,)
if releaselevel != 'final':
short = {'alpha': 'a', 'beta': 'b', 'candidate': 'rc'}[releaselevel]
version += "%s%d" % (short, serial)
return version | [
"def",
"_make_version",
"(",
"major",
",",
"minor",
",",
"micro",
",",
"releaselevel",
",",
"serial",
")",
":",
"assert",
"releaselevel",
"in",
"[",
"'alpha'",
",",
"'beta'",
",",
"'candidate'",
",",
"'final'",
"]",
"version",
"=",
"\"%d.%d\"",
"%",
"(",
"major",
",",
"minor",
")",
"if",
"micro",
":",
"version",
"+=",
"\".%d\"",
"%",
"(",
"micro",
",",
")",
"if",
"releaselevel",
"!=",
"'final'",
":",
"short",
"=",
"{",
"'alpha'",
":",
"'a'",
",",
"'beta'",
":",
"'b'",
",",
"'candidate'",
":",
"'rc'",
"}",
"[",
"releaselevel",
"]",
"version",
"+=",
"\"%s%d\"",
"%",
"(",
"short",
",",
"serial",
")",
"return",
"version"
] | Create a readable version string from version_info tuple components. | [
"Create",
"a",
"readable",
"version",
"string",
"from",
"version_info",
"tuple",
"components",
"."
] | b4bf8352a364b223babbc5f33e14ecabd82c0886 | https://github.com/staticdev/django-pagination-bootstrap/blob/b4bf8352a364b223babbc5f33e14ecabd82c0886/pagination_bootstrap/version.py#L3-L12 | train |
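Given the definition above, these self-checks hold (assuming _make_version is importable from pagination_bootstrap.version):

from pagination_bootstrap.version import _make_version

assert _make_version(1, 2, 0, 'final', 0) == '1.2'        # zero micro is dropped
assert _make_version(1, 2, 3, 'final', 0) == '1.2.3'
assert _make_version(2, 0, 0, 'beta', 1) == '2.0b1'
assert _make_version(2, 0, 1, 'candidate', 2) == '2.0.1rc2'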
staticdev/django-pagination-bootstrap | pagination_bootstrap/version.py | _make_url | def _make_url(major, minor, micro, releaselevel, serial):
"""Make the URL people should start at for this version of coverage.py."""
url = "https://django-pagination-bootstrap.readthedocs.io"
if releaselevel != 'final':
# For pre-releases, use a version-specific URL.
url += "/en/" + _make_version(major, minor, micro, releaselevel, serial)
return url | python | def _make_url(major, minor, micro, releaselevel, serial):
"""Make the URL people should start at for this version of coverage.py."""
url = "https://django-pagination-bootstrap.readthedocs.io"
if releaselevel != 'final':
# For pre-releases, use a version-specific URL.
url += "/en/" + _make_version(major, minor, micro, releaselevel, serial)
return url | [
"def",
"_make_url",
"(",
"major",
",",
"minor",
",",
"micro",
",",
"releaselevel",
",",
"serial",
")",
":",
"url",
"=",
"\"https://django-pagination-bootstrap.readthedocs.io\"",
"if",
"releaselevel",
"!=",
"'final'",
":",
"url",
"+=",
"\"/en/\"",
"+",
"_make_version",
"(",
"major",
",",
"minor",
",",
"micro",
",",
"releaselevel",
",",
"serial",
")",
"return",
"url"
] | Make the URL people should start at for this version of the package. | [
"Make",
"the",
"URL",
"people",
"should",
"start",
"at",
"for",
"this",
"version",
"of",
"coverage",
".",
"py",
"."
] | b4bf8352a364b223babbc5f33e14ecabd82c0886 | https://github.com/staticdev/django-pagination-bootstrap/blob/b4bf8352a364b223babbc5f33e14ecabd82c0886/pagination_bootstrap/version.py#L15-L21 | train |
acutesoftware/AIKIF | aikif/lib/cls_filelist.py | FileList.get_list_of_paths | def get_list_of_paths(self):
"""
return a list of unique paths in the file list
"""
all_paths = []
for p in self.fl_metadata:
try:
all_paths.append(p['path'])
except:
try:
print('cls_filelist - no key path, ignoring folder ' + str(p))
except:
print('cls_filelist - no key path, ignoring odd character folder')
return list(set(all_paths)) | python | def get_list_of_paths(self):
"""
return a list of unique paths in the file list
"""
all_paths = []
for p in self.fl_metadata:
try:
all_paths.append(p['path'])
except:
try:
print('cls_filelist - no key path, ignoring folder ' + str(p))
except:
print('cls_filelist - no key path, ignoring odd character folder')
return list(set(all_paths)) | [
"def",
"get_list_of_paths",
"(",
"self",
")",
":",
"all_paths",
"=",
"[",
"]",
"for",
"p",
"in",
"self",
".",
"fl_metadata",
":",
"try",
":",
"all_paths",
".",
"append",
"(",
"p",
"[",
"'path'",
"]",
")",
"except",
":",
"try",
":",
"print",
"(",
"'cls_filelist - no key path, ignoring folder '",
"+",
"str",
"(",
"p",
")",
")",
"except",
":",
"print",
"(",
"'cls_filelist - no key path, ignoring odd character folder'",
")",
"return",
"list",
"(",
"set",
"(",
"all_paths",
")",
")"
] | return a list of unique paths in the file list | [
"return",
"a",
"list",
"of",
"unique",
"paths",
"in",
"the",
"file",
"list"
] | fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03 | https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/lib/cls_filelist.py#L47-L61 | train |
acutesoftware/AIKIF | aikif/lib/cls_filelist.py | FileList.add_file_metadata | def add_file_metadata(self, fname):
"""
collects the file metadata - note that this will fail
with strange errors if the network connection drops out to
a shared folder, but it is better to stop the program
rather than do a try except otherwise you will get an
incomplete set of files.
"""
file_dict = {}
file_dict["fullfilename"] = fname
try:
file_dict["name"] = os.path.basename(fname)
file_dict["date"] = self.GetDateAsString(fname)
file_dict["size"] = os.path.getsize(fname)
file_dict["path"] = os.path.dirname(fname)
except IOError:
print('Error getting metadata for file')
self.fl_metadata.append(file_dict) | python | def add_file_metadata(self, fname):
"""
collects the file metadata - note that this will fail
with strange errors if the network connection drops out to
a shared folder, but it is better to stop the program
rather than do a try except otherwise you will get an
incomplete set of files.
"""
file_dict = {}
file_dict["fullfilename"] = fname
try:
file_dict["name"] = os.path.basename(fname)
file_dict["date"] = self.GetDateAsString(fname)
file_dict["size"] = os.path.getsize(fname)
file_dict["path"] = os.path.dirname(fname)
except IOError:
print('Error getting metadata for file')
self.fl_metadata.append(file_dict) | [
"def",
"add_file_metadata",
"(",
"self",
",",
"fname",
")",
":",
"file_dict",
"=",
"{",
"}",
"file_dict",
"[",
"\"fullfilename\"",
"]",
"=",
"fname",
"try",
":",
"file_dict",
"[",
"\"name\"",
"]",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"fname",
")",
"file_dict",
"[",
"\"date\"",
"]",
"=",
"self",
".",
"GetDateAsString",
"(",
"fname",
")",
"file_dict",
"[",
"\"size\"",
"]",
"=",
"os",
".",
"path",
".",
"getsize",
"(",
"fname",
")",
"file_dict",
"[",
"\"path\"",
"]",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"fname",
")",
"except",
"IOError",
":",
"print",
"(",
"'Error getting metadata for file'",
")",
"self",
".",
"fl_metadata",
".",
"append",
"(",
"file_dict",
")"
] | collects the file metadata - note that this will fail
with strange errors if the network connection drops out to
a shared folder, but it is better to stop the program
rather than do a try except otherwise you will get an
incomplete set of files. | [
"collects",
"the",
"files",
"metadata",
"-",
"note",
"that",
"this",
"will",
"fail",
"with",
"strange",
"errors",
"if",
"network",
"connection",
"drops",
"out",
"to",
"shared",
"folder",
"but",
"it",
"is",
"better",
"to",
"stop",
"the",
"program",
"rather",
"than",
"do",
"a",
"try",
"except",
"otherwise",
"you",
"will",
"get",
"an",
"incomplete",
"set",
"of",
"files",
"."
] | fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03 | https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/lib/cls_filelist.py#L100-L119 | train |
acutesoftware/AIKIF | aikif/lib/cls_filelist.py | FileList.print_file_details_as_csv | def print_file_details_as_csv(self, fname, col_headers):
""" saves as csv format """
line = ''
qu = '"'
d = ','
for fld in col_headers:
if fld == "fullfilename":
line = line + qu + fname + qu + d
if fld == "name":
line = line + qu + os.path.basename(fname) + qu + d
if fld == "date":
line = line + qu + self.GetDateAsString(fname) + qu + d
if fld == "size":
line = line + qu + self.get_size_as_string(fname) + qu + d
if fld == "path":
try:
line = line + qu + os.path.dirname(fname) + qu + d
except IOError:
line = line + qu + 'ERROR_PATH' + qu + d
return line | python | def print_file_details_as_csv(self, fname, col_headers):
""" saves as csv format """
line = ''
qu = '"'
d = ','
for fld in col_headers:
if fld == "fullfilename":
line = line + qu + fname + qu + d
if fld == "name":
line = line + qu + os.path.basename(fname) + qu + d
if fld == "date":
line = line + qu + self.GetDateAsString(fname) + qu + d
if fld == "size":
line = line + qu + self.get_size_as_string(fname) + qu + d
if fld == "path":
try:
line = line + qu + os.path.dirname(fname) + qu + d
except IOError:
line = line + qu + 'ERROR_PATH' + qu + d
return line | [
"def",
"print_file_details_as_csv",
"(",
"self",
",",
"fname",
",",
"col_headers",
")",
":",
"line",
"=",
"''",
"qu",
"=",
"'\"'",
"d",
"=",
"','",
"for",
"fld",
"in",
"col_headers",
":",
"if",
"fld",
"==",
"\"fullfilename\"",
":",
"line",
"=",
"line",
"+",
"qu",
"+",
"fname",
"+",
"qu",
"+",
"d",
"if",
"fld",
"==",
"\"name\"",
":",
"line",
"=",
"line",
"+",
"qu",
"+",
"os",
".",
"path",
".",
"basename",
"(",
"fname",
")",
"+",
"qu",
"+",
"d",
"if",
"fld",
"==",
"\"date\"",
":",
"line",
"=",
"line",
"+",
"qu",
"+",
"self",
".",
"GetDateAsString",
"(",
"fname",
")",
"+",
"qu",
"+",
"d",
"if",
"fld",
"==",
"\"size\"",
":",
"line",
"=",
"line",
"+",
"qu",
"+",
"self",
".",
"get_size_as_string",
"(",
"fname",
")",
"+",
"qu",
"+",
"d",
"if",
"fld",
"==",
"\"path\"",
":",
"try",
":",
"line",
"=",
"line",
"+",
"qu",
"+",
"os",
".",
"path",
".",
"dirname",
"(",
"fname",
")",
"+",
"qu",
"+",
"d",
"except",
"IOError",
":",
"line",
"=",
"line",
"+",
"qu",
"+",
"'ERROR_PATH'",
"+",
"qu",
"+",
"d",
"return",
"line"
] | returns file details as a csv line | [
"saves",
"as",
"csv",
"format"
] | fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03 | https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/lib/cls_filelist.py#L145-L165 | train |
acutesoftware/AIKIF | aikif/lib/cls_filelist.py | FileList.save_filelist | def save_filelist(self, opFile, opFormat, delim=',', qu='"'):
"""
uses a list of files and collects metadata on them and saves
to a text file as a list or with metadata depending on opFormat.
"""
op_folder = os.path.dirname(opFile)
if op_folder is not None: # short filename passed
if not os.path.exists(op_folder):
os.makedirs(op_folder)
with open(opFile,'w') as fout:
fout.write("fullFilename" + delim)
for colHeading in opFormat:
fout.write(colHeading + delim)
fout.write('\n')
for f in self.filelist:
line = qu + f + qu + delim
try:
for fld in opFormat:
if fld == "name":
line = line + qu + os.path.basename(f) + qu + delim
if fld == "date":
line = line + qu + self.GetDateAsString(f) + qu + delim
if fld == "size":
line = line + qu + str(os.path.getsize(f)) + qu + delim
if fld == "path":
line = line + qu + os.path.dirname(f) + qu + delim
except IOError:
line += '\n' # no metadata
try:
fout.write (str(line.encode('ascii', 'ignore').decode('utf-8')))
fout.write ('\n')
except IOError:
#print("Cant print line - cls_filelist line 304")
pass | python | def save_filelist(self, opFile, opFormat, delim=',', qu='"'):
"""
uses a list of files and collects metadata on them and saves
to a text file as a list or with metadata depending on opFormat.
"""
op_folder = os.path.dirname(opFile)
if op_folder is not None: # short filename passed
if not os.path.exists(op_folder):
os.makedirs(op_folder)
with open(opFile,'w') as fout:
fout.write("fullFilename" + delim)
for colHeading in opFormat:
fout.write(colHeading + delim)
fout.write('\n')
for f in self.filelist:
line = qu + f + qu + delim
try:
for fld in opFormat:
if fld == "name":
line = line + qu + os.path.basename(f) + qu + delim
if fld == "date":
line = line + qu + self.GetDateAsString(f) + qu + delim
if fld == "size":
line = line + qu + str(os.path.getsize(f)) + qu + delim
if fld == "path":
line = line + qu + os.path.dirname(f) + qu + delim
except IOError:
line += '\n' # no metadata
try:
fout.write (str(line.encode('ascii', 'ignore').decode('utf-8')))
fout.write ('\n')
except IOError:
#print("Cant print line - cls_filelist line 304")
pass | [
"def",
"save_filelist",
"(",
"self",
",",
"opFile",
",",
"opFormat",
",",
"delim",
"=",
"','",
",",
"qu",
"=",
"'\"'",
")",
":",
"op_folder",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"opFile",
")",
"if",
"op_folder",
"is",
"not",
"None",
":",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"op_folder",
")",
":",
"os",
".",
"makedirs",
"(",
"op_folder",
")",
"with",
"open",
"(",
"opFile",
",",
"'w'",
")",
"as",
"fout",
":",
"fout",
".",
"write",
"(",
"\"fullFilename\"",
"+",
"delim",
")",
"for",
"colHeading",
"in",
"opFormat",
":",
"fout",
".",
"write",
"(",
"colHeading",
"+",
"delim",
")",
"fout",
".",
"write",
"(",
"'\\n'",
")",
"for",
"f",
"in",
"self",
".",
"filelist",
":",
"line",
"=",
"qu",
"+",
"f",
"+",
"qu",
"+",
"delim",
"try",
":",
"for",
"fld",
"in",
"opFormat",
":",
"if",
"fld",
"==",
"\"name\"",
":",
"line",
"=",
"line",
"+",
"qu",
"+",
"os",
".",
"path",
".",
"basename",
"(",
"f",
")",
"+",
"qu",
"+",
"delim",
"if",
"fld",
"==",
"\"date\"",
":",
"line",
"=",
"line",
"+",
"qu",
"+",
"self",
".",
"GetDateAsString",
"(",
"f",
")",
"+",
"qu",
"+",
"delim",
"if",
"fld",
"==",
"\"size\"",
":",
"line",
"=",
"line",
"+",
"qu",
"+",
"str",
"(",
"os",
".",
"path",
".",
"getsize",
"(",
"f",
")",
")",
"+",
"qu",
"+",
"delim",
"if",
"fld",
"==",
"\"path\"",
":",
"line",
"=",
"line",
"+",
"qu",
"+",
"os",
".",
"path",
".",
"dirname",
"(",
"f",
")",
"+",
"qu",
"+",
"delim",
"except",
"IOError",
":",
"line",
"+=",
"'\\n'",
"try",
":",
"fout",
".",
"write",
"(",
"str",
"(",
"line",
".",
"encode",
"(",
"'ascii'",
",",
"'ignore'",
")",
".",
"decode",
"(",
"'utf-8'",
")",
")",
")",
"fout",
".",
"write",
"(",
"'\\n'",
")",
"except",
"IOError",
":",
"pass"
] | uses a list of files and collects metadata on them and saves
to a text file as a list or with metadata depending on opFormat. | [
"uses",
"a",
"List",
"of",
"files",
"and",
"collects",
"meta",
"data",
"on",
"them",
"and",
"saves",
"to",
"an",
"text",
"file",
"as",
"a",
"list",
"or",
"with",
"metadata",
"depending",
"on",
"opFormat",
"."
] | fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03 | https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/lib/cls_filelist.py#L192-L228 | train |
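The hand-built quoting in save_filelist breaks when a value contains the quote character itself; the stdlib csv module escapes such values. A hedged alternative sketch for the same header-plus-rows layout (Python 3; the row below is hypothetical):

import csv

with open('filelist.csv', 'w', newline='') as fout:
    writer = csv.writer(fout, quoting=csv.QUOTE_ALL)
    writer.writerow(['fullFilename', 'name', 'date', 'size', 'path'])
    # csv handles embedded quotes and delimiters that manual concatenation does not
    writer.writerow(['/tmp/odd "name".txt', 'odd "name".txt', '2024-01-01', 123, '/tmp'])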
acutesoftware/AIKIF | aikif/dataTools/cls_dataset.py | DataSet.login | def login(self, schema, username, password):
"""
connect here - use the other classes cls_oracle, cls_mysql, etc
otherwise this has the credentials used to access a share folder
"""
self.schema = schema
self.username = username
self.password = password
self.connection = schema | python | def login(self, schema, username, password):
"""
connect here - use the other classes cls_oracle, cls_mysql, etc
otherwise this has the credentials used to access a share folder
"""
self.schema = schema
self.username = username
self.password = password
self.connection = schema | [
"def",
"login",
"(",
"self",
",",
"schema",
",",
"username",
",",
"password",
")",
":",
"self",
".",
"schema",
"=",
"schema",
"self",
".",
"username",
"=",
"username",
"self",
".",
"password",
"=",
"password",
"self",
".",
"connection",
"=",
"schema"
] | connect here - use the other classes cls_oracle, cls_mysql, etc
otherwise this has the credentials used to access a share folder | [
"connect",
"here",
"-",
"use",
"the",
"other",
"classes",
"cls_oracle",
"cls_mysql",
"etc",
"otherwise",
"this",
"has",
"the",
"credentials",
"used",
"to",
"access",
"a",
"share",
"folder"
] | fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03 | https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/dataTools/cls_dataset.py#L34-L42 | train |
Phelimb/atlas | mykatlas/typing/typer/presence.py | GeneCollectionTyper.type | def type(self, sequence_coverage_collection,
min_gene_percent_covg_threshold=99):
"""Types a collection of genes returning the most likely gene version
in the collection with its genotype"""
best_versions = self.get_best_version(
sequence_coverage_collection.values(),
min_gene_percent_covg_threshold)
return [self.presence_typer.type(best_version)
for best_version in best_versions] | python | def type(self, sequence_coverage_collection,
min_gene_percent_covg_threshold=99):
"""Types a collection of genes returning the most likely gene version
in the collection with its genotype"""
best_versions = self.get_best_version(
sequence_coverage_collection.values(),
min_gene_percent_covg_threshold)
return [self.presence_typer.type(best_version)
for best_version in best_versions] | [
"def",
"type",
"(",
"self",
",",
"sequence_coverage_collection",
",",
"min_gene_percent_covg_threshold",
"=",
"99",
")",
":",
"best_versions",
"=",
"self",
".",
"get_best_version",
"(",
"sequence_coverage_collection",
".",
"values",
"(",
")",
",",
"min_gene_percent_covg_threshold",
")",
"return",
"[",
"self",
".",
"presence_typer",
".",
"type",
"(",
"best_version",
")",
"for",
"best_version",
"in",
"best_versions",
"]"
] | Types a collection of genes returning the most likely gene version
in the collection with its genotype | [
"Types",
"a",
"collection",
"of",
"genes",
"returning",
"the",
"most",
"likely",
"gene",
"version",
"in",
"the",
"collection",
"with",
"it",
"s",
"genotype"
] | 02e85497bb5ac423d6452a10dca11964582ac4d7 | https://github.com/Phelimb/atlas/blob/02e85497bb5ac423d6452a10dca11964582ac4d7/mykatlas/typing/typer/presence.py#L146-L154 | train |
acutesoftware/AIKIF | aikif/programs.py | Programs.list_all_python_programs | def list_all_python_programs(self):
"""
collects a filelist of all .py programs
"""
self.tot_lines = 0
self.tot_bytes = 0
self.tot_files = 0
self.tot_loc = 0
self.lstPrograms = []
fl = mod_fl.FileList([self.fldr], ['*.py'], ["__pycache__", "/venv/", "/venv2/", ".git"])
for fip in fl.get_list():
if '__init__.py' not in fip:
self.add(fip, 'TODO - add comment')
f = mod_file.TextFile(fip)
self.tot_lines += f.count_lines_in_file()
self.tot_loc += f.count_lines_of_code()
self.tot_bytes += f.size
self.tot_files += 1
print('All Python Program Statistics')
print('Files = ', self.tot_files, ' Bytes = ', self.tot_bytes, ' Lines = ', self.tot_lines, ' Lines of Code = ', self.tot_loc) | python | def list_all_python_programs(self):
"""
collects a filelist of all .py programs
"""
self.tot_lines = 0
self.tot_bytes = 0
self.tot_files = 0
self.tot_loc = 0
self.lstPrograms = []
fl = mod_fl.FileList([self.fldr], ['*.py'], ["__pycache__", "/venv/", "/venv2/", ".git"])
for fip in fl.get_list():
if '__init__.py' not in fip:
self.add(fip, 'TODO - add comment')
f = mod_file.TextFile(fip)
self.tot_lines += f.count_lines_in_file()
self.tot_loc += f.count_lines_of_code()
self.tot_bytes += f.size
self.tot_files += 1
print('All Python Program Statistics')
print('Files = ', self.tot_files, ' Bytes = ', self.tot_bytes, ' Lines = ', self.tot_lines, ' Lines of Code = ', self.tot_loc) | [
"def",
"list_all_python_programs",
"(",
"self",
")",
":",
"self",
".",
"tot_lines",
"=",
"0",
"self",
".",
"tot_bytes",
"=",
"0",
"self",
".",
"tot_files",
"=",
"0",
"self",
".",
"tot_loc",
"=",
"0",
"self",
".",
"lstPrograms",
"=",
"[",
"]",
"fl",
"=",
"mod_fl",
".",
"FileList",
"(",
"[",
"self",
".",
"fldr",
"]",
",",
"[",
"'*.py'",
"]",
",",
"[",
"\"__pycache__\"",
",",
"\"/venv/\"",
",",
"\"/venv2/\"",
",",
"\".git\"",
"]",
")",
"for",
"fip",
"in",
"fl",
".",
"get_list",
"(",
")",
":",
"if",
"'__init__.py'",
"not",
"in",
"fip",
":",
"self",
".",
"add",
"(",
"fip",
",",
"'TODO - add comment'",
")",
"f",
"=",
"mod_file",
".",
"TextFile",
"(",
"fip",
")",
"self",
".",
"tot_lines",
"+=",
"f",
".",
"count_lines_in_file",
"(",
")",
"self",
".",
"tot_loc",
"+=",
"f",
".",
"count_lines_of_code",
"(",
")",
"self",
".",
"tot_bytes",
"+=",
"f",
".",
"size",
"self",
".",
"tot_files",
"+=",
"1",
"print",
"(",
"'All Python Program Statistics'",
")",
"print",
"(",
"'Files = '",
",",
"self",
".",
"tot_files",
",",
"' Bytes = '",
",",
"self",
".",
"tot_bytes",
",",
"' Lines = '",
",",
"self",
".",
"tot_lines",
",",
"' Lines of Code = '",
",",
"self",
".",
"tot_loc",
")"
] | collects a filelist of all .py programs | [
"collects",
"a",
"filelist",
"of",
"all",
".",
"py",
"programs"
] | fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03 | https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/programs.py#L38-L58 | train |
acutesoftware/AIKIF | aikif/programs.py | Programs.save | def save(self, fname=''):
"""
Save the list of items to AIKIF core and optionally to local file fname
"""
if fname != '':
with open(fname, 'w') as f:
for i in self.lstPrograms:
f.write(self.get_file_info_line(i, ','))
# save to standard AIKIF structure
filemap = mod_filemap.FileMap([], [])
#location_fileList = filemap.get_full_filename(filemap.find_type('LOCATION'), filemap.find_ontology('FILE-PROGRAM')[0])
object_fileList = filemap.get_full_filename(filemap.find_type('OBJECT'), filemap.find_ontology('FILE-PROGRAM')[0])
print('object_fileList = ' + object_fileList + '\n')
if os.path.exists(object_fileList):
os.remove(object_fileList)
self.lstPrograms.sort()
try:
with open(object_fileList, 'a') as f:
f.write('\n'.join([i[0] for i in self.lstPrograms]))
except Exception as ex:
print('ERROR = cant write to object_filelist ' , object_fileList, str(ex)) | python | def save(self, fname=''):
"""
Save the list of items to AIKIF core and optionally to local file fname
"""
if fname != '':
with open(fname, 'w') as f:
for i in self.lstPrograms:
f.write(self.get_file_info_line(i, ','))
# save to standard AIKIF structure
filemap = mod_filemap.FileMap([], [])
#location_fileList = filemap.get_full_filename(filemap.find_type('LOCATION'), filemap.find_ontology('FILE-PROGRAM')[0])
object_fileList = filemap.get_full_filename(filemap.find_type('OBJECT'), filemap.find_ontology('FILE-PROGRAM')[0])
print('object_fileList = ' + object_fileList + '\n')
if os.path.exists(object_fileList):
os.remove(object_fileList)
self.lstPrograms.sort()
try:
with open(object_fileList, 'a') as f:
f.write('\n'.join([i[0] for i in self.lstPrograms]))
except Exception as ex:
print('ERROR = cant write to object_filelist ' , object_fileList, str(ex)) | [
"def",
"save",
"(",
"self",
",",
"fname",
"=",
"''",
")",
":",
"if",
"fname",
"!=",
"''",
":",
"with",
"open",
"(",
"fname",
",",
"'w'",
")",
"as",
"f",
":",
"for",
"i",
"in",
"self",
".",
"lstPrograms",
":",
"f",
".",
"write",
"(",
"self",
".",
"get_file_info_line",
"(",
"i",
",",
"','",
")",
")",
"filemap",
"=",
"mod_filemap",
".",
"FileMap",
"(",
"[",
"]",
",",
"[",
"]",
")",
"object_fileList",
"=",
"filemap",
".",
"get_full_filename",
"(",
"filemap",
".",
"find_type",
"(",
"'OBJECT'",
")",
",",
"filemap",
".",
"find_ontology",
"(",
"'FILE-PROGRAM'",
")",
"[",
"0",
"]",
")",
"print",
"(",
"'object_fileList = '",
"+",
"object_fileList",
"+",
"'\\n'",
")",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"object_fileList",
")",
":",
"os",
".",
"remove",
"(",
"object_fileList",
")",
"self",
".",
"lstPrograms",
".",
"sort",
"(",
")",
"try",
":",
"with",
"open",
"(",
"object_fileList",
",",
"'a'",
")",
"as",
"f",
":",
"f",
".",
"write",
"(",
"'\\n'",
".",
"join",
"(",
"[",
"i",
"[",
"0",
"]",
"for",
"i",
"in",
"self",
".",
"lstPrograms",
"]",
")",
")",
"except",
"Exception",
"as",
"ex",
":",
"print",
"(",
"'ERROR = cant write to object_filelist '",
",",
"object_fileList",
",",
"str",
"(",
"ex",
")",
")"
] | Save the list of items to AIKIF core and optionally to local file fname | [
"Save",
"the",
"list",
"of",
"items",
"to",
"AIKIF",
"core",
"and",
"optionally",
"to",
"local",
"file",
"fname"
] | fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03 | https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/programs.py#L88-L112 | train |
acutesoftware/AIKIF | aikif/programs.py | Programs.collect_program_info | def collect_program_info(self, fname):
"""
gets details on the program, size, date, list of functions
and produces a Markdown file for documentation
"""
md = '#AIKIF Technical details\n'
md += 'Autogenerated list of programs with comments and progress\n'
md += '\nFilename | Comment | Date | Size\n'
md += '--- | --- | --- | ---\n'
for i in self.lstPrograms:
md += self.get_file_info_line(i, ' | ')
# save the details to a Markdown file
with open(fname, 'w') as f:
f.write(md) | python | def collect_program_info(self, fname):
"""
gets details on the program, size, date, list of functions
and produces a Markdown file for documentation
"""
md = '#AIKIF Technical details\n'
md += 'Autogenerated list of programs with comments and progress\n'
md += '\nFilename | Comment | Date | Size\n'
md += '--- | --- | --- | ---\n'
for i in self.lstPrograms:
md += self.get_file_info_line(i, ' | ')
# save the details to a Markdown file
with open(fname, 'w') as f:
f.write(md) | [
"def",
"collect_program_info",
"(",
"self",
",",
"fname",
")",
":",
"md",
"=",
"'#AIKIF Technical details\\n'",
"md",
"+=",
"'Autogenerated list of programs with comments and progress\\n'",
"md",
"+=",
"'\\nFilename | Comment | Date | Size\\n'",
"md",
"+=",
"'--- | --- | --- | ---\\n'",
"for",
"i",
"in",
"self",
".",
"lstPrograms",
":",
"md",
"+=",
"self",
".",
"get_file_info_line",
"(",
"i",
",",
"' | '",
")",
"with",
"open",
"(",
"fname",
",",
"'w'",
")",
"as",
"f",
":",
"f",
".",
"write",
"(",
"md",
")"
] | gets details on the program, size, date, list of functions
and produces a Markdown file for documentation | [
"gets",
"details",
"on",
"the",
"program",
"size",
"date",
"list",
"of",
"functions",
"and",
"produces",
"a",
"Markdown",
"file",
"for",
"documentation"
] | fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03 | https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/programs.py#L140-L154 | train |
mpg-age-bioinformatics/AGEpy | AGEpy/david.py | id_nameDAVID | def id_nameDAVID(df,GTF=None,name_id=None):
"""
Given a DAVIDenrich output it converts ensembl gene ids to gene names and adds this column to the output
:param df: a dataframe output from DAVIDenrich
:param GTF: a GTF dataframe from readGTF()
:param name_id: instead of a gtf dataframe a dataframe with the columns 'gene_name' and 'gene_id' can be given as input
:returns: a pandas dataframe with a gene name column added to it.
"""
if name_id is None:
gene_name=retrieve_GTF_field('gene_name',GTF)
gene_id=retrieve_GTF_field('gene_id', GTF)
GTF=pd.concat([gene_name,gene_id],axis=1)
else:
GTF=name_id.copy()
df['Gene_names']="genes"
terms=df['termName'].tolist()
enrichN=pd.DataFrame()
for term in terms:
tmp=df[df['termName']==term]
tmp=tmp.reset_index(drop=True)
ids=tmp.xs(0)['geneIds']
ids=pd.DataFrame(data=ids.split(", "))
ids.columns=['geneIds']
ids['geneIds']=ids['geneIds'].map(str.lower)
GTF['gene_id']=GTF['gene_id'].astype(str)
GTF['gene_id']=GTF['gene_id'].map(str.lower)
ids=pd.merge(ids, GTF, how='left', left_on='geneIds', right_on='gene_id')
names=ids['gene_name'].tolist()
names= ', '.join(names)
tmp["Gene_names"]=names
#tmp=tmp.replace(to_replace=tmp.xs(0)['Gene_names'], value=names)
enrichN=pd.concat([enrichN, tmp])
enrichN=enrichN.reset_index(drop=True)
gene_names=enrichN[['Gene_names']]
gpos=enrichN.columns.get_loc("geneIds")
enrichN=enrichN.drop(['Gene_names'],axis=1)
cols=enrichN.columns.tolist()
enrichN=pd.concat([enrichN[cols[:gpos+1]],gene_names,enrichN[cols[gpos+1:]]],axis=1)
return enrichN | python | def id_nameDAVID(df,GTF=None,name_id=None):
"""
Given a DAVIDenrich output it converts ensembl gene ids to gene names and adds this column to the output
:param df: a dataframe output from DAVIDenrich
:param GTF: a GTF dataframe from readGTF()
:param name_id: instead of a gtf dataframe a dataframe with the columns 'gene_name' and 'gene_id' can be given as input
:returns: a pandas dataframe with a gene name column added to it.
"""
if name_id is None:
gene_name=retrieve_GTF_field('gene_name',GTF)
gene_id=retrieve_GTF_field('gene_id', GTF)
GTF=pd.concat([gene_name,gene_id],axis=1)
else:
GTF=name_id.copy()
df['Gene_names']="genes"
terms=df['termName'].tolist()
enrichN=pd.DataFrame()
for term in terms:
tmp=df[df['termName']==term]
tmp=tmp.reset_index(drop=True)
ids=tmp.xs(0)['geneIds']
ids=pd.DataFrame(data=ids.split(", "))
ids.columns=['geneIds']
ids['geneIds']=ids['geneIds'].map(str.lower)
GTF['gene_id']=GTF['gene_id'].astype(str)
GTF['gene_id']=GTF['gene_id'].map(str.lower)
ids=pd.merge(ids, GTF, how='left', left_on='geneIds', right_on='gene_id')
names=ids['gene_name'].tolist()
names= ', '.join(names)
tmp["Gene_names"]=names
#tmp=tmp.replace(to_replace=tmp.xs(0)['Gene_names'], value=names)
enrichN=pd.concat([enrichN, tmp])
enrichN=enrichN.reset_index(drop=True)
gene_names=enrichN[['Gene_names']]
gpos=enrichN.columns.get_loc("geneIds")
enrichN=enrichN.drop(['Gene_names'],axis=1)
cols=enrichN.columns.tolist()
enrichN=pd.concat([enrichN[cols[:gpos+1]],gene_names,enrichN[cols[gpos+1:]]],axis=1)
return enrichN | [
"def",
"id_nameDAVID",
"(",
"df",
",",
"GTF",
"=",
"None",
",",
"name_id",
"=",
"None",
")",
":",
"if",
"name_id",
"is",
"None",
":",
"gene_name",
"=",
"retrieve_GTF_field",
"(",
"'gene_name'",
",",
"GTF",
")",
"gene_id",
"=",
"retrieve_GTF_field",
"(",
"'gene_id'",
",",
"GTF",
")",
"GTF",
"=",
"pd",
".",
"concat",
"(",
"[",
"gene_name",
",",
"gene_id",
"]",
",",
"axis",
"=",
"1",
")",
"else",
":",
"GTF",
"=",
"name_id",
".",
"copy",
"(",
")",
"df",
"[",
"'Gene_names'",
"]",
"=",
"\"genes\"",
"terms",
"=",
"df",
"[",
"'termName'",
"]",
".",
"tolist",
"(",
")",
"enrichN",
"=",
"pd",
".",
"DataFrame",
"(",
")",
"for",
"term",
"in",
"terms",
":",
"tmp",
"=",
"df",
"[",
"df",
"[",
"'termName'",
"]",
"==",
"term",
"]",
"tmp",
"=",
"tmp",
".",
"reset_index",
"(",
"drop",
"=",
"True",
")",
"ids",
"=",
"tmp",
".",
"xs",
"(",
"0",
")",
"[",
"'geneIds'",
"]",
"ids",
"=",
"pd",
".",
"DataFrame",
"(",
"data",
"=",
"ids",
".",
"split",
"(",
"\", \"",
")",
")",
"ids",
".",
"columns",
"=",
"[",
"'geneIds'",
"]",
"ids",
"[",
"'geneIds'",
"]",
"=",
"ids",
"[",
"'geneIds'",
"]",
".",
"map",
"(",
"str",
".",
"lower",
")",
"GTF",
"[",
"'gene_id'",
"]",
"=",
"GTF",
"[",
"'gene_id'",
"]",
".",
"astype",
"(",
"str",
")",
"GTF",
"[",
"'gene_id'",
"]",
"=",
"GTF",
"[",
"'gene_id'",
"]",
".",
"map",
"(",
"str",
".",
"lower",
")",
"ids",
"=",
"pd",
".",
"merge",
"(",
"ids",
",",
"GTF",
",",
"how",
"=",
"'left'",
",",
"left_on",
"=",
"'geneIds'",
",",
"right_on",
"=",
"'gene_id'",
")",
"names",
"=",
"ids",
"[",
"'gene_name'",
"]",
".",
"tolist",
"(",
")",
"names",
"=",
"', '",
".",
"join",
"(",
"names",
")",
"tmp",
"[",
"\"Gene_names\"",
"]",
"=",
"names",
"enrichN",
"=",
"pd",
".",
"concat",
"(",
"[",
"enrichN",
",",
"tmp",
"]",
")",
"enrichN",
"=",
"enrichN",
".",
"reset_index",
"(",
"drop",
"=",
"True",
")",
"gene_names",
"=",
"enrichN",
"[",
"[",
"'Gene_names'",
"]",
"]",
"gpos",
"=",
"enrichN",
".",
"columns",
".",
"get_loc",
"(",
"\"geneIds\"",
")",
"enrichN",
"=",
"enrichN",
".",
"drop",
"(",
"[",
"'Gene_names'",
"]",
",",
"axis",
"=",
"1",
")",
"cols",
"=",
"enrichN",
".",
"columns",
".",
"tolist",
"(",
")",
"enrichN",
"=",
"pd",
".",
"concat",
"(",
"[",
"enrichN",
"[",
"cols",
"[",
":",
"gpos",
"+",
"1",
"]",
"]",
",",
"gene_names",
",",
"enrichN",
"[",
"cols",
"[",
"gpos",
"+",
"1",
":",
"]",
"]",
"]",
",",
"axis",
"=",
"1",
")",
"return",
"enrichN"
] | Given a DAVIDenrich output it converts ensembl gene ids to gene names and adds this column to the output
:param df: a dataframe output from DAVIDenrich
:param GTF: a GTF dataframe from readGTF()
:param name_id: instead of a gtf dataframe a dataframe with the columns 'gene_name' and 'gene_id' can be given as input
:returns: a pandas dataframe with a gene name column added to it. | [
"Given",
"a",
"DAVIDenrich",
"output",
"it",
"converts",
"ensembl",
"gene",
"ids",
"to",
"genes",
"names",
"and",
"adds",
"this",
"column",
"to",
"the",
"output"
] | 887808a7a2c1504f39ce8d8cb36c15c1721cd29f | https://github.com/mpg-age-bioinformatics/AGEpy/blob/887808a7a2c1504f39ce8d8cb36c15c1721cd29f/AGEpy/david.py#L92-L134 | train |
mpg-age-bioinformatics/AGEpy | AGEpy/david.py | DAVIDgetGeneAttribute | def DAVIDgetGeneAttribute(x,df,refCol="ensembl_gene_id",fieldTOretrieve="gene_name"):
"""
Returns a list of gene names for given gene ids.
:param x: a string with the list of IDs separated by ', '
:param df: a dataframe with the reference column and the column to retrieve
:param refCol: the header of the column containing the identifiers
:param fieldTOretrieve: the field to retrieve from parsedGTF, e.g. 'gene_name'
:returns: list of fieldTOretrieve separated by ', ' in the same order as given in x
"""
l=x.split(", ")
l=[ s.upper() for s in l ]
tmpdf=pd.DataFrame({refCol:l},index=range(len(l)))
df_fix=df[[refCol,fieldTOretrieve]].drop_duplicates()
df_fix[refCol]=df_fix[refCol].apply(lambda x: x.upper())
ids=pd.merge(tmpdf,df_fix,how="left",on=[refCol])
ids=ids[fieldTOretrieve].tolist()
ids=[ str(s) for s in ids ]
ids=", ".join(ids)
return ids | python | def DAVIDgetGeneAttribute(x,df,refCol="ensembl_gene_id",fieldTOretrieve="gene_name"):
"""
Returns a list of gene names for given gene ids.
:param x: a string with the list of IDs separated by ', '
:param df: a dataframe with the reference column and the column to retrieve
:param refCol: the header of the column containing the identifiers
:param fieldTOretrieve: the field to retrieve from parsedGTF, e.g. 'gene_name'
:returns: list of fieldTOretrieve separated by ', ' in the same order as given in x
"""
l=x.split(", ")
l=[ s.upper() for s in l ]
tmpdf=pd.DataFrame({refCol:l},index=range(len(l)))
df_fix=df[[refCol,fieldTOretrieve]].drop_duplicates()
df_fix[refCol]=df_fix[refCol].apply(lambda x: x.upper())
ids=pd.merge(tmpdf,df_fix,how="left",on=[refCol])
ids=ids[fieldTOretrieve].tolist()
ids=[ str(s) for s in ids ]
ids=", ".join(ids)
return ids | [
"def",
"DAVIDgetGeneAttribute",
"(",
"x",
",",
"df",
",",
"refCol",
"=",
"\"ensembl_gene_id\"",
",",
"fieldTOretrieve",
"=",
"\"gene_name\"",
")",
":",
"l",
"=",
"x",
".",
"split",
"(",
"\", \"",
")",
"l",
"=",
"[",
"s",
".",
"upper",
"(",
")",
"for",
"s",
"in",
"l",
"]",
"tmpdf",
"=",
"pd",
".",
"DataFrame",
"(",
"{",
"refCol",
":",
"l",
"}",
",",
"index",
"=",
"range",
"(",
"len",
"(",
"l",
")",
")",
")",
"df_fix",
"=",
"df",
"[",
"[",
"refCol",
",",
"fieldTOretrieve",
"]",
"]",
".",
"drop_duplicates",
"(",
")",
"df_fix",
"[",
"refCol",
"]",
"=",
"df_fix",
"[",
"refCol",
"]",
".",
"apply",
"(",
"lambda",
"x",
":",
"x",
".",
"upper",
"(",
")",
")",
"ids",
"=",
"pd",
".",
"merge",
"(",
"tmpdf",
",",
"df_fix",
",",
"how",
"=",
"\"left\"",
",",
"on",
"=",
"[",
"refCol",
"]",
")",
"ids",
"=",
"ids",
"[",
"fieldTOretrieve",
"]",
".",
"tolist",
"(",
")",
"ids",
"=",
"[",
"str",
"(",
"s",
")",
"for",
"s",
"in",
"ids",
"]",
"ids",
"=",
"\", \"",
".",
"join",
"(",
"ids",
")",
"return",
"ids"
] | Returns a list of gene names for given gene ids.
:param x: a string with the list of IDs separated by ', '
:param df: a dataframe with the reference column and the column to retrieve
:param refCol: the header of the column containing the identifiers
:param fieldTOretrieve: the field to retrieve from parsedGTF, e.g. 'gene_name'
:returns: list of fieldTOretrieve values separated by ', ' in the same order as given in x | [
"Returns",
"a",
"list",
"of",
"gene",
"names",
"for",
"given",
"gene",
"ids",
"."
] | 887808a7a2c1504f39ce8d8cb36c15c1721cd29f | https://github.com/mpg-age-bioinformatics/AGEpy/blob/887808a7a2c1504f39ce8d8cb36c15c1721cd29f/AGEpy/david.py#L136-L157 | train |
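A minimal sketch of the lookup DAVIDgetGeneAttribute performs, with an invented two-row reference table standing in for the `df` argument; only pandas is assumed.

```python
import pandas as pd

# Invented reference table standing in for the `df` argument above.
ref = pd.DataFrame({
    "ensembl_gene_id": ["ENSG000001", "ENSG000002"],
    "gene_name": ["TP53", "BRCA1"],
})

# Same steps as the function: split, upper-case, left-merge, re-join.
x = "ensg000002, ensg000001"
ids = [s.upper() for s in x.split(", ")]
tmp = pd.DataFrame({"ensembl_gene_id": ids})
merged = pd.merge(tmp, ref.drop_duplicates(), how="left", on="ensembl_gene_id")
print(", ".join(str(s) for s in merged["gene_name"]))  # BRCA1, TP53
```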
Nachtfeuer/pipeline | spline/tools/loc/application.py | main | def main(**options):
"""Spline loc tool."""
application = Application(**options)
# fail the application when the com/loc ratio falls below the defined threshold.
if not application.run():
sys.exit(1)
return application | python | def main(**options):
"""Spline loc tool."""
application = Application(**options)
# fail the application when the com/loc ratio falls below the defined threshold.
if not application.run():
sys.exit(1)
return application | [
"def",
"main",
"(",
"**",
"options",
")",
":",
"application",
"=",
"Application",
"(",
"**",
"options",
")",
"if",
"not",
"application",
".",
"run",
"(",
")",
":",
"sys",
".",
"exit",
"(",
"1",
")",
"return",
"application"
] | Spline loc tool. | [
"Spline",
"loc",
"tool",
"."
] | 04ca18c4e95e4349532bb45b768206393e1f2c13 | https://github.com/Nachtfeuer/pipeline/blob/04ca18c4e95e4349532bb45b768206393e1f2c13/spline/tools/loc/application.py#L170-L176 | train |
Nachtfeuer/pipeline | spline/tools/loc/application.py | Application.load_configuration | def load_configuration(self):
"""Loading configuration."""
filename = os.path.join(os.path.dirname(__file__), 'templates/spline-loc.yml.j2')
with open(filename) as handle:
return Adapter(safe_load(handle)).configuration | python | def load_configuration(self):
"""Loading configuration."""
filename = os.path.join(os.path.dirname(__file__), 'templates/spline-loc.yml.j2')
with open(filename) as handle:
return Adapter(safe_load(handle)).configuration | [
"def",
"load_configuration",
"(",
"self",
")",
":",
"filename",
"=",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"__file__",
")",
",",
"'templates/spline-loc.yml.j2'",
")",
"with",
"open",
"(",
"filename",
")",
"as",
"handle",
":",
"return",
"Adapter",
"(",
"safe_load",
"(",
"handle",
")",
")",
".",
"configuration"
] | Loading configuration. | [
"Loading",
"configuration",
"."
] | 04ca18c4e95e4349532bb45b768206393e1f2c13 | https://github.com/Nachtfeuer/pipeline/blob/04ca18c4e95e4349532bb45b768206393e1f2c13/spline/tools/loc/application.py#L56-L60 | train |
Nachtfeuer/pipeline | spline/tools/loc/application.py | Application.ignore_path | def ignore_path(path):
"""
Verify whether to ignore a path.
Args:
path (str): path to check.
Returns:
bool: True when to ignore given path.
"""
ignore = False
for name in ['.tox', 'dist', 'build', 'node_modules', 'htmlcov']:
if path.find(name) >= 0:
ignore = True
break
return ignore | python | def ignore_path(path):
"""
Verify whether to ignore a path.
Args:
path (str): path to check.
Returns:
bool: True when to ignore given path.
"""
ignore = False
for name in ['.tox', 'dist', 'build', 'node_modules', 'htmlcov']:
if path.find(name) >= 0:
ignore = True
break
return ignore | [
"def",
"ignore_path",
"(",
"path",
")",
":",
"ignore",
"=",
"False",
"for",
"name",
"in",
"[",
"'.tox'",
",",
"'dist'",
",",
"'build'",
",",
"'node_modules'",
",",
"'htmlcov'",
"]",
":",
"if",
"path",
".",
"find",
"(",
"name",
")",
">=",
"0",
":",
"ignore",
"=",
"True",
"break",
"return",
"ignore"
] | Verify whether to ignore a path.
Args:
path (str): path to check.
Returns:
bool: True when to ignore given path. | [
"Verify",
"whether",
"to",
"ignore",
"a",
"path",
"."
] | 04ca18c4e95e4349532bb45b768206393e1f2c13 | https://github.com/Nachtfeuer/pipeline/blob/04ca18c4e95e4349532bb45b768206393e1f2c13/spline/tools/loc/application.py#L63-L78 | train |
Nachtfeuer/pipeline | spline/tools/loc/application.py | Application.walk_files_for | def walk_files_for(paths, supported_extensions):
"""
Iterating files for given extensions.
Args:
paths (list): root directories to walk.
supported_extensions (list): supported file extensions for which to check loc and com.
Returns:
tuple: yields (path, full filename, extension) for each matching file.
"""
for path in paths:
for root, _, files in os.walk(path):
if Application.ignore_path(root.replace(path, '')):
continue
for filename in files:
extension = os.path.splitext(filename)[1]
if extension in supported_extensions:
yield path, os.path.join(root, filename), extension | python | def walk_files_for(paths, supported_extensions):
"""
Iterating files for given extensions.
Args:
paths (list): root directories to walk.
supported_extensions (list): supported file extensions for which to check loc and com.
Returns:
tuple: yields (path, full filename, extension) for each matching file.
"""
for path in paths:
for root, _, files in os.walk(path):
if Application.ignore_path(root.replace(path, '')):
continue
for filename in files:
extension = os.path.splitext(filename)[1]
if extension in supported_extensions:
yield path, os.path.join(root, filename), extension | [
"def",
"walk_files_for",
"(",
"paths",
",",
"supported_extensions",
")",
":",
"for",
"path",
"in",
"paths",
":",
"for",
"root",
",",
"_",
",",
"files",
"in",
"os",
".",
"walk",
"(",
"path",
")",
":",
"if",
"Application",
".",
"ignore_path",
"(",
"root",
".",
"replace",
"(",
"path",
",",
"''",
")",
")",
":",
"continue",
"for",
"filename",
"in",
"files",
":",
"extension",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"filename",
")",
"[",
"1",
"]",
"if",
"extension",
"in",
"supported_extensions",
":",
"yield",
"path",
",",
"os",
".",
"path",
".",
"join",
"(",
"root",
",",
"filename",
")",
",",
"extension"
] | Iterating files for given extensions.
Args:
paths (list): root directories to walk.
supported_extensions (list): supported file extensions for which to check loc and com.
Returns:
tuple: yields (path, full filename, extension) for each matching file. | [
"Iterating",
"files",
"for",
"given",
"extensions",
"."
] | 04ca18c4e95e4349532bb45b768206393e1f2c13 | https://github.com/Nachtfeuer/pipeline/blob/04ca18c4e95e4349532bb45b768206393e1f2c13/spline/tools/loc/application.py#L81-L99 | train |
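The generator above yields `(path, full filename, extension)` triples. A self-contained sketch of the same walk, minus the ignore-path filter, runnable against the current directory:

```python
import os

def walk_files_for(paths, supported_extensions):
    # Same shape as the generator above, without the ignore-path filter.
    for path in paths:
        for root, _, files in os.walk(path):
            for filename in files:
                extension = os.path.splitext(filename)[1]
                if extension in supported_extensions:
                    yield path, os.path.join(root, filename), extension

for base, full_name, ext in walk_files_for(["."], [".py"]):
    print(base, full_name, ext)
```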
Nachtfeuer/pipeline | spline/tools/loc/application.py | Application.analyse | def analyse(self, path_and_filename, pattern):
"""
Find out lines of code and lines of comments.
Args:
path_and_filename (str): path and filename to parse for loc and com.
pattern (str): regex to search for line comments and block comments
Returns:
int, int: loc and com for given file.
"""
with open(path_and_filename) as handle:
content = handle.read()
loc = content.count('\n') + 1
com = 0
for match in re.findall(pattern, content, re.DOTALL):
com += match.count('\n') + 1
return max(0, loc - com), com | python | def analyse(self, path_and_filename, pattern):
"""
Find out lines of code and lines of comments.
Args:
path_and_filename (str): path and filename to parse for loc and com.
pattern (str): regex to search for line comments and block comments
Returns:
int, int: loc and com for given file.
"""
with open(path_and_filename) as handle:
content = handle.read()
loc = content.count('\n') + 1
com = 0
for match in re.findall(pattern, content, re.DOTALL):
com += match.count('\n') + 1
return max(0, loc - com), com | [
"def",
"analyse",
"(",
"self",
",",
"path_and_filename",
",",
"pattern",
")",
":",
"with",
"open",
"(",
"path_and_filename",
")",
"as",
"handle",
":",
"content",
"=",
"handle",
".",
"read",
"(",
")",
"loc",
"=",
"content",
".",
"count",
"(",
"'\\n'",
")",
"+",
"1",
"com",
"=",
"0",
"for",
"match",
"in",
"re",
".",
"findall",
"(",
"pattern",
",",
"content",
",",
"re",
".",
"DOTALL",
")",
":",
"com",
"+=",
"match",
".",
"count",
"(",
"'\\n'",
")",
"+",
"1",
"return",
"max",
"(",
"0",
",",
"loc",
"-",
"com",
")",
",",
"com"
] | Find out lines of code and lines of comments.
Args:
path_and_filename (str): path and filename to parse for loc and com.
pattern (str): regex to search for line comments and block comments
Returns:
int, int: loc and com for given file. | [
"Find",
"out",
"lines",
"of",
"code",
"and",
"lines",
"of",
"comments",
"."
] | 04ca18c4e95e4349532bb45b768206393e1f2c13 | https://github.com/Nachtfeuer/pipeline/blob/04ca18c4e95e4349532bb45b768206393e1f2c13/spline/tools/loc/application.py#L101-L119 | train |
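`analyse` boils down to: total newlines plus one, minus the newline span of every regex match. A self-contained demonstration with an invented toy pattern (the real pattern comes from the tool's language configuration):

```python
import re

source = (
    "# a full-line comment\n"
    "x = 1\n"
    '"""a two-line\nblock comment"""\n'
    "y = 2\n"
)

# Toy pattern: full-line '#' comments or triple-quoted blocks.
pattern = r'^#[^\n]*$|""".*?"""'

loc = source.count('\n') + 1
com = 0
for match in re.findall(pattern, source, re.DOTALL | re.MULTILINE):
    com += match.count('\n') + 1
print(max(0, loc - com), com)  # 3 3
```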
mpg-age-bioinformatics/AGEpy | AGEpy/biom.py | datasetsBM | def datasetsBM(host=biomart_host):
"""
Lists BioMart datasets.
:param host: address of the host server, default='http://www.ensembl.org/biomart'
:returns: nothing
"""
stdout_ = sys.stdout #Keep track of the previous value.
stream = StringIO()
sys.stdout = stream
server = BiomartServer(host)  # use the host argument rather than the module default
server.show_datasets()
sys.stdout = stdout_ # restore the previous stdout.
variable = stream.getvalue()
v=variable.replace("{"," ")
v=v.replace("}"," ")
v=v.replace(": ","\t")
print(v) | python | def datasetsBM(host=biomart_host):
"""
Lists BioMart datasets.
:param host: address of the host server, default='http://www.ensembl.org/biomart'
:returns: nothing
"""
stdout_ = sys.stdout #Keep track of the previous value.
stream = StringIO()
sys.stdout = stream
server = BiomartServer(host)  # use the host argument rather than the module default
server.show_datasets()
sys.stdout = stdout_ # restore the previous stdout.
variable = stream.getvalue()
v=variable.replace("{"," ")
v=v.replace("}"," ")
v=v.replace(": ","\t")
print(v) | [
"def",
"datasetsBM",
"(",
"host",
"=",
"biomart_host",
")",
":",
"stdout_",
"=",
"sys",
".",
"stdout",
"stream",
"=",
"StringIO",
"(",
")",
"sys",
".",
"stdout",
"=",
"stream",
"server",
"=",
"BiomartServer",
"(",
"biomart_host",
")",
"server",
".",
"show_datasets",
"(",
")",
"sys",
".",
"stdout",
"=",
"stdout_",
"variable",
"=",
"stream",
".",
"getvalue",
"(",
")",
"v",
"=",
"variable",
".",
"replace",
"(",
"\"{\"",
",",
"\" \"",
")",
"v",
"=",
"v",
".",
"replace",
"(",
"\"}\"",
",",
"\" \"",
")",
"v",
"=",
"v",
".",
"replace",
"(",
"\": \"",
",",
"\"\\t\"",
")",
"print",
"(",
"v",
")"
] | Lists BioMart datasets.
:param host: address of the host server, default='http://www.ensembl.org/biomart'
:returns: nothing | [
"Lists",
"BioMart",
"datasets",
"."
] | 887808a7a2c1504f39ce8d8cb36c15c1721cd29f | https://github.com/mpg-age-bioinformatics/AGEpy/blob/887808a7a2c1504f39ce8d8cb36c15c1721cd29f/AGEpy/biom.py#L12-L31 | train |
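The stdout-swap trick in `datasetsBM` works for any library that only prints its results. Isolated, and with a `try/finally` so stdout is restored even on error (no BioMart server required):

```python
import sys
from io import StringIO

def noisy():
    print("{dataset_a: Description A}")

stdout_ = sys.stdout          # keep the real stdout
stream = StringIO()
sys.stdout = stream           # printed output now lands in `stream`
try:
    noisy()
finally:
    sys.stdout = stdout_      # always restore
captured = stream.getvalue()
print(captured.replace("{", " ").replace("}", " ").replace(": ", "\t"))
```

The standard library's `contextlib.redirect_stdout` wraps the same swap in a context manager.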
mpg-age-bioinformatics/AGEpy | AGEpy/biom.py | filtersBM | def filtersBM(dataset,host=biomart_host):
"""
Lists BioMart filters for a specific dataset.
:param dataset: dataset to list filters of.
:param host: address of the host server, default='http://www.ensembl.org/biomart'
:returns: nothing
"""
stdout_ = sys.stdout #Keep track of the previous value.
stream = StringIO()
sys.stdout = stream
server = BiomartServer(host)
d=server.datasets[dataset]
d.show_filters()
sys.stdout = stdout_ # restore the previous stdout.
variable = stream.getvalue()
v=variable.replace("{"," ")
v=v.replace("}"," ")
v=v.replace(": ","\t")
print(v) | python | def filtersBM(dataset,host=biomart_host):
"""
Lists BioMart filters for a specific dataset.
:param dataset: dataset to list filters of.
:param host: address of the host server, default='http://www.ensembl.org/biomart'
:returns: nothing
"""
stdout_ = sys.stdout #Keep track of the previous value.
stream = StringIO()
sys.stdout = stream
server = BiomartServer(host)
d=server.datasets[dataset]
d.show_filters()
sys.stdout = stdout_ # restore the previous stdout.
variable = stream.getvalue()
v=variable.replace("{"," ")
v=v.replace("}"," ")
v=v.replace(": ","\t")
print(v) | [
"def",
"filtersBM",
"(",
"dataset",
",",
"host",
"=",
"biomart_host",
")",
":",
"stdout_",
"=",
"sys",
".",
"stdout",
"stream",
"=",
"StringIO",
"(",
")",
"sys",
".",
"stdout",
"=",
"stream",
"server",
"=",
"BiomartServer",
"(",
"host",
")",
"d",
"=",
"server",
".",
"datasets",
"[",
"dataset",
"]",
"d",
".",
"show_filters",
"(",
")",
"sys",
".",
"stdout",
"=",
"stdout_",
"variable",
"=",
"stream",
".",
"getvalue",
"(",
")",
"v",
"=",
"variable",
".",
"replace",
"(",
"\"{\"",
",",
"\" \"",
")",
"v",
"=",
"v",
".",
"replace",
"(",
"\"}\"",
",",
"\" \"",
")",
"v",
"=",
"v",
".",
"replace",
"(",
"\": \"",
",",
"\"\\t\"",
")",
"print",
"(",
"v",
")"
] | Lists BioMart filters for a specific dataset.
:param dataset: dataset to list filters of.
:param host: address of the host server, default='http://www.ensembl.org/biomart'
:returns: nothing | [
"Lists",
"BioMart",
"filters",
"for",
"a",
"specific",
"dataset",
"."
] | 887808a7a2c1504f39ce8d8cb36c15c1721cd29f | https://github.com/mpg-age-bioinformatics/AGEpy/blob/887808a7a2c1504f39ce8d8cb36c15c1721cd29f/AGEpy/biom.py#L33-L54 | train |
acutesoftware/AIKIF | aikif/core_data.py | CoreData.format_csv | def format_csv(self, delim=',', qu='"'):
"""
Prepares the data in CSV format
"""
res = qu + self.name + qu + delim
if self.data:
for d in self.data:
res += qu + str(d) + qu + delim
return res + '\n' | python | def format_csv(self, delim=',', qu='"'):
"""
Prepares the data in CSV format
"""
res = qu + self.name + qu + delim
if self.data:
for d in self.data:
res += qu + str(d) + qu + delim
return res + '\n' | [
"def",
"format_csv",
"(",
"self",
",",
"delim",
"=",
"','",
",",
"qu",
"=",
"'\"'",
")",
":",
"res",
"=",
"qu",
"+",
"self",
".",
"name",
"+",
"qu",
"+",
"delim",
"if",
"self",
".",
"data",
":",
"for",
"d",
"in",
"self",
".",
"data",
":",
"res",
"+=",
"qu",
"+",
"str",
"(",
"d",
")",
"+",
"qu",
"+",
"delim",
"return",
"res",
"+",
"'\\n'"
] | Prepares the data in CSV format | [
"Prepares",
"the",
"data",
"in",
"CSV",
"format"
] | fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03 | https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/core_data.py#L40-L48 | train |
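A quick check of what `format_csv` emits; the stand-in class below keeps only the two attributes the method reads (`name` and `data`).

```python
# Stand-in with just the attributes format_csv reads.
class Node:
    def __init__(self, name, data=None):
        self.name = name
        self.data = data

    def format_csv(self, delim=',', qu='"'):
        res = qu + self.name + qu + delim
        if self.data:
            for d in self.data:
                res += qu + str(d) + qu + delim
        return res + '\n'

print(Node('fruit', ['apple', 7]).format_csv(), end='')
# "fruit","apple","7",
```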
acutesoftware/AIKIF | aikif/core_data.py | CoreData.format_all | def format_all(self):
"""
return a trace of parents and children of the object
"""
res = '\n--- Format all : ' + str(self.name) + ' -------------\n'
res += ' parent = ' + str(self.parent) + '\n'
res += self._get_all_children()
res += self._get_links()
return res | python | def format_all(self):
"""
return a trace of parents and children of the object
"""
res = '\n--- Format all : ' + str(self.name) + ' -------------\n'
res += ' parent = ' + str(self.parent) + '\n'
res += self._get_all_children()
res += self._get_links()
return res | [
"def",
"format_all",
"(",
"self",
")",
":",
"res",
"=",
"'\\n--- Format all : '",
"+",
"str",
"(",
"self",
".",
"name",
")",
"+",
"' -------------\\n'",
"res",
"+=",
"' parent = '",
"+",
"str",
"(",
"self",
".",
"parent",
")",
"+",
"'\\n'",
"res",
"+=",
"self",
".",
"_get_all_children",
"(",
")",
"res",
"+=",
"self",
".",
"_get_links",
"(",
")",
"return",
"res"
] | return a trace of parents and children of the object | [
"return",
"a",
"trace",
"of",
"parents",
"and",
"children",
"of",
"the",
"obect"
] | fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03 | https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/core_data.py#L60-L69 | train |
acutesoftware/AIKIF | aikif/core_data.py | CoreData._get_all_children | def _get_all_children(self,):
"""
return the list of children of a node
"""
res = ''
if self.child_nodes:
for c in self.child_nodes:
res += ' child = ' + str(c) + '\n'
if c.child_nodes:
for grandchild in c.child_nodes:
res += ' child = ' + str(grandchild) + '\n'
else:
res += ' child = None\n'
return res | python | def _get_all_children(self,):
"""
return the list of children of a node
"""
res = ''
if self.child_nodes:
for c in self.child_nodes:
res += ' child = ' + str(c) + '\n'
if c.child_nodes:
for grandchild in c.child_nodes:
res += ' child = ' + str(grandchild) + '\n'
else:
res += ' child = None\n'
return res | [
"def",
"_get_all_children",
"(",
"self",
",",
")",
":",
"res",
"=",
"''",
"if",
"self",
".",
"child_nodes",
":",
"for",
"c",
"in",
"self",
".",
"child_nodes",
":",
"res",
"+=",
"' child = '",
"+",
"str",
"(",
"c",
")",
"+",
"'\\n'",
"if",
"c",
".",
"child_nodes",
":",
"for",
"grandchild",
"in",
"c",
".",
"child_nodes",
":",
"res",
"+=",
"' child = '",
"+",
"str",
"(",
"grandchild",
")",
"+",
"'\\n'",
"else",
":",
"res",
"+=",
"' child = None\\n'",
"return",
"res"
] | return the list of children of a node | [
"return",
"the",
"list",
"of",
"children",
"of",
"a",
"node"
] | fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03 | https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/core_data.py#L71-L84 | train |
acutesoftware/AIKIF | aikif/core_data.py | CoreData._get_links | def _get_links(self,):
"""
return the list of links of a node
"""
res = ''
if self.links:
for l in self.links:
res += ' links = ' + str(l[0]) + '\n'
if l[0].child_nodes:
for chld in l[0].child_nodes:
res += ' child = ' + str(chld) + '\n'
if l[0].links:
for lnk in l[0].links:
res += ' sublink = ' + str(lnk[0]) + '\n'
else:
res += ' links = None\n'
return res | python | def _get_links(self,):
"""
return the list of links of a node
"""
res = ''
if self.links:
for l in self.links:
res += ' links = ' + str(l[0]) + '\n'
if l[0].child_nodes:
for chld in l[0].child_nodes:
res += ' child = ' + str(chld) + '\n'
if l[0].links:
for lnk in l[0].links:
res += ' sublink = ' + str(lnk[0]) + '\n'
else:
res += ' links = None\n'
return res | [
"def",
"_get_links",
"(",
"self",
",",
")",
":",
"res",
"=",
"''",
"if",
"self",
".",
"links",
":",
"for",
"l",
"in",
"self",
".",
"links",
":",
"res",
"+=",
"' links = '",
"+",
"str",
"(",
"l",
"[",
"0",
"]",
")",
"+",
"'\\n'",
"if",
"l",
"[",
"0",
"]",
".",
"child_nodes",
":",
"for",
"chld",
"in",
"l",
"[",
"0",
"]",
".",
"child_nodes",
":",
"res",
"+=",
"' child = '",
"+",
"str",
"(",
"chld",
")",
"+",
"'\\n'",
"if",
"l",
"[",
"0",
"]",
".",
"links",
":",
"for",
"lnk",
"in",
"l",
"[",
"0",
"]",
".",
"links",
":",
"res",
"+=",
"' sublink = '",
"+",
"str",
"(",
"lnk",
"[",
"0",
"]",
")",
"+",
"'\\n'",
"else",
":",
"res",
"+=",
"' links = None\\n'",
"return",
"res"
] | return the list of links of a node | [
"return",
"the",
"list",
"of",
"links",
"of",
"a",
"node"
] | fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03 | https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/core_data.py#L86-L103 | train |
acutesoftware/AIKIF | aikif/core_data.py | CoreData.get_child_by_name | def get_child_by_name(self, name):
"""
find the child object by name and return the object
"""
for c in self.child_nodes:
if c.name == name:
return c
return None | python | def get_child_by_name(self, name):
"""
find the child object by name and return the object
"""
for c in self.child_nodes:
if c.name == name:
return c
return None | [
"def",
"get_child_by_name",
"(",
"self",
",",
"name",
")",
":",
"for",
"c",
"in",
"self",
".",
"child_nodes",
":",
"if",
"c",
".",
"name",
"==",
"name",
":",
"return",
"c",
"return",
"None"
] | find the child object by name and return the object | [
"find",
"the",
"child",
"object",
"by",
"name",
"and",
"return",
"the",
"object"
] | fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03 | https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/core_data.py#L149-L156 | train |
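`get_child_by_name` is a linear scan over `child_nodes`; a two-level toy tree shows the lookup and the `None` miss case (the stand-in class keeps only the attributes the method uses):

```python
# Stand-in node: only `name` and `child_nodes` are needed for the lookup.
class Node:
    def __init__(self, name):
        self.name = name
        self.child_nodes = []

    def get_child_by_name(self, name):
        for c in self.child_nodes:
            if c.name == name:
                return c
        return None

root = Node('root')
root.child_nodes += [Node('alpha'), Node('beta')]
print(root.get_child_by_name('beta').name)   # beta
print(root.get_child_by_name('gamma'))       # None
```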
acutesoftware/AIKIF | aikif/core_data.py | CoreTable.get_filename | def get_filename(self, year):
"""
returns the filename
"""
res = self.fldr + os.sep + self.type + year + '.' + self.user
return res | python | def get_filename(self, year):
"""
returns the filename
"""
res = self.fldr + os.sep + self.type + year + '.' + self.user
return res | [
"def",
"get_filename",
"(",
"self",
",",
"year",
")",
":",
"res",
"=",
"self",
".",
"fldr",
"+",
"os",
".",
"sep",
"+",
"self",
".",
"type",
"+",
"year",
"+",
"'.'",
"+",
"self",
".",
"user",
"return",
"res"
] | returns the filename | [
"returns",
"the",
"filename"
] | fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03 | https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/core_data.py#L301-L306 | train |
acutesoftware/AIKIF | aikif/core_data.py | CoreTable.save | def save(self, file_tag='2016', add_header='N'):
"""
save table to folder in appropriate files
NOTE - ONLY APPEND AT THIS STAGE - THEN USE DATABASE
"""
fname = self.get_filename(file_tag)
with open(fname, 'a') as f:
if add_header == 'Y':
f.write(self.format_hdr())
for e in self.table:
f.write(e.format_csv()) | python | def save(self, file_tag='2016', add_header='N'):
"""
save table to folder in appropriate files
NOTE - ONLY APPEND AT THIS STAGE - THEN USE DATABASE
"""
fname = self.get_filename(file_tag)
with open(fname, 'a') as f:
if add_header == 'Y':
f.write(self.format_hdr())
for e in self.table:
f.write(e.format_csv()) | [
"def",
"save",
"(",
"self",
",",
"file_tag",
"=",
"'2016'",
",",
"add_header",
"=",
"'N'",
")",
":",
"fname",
"=",
"self",
".",
"get_filename",
"(",
"file_tag",
")",
"with",
"open",
"(",
"fname",
",",
"'a'",
")",
"as",
"f",
":",
"if",
"add_header",
"==",
"'Y'",
":",
"f",
".",
"write",
"(",
"self",
".",
"format_hdr",
"(",
")",
")",
"for",
"e",
"in",
"self",
".",
"table",
":",
"f",
".",
"write",
"(",
"e",
".",
"format_csv",
"(",
")",
")"
] | save table to folder in appropriate files
NOTE - ONLY APPEND AT THIS STAGE - THEN USE DATABASE | [
"save",
"table",
"to",
"folder",
"in",
"appropriate",
"files",
"NOTE",
"-",
"ONLY",
"APPEND",
"AT",
"THIS",
"STAGE",
"-",
"THEN",
"USE",
"DATABASE"
] | fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03 | https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/core_data.py#L320-L331 | train |
acutesoftware/AIKIF | aikif/core_data.py | CoreTable.format_hdr | def format_hdr(self, delim=',', qu='"'):
"""
Prepares the header in CSV format
"""
res = ''
if self.header:
for d in self.header:
res += qu + str(d) + qu + delim
return res + '\n' | python | def format_hdr(self, delim=',', qu='"'):
"""
Prepares the header in CSV format
"""
res = ''
if self.header:
for d in self.header:
res += qu + str(d) + qu + delim
return res + '\n' | [
"def",
"format_hdr",
"(",
"self",
",",
"delim",
"=",
"','",
",",
"qu",
"=",
"'\"'",
")",
":",
"res",
"=",
"''",
"if",
"self",
".",
"header",
":",
"for",
"d",
"in",
"self",
".",
"header",
":",
"res",
"+=",
"qu",
"+",
"str",
"(",
"d",
")",
"+",
"qu",
"+",
"delim",
"return",
"res",
"+",
"'\\n'"
] | Prepares the header in CSV format | [
"Prepares",
"the",
"header",
"in",
"CSV",
"format"
] | fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03 | https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/core_data.py#L333-L341 | train |
acutesoftware/AIKIF | aikif/core_data.py | CoreTable.generate_diary | def generate_diary(self):
"""
extracts event information from core tables into diary files
"""
print('Generate diary files from Event rows only')
for r in self.table:
print(str(type(r)) + ' = ', r) | python | def generate_diary(self):
"""
extracts event information from core tables into diary files
"""
print('Generate diary files from Event rows only')
for r in self.table:
print(str(type(r)) + ' = ', r) | [
"def",
"generate_diary",
"(",
"self",
")",
":",
"print",
"(",
"'Generate diary files from Event rows only'",
")",
"for",
"r",
"in",
"self",
".",
"table",
":",
"print",
"(",
"str",
"(",
"type",
"(",
"r",
")",
")",
"+",
"' = '",
",",
"r",
")"
] | extracts event information from core tables into diary files | [
"extracts",
"event",
"information",
"from",
"core",
"tables",
"into",
"diary",
"files"
] | fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03 | https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/core_data.py#L344-L350 | train |
Phelimb/atlas | mykatlas/typing/typer/variant.py | VariantTyper.type | def type(self, variant_probe_coverages, variant=None):
"""
Takes a list of VariantProbeCoverages and returns a Call for the Variant.
Note, in the simplest case the list will be of length one. However, we may be typing the
Variant on multiple backgrounds leading to multiple VariantProbes for a single Variant.
"""
if not isinstance(variant_probe_coverages, list):
variant_probe_coverages = [variant_probe_coverages]
calls = []
for variant_probe_coverage in variant_probe_coverages:
calls.append(
self._type_variant_probe_coverages(
variant_probe_coverage, variant))
hom_alt_calls = [c for c in calls if sum(c["genotype"]) > 1]
het_calls = [c for c in calls if sum(c["genotype"]) == 1]
if hom_alt_calls:
hom_alt_calls.sort(key=lambda x: x["info"]["conf"], reverse=True)
return hom_alt_calls[0]
elif het_calls:
het_calls.sort(key=lambda x: x["info"]["conf"], reverse=True)
return het_calls[0]
else:
calls.sort(key=lambda x: x["info"]["conf"], reverse=True)
return calls[0] | python | def type(self, variant_probe_coverages, variant=None):
"""
Takes a list of VariantProbeCoverages and returns a Call for the Variant.
Note, in the simplest case the list will be of length one. However, we may be typing the
Variant on multiple backgrounds leading to multiple VariantProbes for a single Variant.
"""
if not isinstance(variant_probe_coverages, list):
variant_probe_coverages = [variant_probe_coverages]
calls = []
for variant_probe_coverage in variant_probe_coverages:
calls.append(
self._type_variant_probe_coverages(
variant_probe_coverage, variant))
hom_alt_calls = [c for c in calls if sum(c["genotype"]) > 1]
het_calls = [c for c in calls if sum(c["genotype"]) == 1]
if hom_alt_calls:
hom_alt_calls.sort(key=lambda x: x["info"]["conf"], reverse=True)
return hom_alt_calls[0]
elif het_calls:
het_calls.sort(key=lambda x: x["info"]["conf"], reverse=True)
return het_calls[0]
else:
calls.sort(key=lambda x: x["info"]["conf"], reverse=True)
return calls[0] | [
"def",
"type",
"(",
"self",
",",
"variant_probe_coverages",
",",
"variant",
"=",
"None",
")",
":",
"if",
"not",
"isinstance",
"(",
"variant_probe_coverages",
",",
"list",
")",
":",
"variant_probe_coverages",
"=",
"[",
"variant_probe_coverages",
"]",
"calls",
"=",
"[",
"]",
"for",
"variant_probe_coverage",
"in",
"variant_probe_coverages",
":",
"calls",
".",
"append",
"(",
"self",
".",
"_type_variant_probe_coverages",
"(",
"variant_probe_coverage",
",",
"variant",
")",
")",
"hom_alt_calls",
"=",
"[",
"c",
"for",
"c",
"in",
"calls",
"if",
"sum",
"(",
"c",
"[",
"\"genotype\"",
"]",
")",
">",
"1",
"]",
"het_calls",
"=",
"[",
"c",
"for",
"c",
"in",
"calls",
"if",
"sum",
"(",
"c",
"[",
"\"genotype\"",
"]",
")",
"==",
"1",
"]",
"if",
"hom_alt_calls",
":",
"hom_alt_calls",
".",
"sort",
"(",
"key",
"=",
"lambda",
"x",
":",
"x",
"[",
"\"info\"",
"]",
"[",
"\"conf\"",
"]",
",",
"reverse",
"=",
"True",
")",
"return",
"hom_alt_calls",
"[",
"0",
"]",
"elif",
"het_calls",
":",
"het_calls",
".",
"sort",
"(",
"key",
"=",
"lambda",
"x",
":",
"x",
"[",
"\"info\"",
"]",
"[",
"\"conf\"",
"]",
",",
"reverse",
"=",
"True",
")",
"return",
"het_calls",
"[",
"0",
"]",
"else",
":",
"calls",
".",
"sort",
"(",
"key",
"=",
"lambda",
"x",
":",
"x",
"[",
"\"info\"",
"]",
"[",
"\"conf\"",
"]",
",",
"reverse",
"=",
"True",
")",
"return",
"calls",
"[",
"0",
"]"
] | Takes a list of VariantProbeCoverages and returns a Call for the Variant.
Note, in the simplest case the list will be of length one. However, we may be typing the
Variant on multiple backgrounds leading to multiple VariantProbes for a single Variant. | [
"Takes",
"a",
"list",
"of",
"VariantProbeCoverages",
"and",
"returns",
"a",
"Call",
"for",
"the",
"Variant",
".",
"Note",
"in",
"the",
"simplest",
"case",
"the",
"list",
"will",
"be",
"of",
"length",
"one",
".",
"However",
"we",
"may",
"be",
"typing",
"the",
"Variant",
"on",
"multiple",
"backgrouds",
"leading",
"to",
"multiple",
"VariantProbes",
"for",
"a",
"single",
"Variant",
"."
] | 02e85497bb5ac423d6452a10dca11964582ac4d7 | https://github.com/Phelimb/atlas/blob/02e85497bb5ac423d6452a10dca11964582ac4d7/mykatlas/typing/typer/variant.py#L64-L88 | train |
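The selection rule above is: hom-alt calls beat het calls, het calls beat everything else, and ties within a bucket are broken by highest confidence. A toy dictionary sketch of the same precedence; the values are invented and `max` stands in for sort-then-take-first:

```python
# Invented call dicts with only the fields the typer inspects.
calls = [
    {"genotype": [1, 1], "info": {"conf": 40}},  # hom alt, low confidence
    {"genotype": [0, 1], "info": {"conf": 90}},  # het
    {"genotype": [0, 0], "info": {"conf": 99}},  # hom ref
]

hom_alt = [c for c in calls if sum(c["genotype"]) > 1]
het = [c for c in calls if sum(c["genotype"]) == 1]
for bucket in (hom_alt, het, calls):
    if bucket:
        best = max(bucket, key=lambda c: c["info"]["conf"])
        break
print(best)  # the hom-alt call wins despite its lower confidence
```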
Nachtfeuer/pipeline | spline/components/ansible.py | Ansible.creator | def creator(entry, config):
"""Creator function for creating an instance of an Ansible script."""
ansible_playbook = "ansible.playbook.dry.run.see.comment"
ansible_inventory = "ansible.inventory.dry.run.see.comment"
ansible_playbook_content = render(config.script, model=config.model, env=config.env,
variables=config.variables, item=config.item)
ansible_inventory_content = render(entry['inventory'], model=config.model, env=config.env,
variables=config.variables, item=config.item)
if not config.dry_run:
ansible_playbook = write_temporary_file(ansible_playbook_content, 'ansible-play-', '.yaml')
ansible_playbook_content = ''
ansible_inventory = write_temporary_file(ansible_inventory_content, prefix='ansible-inventory-')
ansible_inventory_content = ''
# rendering the Bash script for running the Ansible playbook
template_file = os.path.join(os.path.dirname(__file__), 'templates/ansible.sh.j2')
with open(template_file) as handle:
template = handle.read()
config.script = render(template, debug=config.debug,
ansible_playbook_content=ansible_playbook_content,
ansible_playbook=ansible_playbook,
ansible_inventory_content=ansible_inventory_content,
ansible_inventory=ansible_inventory,
limit=entry['limit'])
return Ansible(config) | python | def creator(entry, config):
"""Creator function for creating an instance of an Ansible script."""
ansible_playbook = "ansible.playbook.dry.run.see.comment"
ansible_inventory = "ansible.inventory.dry.run.see.comment"
ansible_playbook_content = render(config.script, model=config.model, env=config.env,
variables=config.variables, item=config.item)
ansible_inventory_content = render(entry['inventory'], model=config.model, env=config.env,
variables=config.variables, item=config.item)
if not config.dry_run:
ansible_playbook = write_temporary_file(ansible_playbook_content, 'ansible-play-', '.yaml')
ansible_playbook_content = ''
ansible_inventory = write_temporary_file(ansible_inventory_content, prefix='ansible-inventory-')
ansible_inventory_content = ''
# rendering the Bash script for running the Ansible playbook
template_file = os.path.join(os.path.dirname(__file__), 'templates/ansible.sh.j2')
with open(template_file) as handle:
template = handle.read()
config.script = render(template, debug=config.debug,
ansible_playbook_content=ansible_playbook_content,
ansible_playbook=ansible_playbook,
ansible_inventory_content=ansible_inventory_content,
ansible_inventory=ansible_inventory,
limit=entry['limit'])
return Ansible(config) | [
"def",
"creator",
"(",
"entry",
",",
"config",
")",
":",
"ansible_playbook",
"=",
"\"ansible.playbook.dry.run.see.comment\"",
"ansible_inventory",
"=",
"\"ansible.inventory.dry.run.see.comment\"",
"ansible_playbook_content",
"=",
"render",
"(",
"config",
".",
"script",
",",
"model",
"=",
"config",
".",
"model",
",",
"env",
"=",
"config",
".",
"env",
",",
"variables",
"=",
"config",
".",
"variables",
",",
"item",
"=",
"config",
".",
"item",
")",
"ansible_inventory_content",
"=",
"render",
"(",
"entry",
"[",
"'inventory'",
"]",
",",
"model",
"=",
"config",
".",
"model",
",",
"env",
"=",
"config",
".",
"env",
",",
"variables",
"=",
"config",
".",
"variables",
",",
"item",
"=",
"config",
".",
"item",
")",
"if",
"not",
"config",
".",
"dry_run",
":",
"ansible_playbook",
"=",
"write_temporary_file",
"(",
"ansible_playbook_content",
",",
"'ansible-play-'",
",",
"'.yaml'",
")",
"ansible_playbook_content",
"=",
"''",
"ansible_inventory",
"=",
"write_temporary_file",
"(",
"ansible_inventory_content",
",",
"prefix",
"=",
"'ansible-inventory-'",
")",
"ansible_inventory_content",
"=",
"''",
"template_file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"__file__",
")",
",",
"'templates/ansible.sh.j2'",
")",
"with",
"open",
"(",
"template_file",
")",
"as",
"handle",
":",
"template",
"=",
"handle",
".",
"read",
"(",
")",
"config",
".",
"script",
"=",
"render",
"(",
"template",
",",
"debug",
"=",
"config",
".",
"debug",
",",
"ansible_playbook_content",
"=",
"ansible_playbook_content",
",",
"ansible_playbook",
"=",
"ansible_playbook",
",",
"ansible_inventory_content",
"=",
"ansible_inventory_content",
",",
"ansible_inventory",
"=",
"ansible_inventory",
",",
"limit",
"=",
"entry",
"[",
"'limit'",
"]",
")",
"return",
"Ansible",
"(",
"config",
")"
] | Creator function that builds an Ansible script instance. | [
"Creator",
"function",
"for",
"creating",
"an",
"instance",
"of",
"an",
"Ansible",
"script",
"."
] | 04ca18c4e95e4349532bb45b768206393e1f2c13 | https://github.com/Nachtfeuer/pipeline/blob/04ca18c4e95e4349532bb45b768206393e1f2c13/spline/components/ansible.py#L37-L64 | train |
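Stripped of the spline specifics, the creator's core flow is render-template-then-persist-to-temp-file. A rough standalone sketch of that flow with jinja2 and tempfile; the template text is invented, and spline's `render`/`write_temporary_file` helpers are assumed to wrap roughly these calls:

```python
import tempfile
from jinja2 import Template  # assumes jinja2 is available

playbook_template = "- hosts: {{ limit }}\n  tasks: []\n"  # invented example
content = Template(playbook_template).render(limit="webservers")

# Persist the rendered playbook so a shell wrapper can point ansible at it.
with tempfile.NamedTemporaryFile(mode="w", prefix="ansible-play-",
                                 suffix=".yaml", delete=False) as handle:
    handle.write(content)
    playbook_path = handle.name

print(playbook_path)  # e.g. /tmp/ansible-play-xxxx.yaml
```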
acutesoftware/AIKIF | aikif/toolbox/cls_grid_life.py | GameOfLife.update_gol | def update_gol(self):
"""
Function that performs one step of the Game of Life
"""
updated_grid = [[self.update_cell(row, col) \
for col in range(self.get_grid_width())] \
for row in range(self.get_grid_height())]
self.replace_grid(updated_grid) | python | def update_gol(self):
"""
Function that performs one step of the Game of Life
"""
updated_grid = [[self.update_cell(row, col) \
for col in range(self.get_grid_width())] \
for row in range(self.get_grid_height())]
self.replace_grid(updated_grid) | [
"def",
"update_gol",
"(",
"self",
")",
":",
"updated_grid",
"=",
"[",
"[",
"self",
".",
"update_cell",
"(",
"row",
",",
"col",
")",
"for",
"col",
"in",
"range",
"(",
"self",
".",
"get_grid_width",
"(",
")",
")",
"]",
"for",
"row",
"in",
"range",
"(",
"self",
".",
"get_grid_height",
"(",
")",
")",
"]",
"self",
".",
"replace_grid",
"(",
"updated_grid",
")"
] | Function that performs one step of the Game of Life | [
"Function",
"that",
"performs",
"one",
"step",
"of",
"the",
"Game",
"of",
"Life"
] | fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03 | https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/toolbox/cls_grid_life.py#L19-L28 | train |
acutesoftware/AIKIF | aikif/toolbox/cls_grid_life.py | GameOfLife.update_cell | def update_cell(self, row, col):
"""
Function that computes the update for one cell in the Game of Life
"""
# compute number of living neighbors
neighbors = self.eight_neighbors(row, col)
living_neighbors = 0
for neighbor in neighbors:
if not self.is_empty(neighbor[0], neighbor[1]):
living_neighbors += 1
# logic for Game of life
if (living_neighbors == 3) or (living_neighbors == 2 and not self.is_empty(row, col)):
return mod_grid.FULL
else:
return mod_grid.EMPTY | python | def update_cell(self, row, col):
"""
Function that computes the update for one cell in the Game of Life
"""
# compute number of living neighbors
neighbors = self.eight_neighbors(row, col)
living_neighbors = 0
for neighbor in neighbors:
if not self.is_empty(neighbor[0], neighbor[1]):
living_neighbors += 1
# logic for Game of life
if (living_neighbors == 3) or (living_neighbors == 2 and not self.is_empty(row, col)):
return mod_grid.FULL
else:
return mod_grid.EMPTY | [
"def",
"update_cell",
"(",
"self",
",",
"row",
",",
"col",
")",
":",
"neighbors",
"=",
"self",
".",
"eight_neighbors",
"(",
"row",
",",
"col",
")",
"living_neighbors",
"=",
"0",
"for",
"neighbor",
"in",
"neighbors",
":",
"if",
"not",
"self",
".",
"is_empty",
"(",
"neighbor",
"[",
"0",
"]",
",",
"neighbor",
"[",
"1",
"]",
")",
":",
"living_neighbors",
"+=",
"1",
"if",
"(",
"living_neighbors",
"==",
"3",
")",
"or",
"(",
"living_neighbors",
"==",
"2",
"and",
"not",
"self",
".",
"is_empty",
"(",
"row",
",",
"col",
")",
")",
":",
"return",
"mod_grid",
".",
"FULL",
"else",
":",
"return",
"mod_grid",
".",
"EMPTY"
] | Function that computes the update for one cell in the Game of Life | [
"Function",
"that",
"computes",
"the",
"update",
"for",
"one",
"cell",
"in",
"the",
"Game",
"of",
"Life"
] | fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03 | https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/toolbox/cls_grid_life.py#L31-L46 | train |
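The rule in `update_cell` is the standard Conway step: a cell is alive next generation iff it has exactly three living neighbors, or exactly two and is currently alive. A self-contained step on a plain list-of-lists grid (boundaries simply clamped), showing the classic blinker flip:

```python
# Self-contained one-step Game of Life, same rule as update_cell
# (1 = full, 0 = empty).
def step(grid):
    h, w = len(grid), len(grid[0])

    def living_neighbors(r, c):
        return sum(grid[rr][cc]
                   for rr in range(max(0, r - 1), min(h, r + 2))
                   for cc in range(max(0, c - 1), min(w, c + 2))
                   if (rr, cc) != (r, c))

    return [[1 if living_neighbors(r, c) == 3
                  or (living_neighbors(r, c) == 2 and grid[r][c])
             else 0
             for c in range(w)] for r in range(h)]

blinker = [[0, 0, 0],
           [1, 1, 1],
           [0, 0, 0]]
print(step(blinker))  # [[0, 1, 0], [0, 1, 0], [0, 1, 0]]
```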
acutesoftware/AIKIF | aikif/toolbox/cls_grid_life.py | GameOfLifePatterns.random_offset | def random_offset(self, lst):
"""
offsets a pattern list generated below to a random
position in the grid
"""
res = []
x = random.randint(4,self.max_x - 42)
y = random.randint(4,self.max_y - 10)
for itm in lst:
res.append([itm[0] + y, itm[1] + x])
return res | python | def random_offset(self, lst):
"""
offsets a pattern list generated below to a random
position in the grid
"""
res = []
x = random.randint(4,self.max_x - 42)
y = random.randint(4,self.max_y - 10)
for itm in lst:
res.append([itm[0] + y, itm[1] + x])
return res | [
"def",
"random_offset",
"(",
"self",
",",
"lst",
")",
":",
"res",
"=",
"[",
"]",
"x",
"=",
"random",
".",
"randint",
"(",
"4",
",",
"self",
".",
"max_x",
"-",
"42",
")",
"y",
"=",
"random",
".",
"randint",
"(",
"4",
",",
"self",
".",
"max_y",
"-",
"10",
")",
"for",
"itm",
"in",
"lst",
":",
"res",
".",
"append",
"(",
"[",
"itm",
"[",
"0",
"]",
"+",
"y",
",",
"itm",
"[",
"1",
"]",
"+",
"x",
"]",
")",
"return",
"res"
] | offsets a pattern list generated below to a random
position in the grid | [
"offsets",
"a",
"pattern",
"list",
"generated",
"below",
"to",
"a",
"random",
"position",
"in",
"the",
"grid"
] | fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03 | https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/toolbox/cls_grid_life.py#L100-L110 | train |
JelleAalbers/multihist | multihist.py | Hist1d.get_random | def get_random(self, size=10):
"""Returns random variates from the histogram.
Note this assumes the histogram is an 'events per bin', not a pdf.
Inside the bins, a uniform distribution is assumed.
"""
bin_i = np.random.choice(np.arange(len(self.bin_centers)), size=size, p=self.normalized_histogram)
return self.bin_centers[bin_i] + np.random.uniform(-0.5, 0.5, size=size) * self.bin_volumes()[bin_i] | python | def get_random(self, size=10):
"""Returns random variates from the histogram.
Note this assumes the histogram is an 'events per bin', not a pdf.
Inside the bins, a uniform distribution is assumed.
"""
bin_i = np.random.choice(np.arange(len(self.bin_centers)), size=size, p=self.normalized_histogram)
return self.bin_centers[bin_i] + np.random.uniform(-0.5, 0.5, size=size) * self.bin_volumes()[bin_i] | [
"def",
"get_random",
"(",
"self",
",",
"size",
"=",
"10",
")",
":",
"bin_i",
"=",
"np",
".",
"random",
".",
"choice",
"(",
"np",
".",
"arange",
"(",
"len",
"(",
"self",
".",
"bin_centers",
")",
")",
",",
"size",
"=",
"size",
",",
"p",
"=",
"self",
".",
"normalized_histogram",
")",
"return",
"self",
".",
"bin_centers",
"[",
"bin_i",
"]",
"+",
"np",
".",
"random",
".",
"uniform",
"(",
"-",
"0.5",
",",
"0.5",
",",
"size",
"=",
"size",
")",
"*",
"self",
".",
"bin_volumes",
"(",
")",
"[",
"bin_i",
"]"
] | Returns random variates from the histogram.
Note this assumes the histogram is an 'events per bin', not a pdf.
Inside the bins, a uniform distribution is assumed. | [
"Returns",
"random",
"variates",
"from",
"the",
"histogram",
".",
"Note",
"this",
"assumes",
"the",
"histogram",
"is",
"an",
"events",
"per",
"bin",
"not",
"a",
"pdf",
".",
"Inside",
"the",
"bins",
"a",
"uniform",
"distribution",
"is",
"assumed",
"."
] | 072288277f807e7e388fdf424c3921c80576f3ab | https://github.com/JelleAalbers/multihist/blob/072288277f807e7e388fdf424c3921c80576f3ab/multihist.py#L187-L193 | train |
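A standalone NumPy version of the sampling recipe above: pick a bin with probability proportional to its count, then draw uniformly inside that bin. The bin data is invented for illustration:

```python
import numpy as np

rng = np.random.default_rng(0)
counts = np.array([1.0, 3.0, 6.0])             # invented bin counts
edges = np.array([0.0, 1.0, 2.0, 3.0])

centers = 0.5 * (edges[1:] + edges[:-1])
widths = np.diff(edges)
p = counts / counts.sum()                      # normalized histogram

bin_i = rng.choice(len(centers), size=5, p=p)  # one bin per sample
samples = centers[bin_i] + rng.uniform(-0.5, 0.5, size=5) * widths[bin_i]
print(samples)  # five draws, mostly from the heavy [2, 3) bin
```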
JelleAalbers/multihist | multihist.py | Hist1d.std | def std(self, bessel_correction=True):
"""Estimates std of underlying data, assuming each datapoint was exactly in the center of its bin."""
if bessel_correction:
n = self.n
bc = n / (n - 1)
else:
bc = 1
return np.sqrt(np.average((self.bin_centers - self.mean) ** 2, weights=self.histogram)) * bc | python | def std(self, bessel_correction=True):
"""Estimates std of underlying data, assuming each datapoint was exactly in the center of its bin."""
if bessel_correction:
n = self.n
bc = n / (n - 1)
else:
bc = 1
return np.sqrt(np.average((self.bin_centers - self.mean) ** 2, weights=self.histogram)) * bc | [
"def",
"std",
"(",
"self",
",",
"bessel_correction",
"=",
"True",
")",
":",
"if",
"bessel_correction",
":",
"n",
"=",
"self",
".",
"n",
"bc",
"=",
"n",
"/",
"(",
"n",
"-",
"1",
")",
"else",
":",
"bc",
"=",
"1",
"return",
"np",
".",
"sqrt",
"(",
"np",
".",
"average",
"(",
"(",
"self",
".",
"bin_centers",
"-",
"self",
".",
"mean",
")",
"**",
"2",
",",
"weights",
"=",
"self",
".",
"histogram",
")",
")",
"*",
"bc"
] | Estimates std of underlying data, assuming each datapoint was exactly in the center of its bin. | [
"Estimates",
"std",
"of",
"underlying",
"data",
"assuming",
"each",
"datapoint",
"was",
"exactly",
"in",
"the",
"center",
"of",
"its",
"bin",
"."
] | 072288277f807e7e388fdf424c3921c80576f3ab | https://github.com/JelleAalbers/multihist/blob/072288277f807e7e388fdf424c3921c80576f3ab/multihist.py#L205-L212 | train |
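Spelled out, the estimator is the histogram-weighted standard deviation, sqrt(sum_i w_i * (c_i - mean)^2 / sum_i w_i), scaled by a Bessel factor. One caveat worth hedging: the textbook correction multiplies the variance by n/(n-1), i.e. the std by sqrt(n/(n-1)), whereas the method above scales the std by the full n/(n-1); treat that as this library's choice. The sketch below shows the textbook form with invented bin data:

```python
import numpy as np

counts = np.array([2.0, 5.0, 3.0])    # invented bin counts (weights)
centers = np.array([0.5, 1.5, 2.5])   # matching bin centers

n = counts.sum()
mean = np.average(centers, weights=counts)
std_pop = np.sqrt(np.average((centers - mean) ** 2, weights=counts))
std_bessel = std_pop * np.sqrt(n / (n - 1))   # textbook correction
print(mean, std_pop, std_bessel)
```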
JelleAalbers/multihist | multihist.py | Hist1d.percentile | def percentile(self, percentile):
"""Return bin center nearest to percentile"""
return self.bin_centers[np.argmin(np.abs(self.cumulative_density * 100 - percentile))] | python | def percentile(self, percentile):
"""Return bin center nearest to percentile"""
return self.bin_centers[np.argmin(np.abs(self.cumulative_density * 100 - percentile))] | [
"def",
"percentile",
"(",
"self",
",",
"percentile",
")",
":",
"return",
"self",
".",
"bin_centers",
"[",
"np",
".",
"argmin",
"(",
"np",
".",
"abs",
"(",
"self",
".",
"cumulative_density",
"*",
"100",
"-",
"percentile",
")",
")",
"]"
] | Return bin center nearest to percentile | [
"Return",
"bin",
"center",
"nearest",
"to",
"percentile"
] | 072288277f807e7e388fdf424c3921c80576f3ab | https://github.com/JelleAalbers/multihist/blob/072288277f807e7e388fdf424c3921c80576f3ab/multihist.py#L238-L240 | train |
JelleAalbers/multihist | multihist.py | Histdd._data_to_hist | def _data_to_hist(self, data, **kwargs):
"""Return bin_edges, histogram array"""
if hasattr(self, 'bin_edges'):
kwargs.setdefault('bins', self.bin_edges)
if len(data) == 1 and isinstance(data[0], COLUMNAR_DATA_SOURCES):
data = data[0]
if self.axis_names is None:
raise ValueError("When histogramming from a columnar data source, "
"axis_names or dimensions is mandatory")
is_dask = False
if WE_HAVE_DASK:
is_dask = isinstance(data, dask.dataframe.DataFrame)
if is_dask:
fake_histogram = Histdd(axis_names=self.axis_names, bins=kwargs['bins'])
partial_hists = []
for partition in data.to_delayed():
ph = dask.delayed(Histdd)(partition, axis_names=self.axis_names, bins=kwargs['bins'])
ph = dask.delayed(lambda x: x.histogram)(ph)
ph = dask.array.from_delayed(ph,
shape=fake_histogram.histogram.shape,
dtype=fake_histogram.histogram.dtype)
partial_hists.append(ph)
partial_hists = dask.array.stack(partial_hists, axis=0)
compute_options = kwargs.get('compute_options', {})
for k, v in DEFAULT_DASK_COMPUTE_KWARGS.items():
compute_options.setdefault(k, v)
histogram = partial_hists.sum(axis=0).compute(**compute_options)
bin_edges = fake_histogram.bin_edges
return histogram, bin_edges
else:
data = np.vstack([data[x].values for x in self.axis_names])
data = np.array(data).T
return np.histogramdd(data,
bins=kwargs.get('bins'),
weights=kwargs.get('weights'),
range=kwargs.get('range')) | python | def _data_to_hist(self, data, **kwargs):
"""Return bin_edges, histogram array"""
if hasattr(self, 'bin_edges'):
kwargs.setdefault('bins', self.bin_edges)
if len(data) == 1 and isinstance(data[0], COLUMNAR_DATA_SOURCES):
data = data[0]
if self.axis_names is None:
raise ValueError("When histogramming from a columnar data source, "
"axis_names or dimensions is mandatory")
is_dask = False
if WE_HAVE_DASK:
is_dask = isinstance(data, dask.dataframe.DataFrame)
if is_dask:
fake_histogram = Histdd(axis_names=self.axis_names, bins=kwargs['bins'])
partial_hists = []
for partition in data.to_delayed():
ph = dask.delayed(Histdd)(partition, axis_names=self.axis_names, bins=kwargs['bins'])
ph = dask.delayed(lambda x: x.histogram)(ph)
ph = dask.array.from_delayed(ph,
shape=fake_histogram.histogram.shape,
dtype=fake_histogram.histogram.dtype)
partial_hists.append(ph)
partial_hists = dask.array.stack(partial_hists, axis=0)
compute_options = kwargs.get('compute_options', {})
for k, v in DEFAULT_DASK_COMPUTE_KWARGS.items():
compute_options.setdefault(k, v)
histogram = partial_hists.sum(axis=0).compute(**compute_options)
bin_edges = fake_histogram.bin_edges
return histogram, bin_edges
else:
data = np.vstack([data[x].values for x in self.axis_names])
data = np.array(data).T
return np.histogramdd(data,
bins=kwargs.get('bins'),
weights=kwargs.get('weights'),
range=kwargs.get('range')) | [
"def",
"_data_to_hist",
"(",
"self",
",",
"data",
",",
"**",
"kwargs",
")",
":",
"if",
"hasattr",
"(",
"self",
",",
"'bin_edges'",
")",
":",
"kwargs",
".",
"setdefault",
"(",
"'bins'",
",",
"self",
".",
"bin_edges",
")",
"if",
"len",
"(",
"data",
")",
"==",
"1",
"and",
"isinstance",
"(",
"data",
"[",
"0",
"]",
",",
"COLUMNAR_DATA_SOURCES",
")",
":",
"data",
"=",
"data",
"[",
"0",
"]",
"if",
"self",
".",
"axis_names",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"\"When histogramming from a columnar data source, \"",
"\"axis_names or dimensions is mandatory\"",
")",
"is_dask",
"=",
"False",
"if",
"WE_HAVE_DASK",
":",
"is_dask",
"=",
"isinstance",
"(",
"data",
",",
"dask",
".",
"dataframe",
".",
"DataFrame",
")",
"if",
"is_dask",
":",
"fake_histogram",
"=",
"Histdd",
"(",
"axis_names",
"=",
"self",
".",
"axis_names",
",",
"bins",
"=",
"kwargs",
"[",
"'bins'",
"]",
")",
"partial_hists",
"=",
"[",
"]",
"for",
"partition",
"in",
"data",
".",
"to_delayed",
"(",
")",
":",
"ph",
"=",
"dask",
".",
"delayed",
"(",
"Histdd",
")",
"(",
"partition",
",",
"axis_names",
"=",
"self",
".",
"axis_names",
",",
"bins",
"=",
"kwargs",
"[",
"'bins'",
"]",
")",
"ph",
"=",
"dask",
".",
"delayed",
"(",
"lambda",
"x",
":",
"x",
".",
"histogram",
")",
"(",
"ph",
")",
"ph",
"=",
"dask",
".",
"array",
".",
"from_delayed",
"(",
"ph",
",",
"shape",
"=",
"fake_histogram",
".",
"histogram",
".",
"shape",
",",
"dtype",
"=",
"fake_histogram",
".",
"histogram",
".",
"dtype",
")",
"partial_hists",
".",
"append",
"(",
"ph",
")",
"partial_hists",
"=",
"dask",
".",
"array",
".",
"stack",
"(",
"partial_hists",
",",
"axis",
"=",
"0",
")",
"compute_options",
"=",
"kwargs",
".",
"get",
"(",
"'compute_options'",
",",
"{",
"}",
")",
"for",
"k",
",",
"v",
"in",
"DEFAULT_DASK_COMPUTE_KWARGS",
".",
"items",
"(",
")",
":",
"compute_options",
".",
"setdefault",
"(",
"k",
",",
"v",
")",
"histogram",
"=",
"partial_hists",
".",
"sum",
"(",
"axis",
"=",
"0",
")",
".",
"compute",
"(",
"**",
"compute_options",
")",
"bin_edges",
"=",
"fake_histogram",
".",
"bin_edges",
"return",
"histogram",
",",
"bin_edges",
"else",
":",
"data",
"=",
"np",
".",
"vstack",
"(",
"[",
"data",
"[",
"x",
"]",
".",
"values",
"for",
"x",
"in",
"self",
".",
"axis_names",
"]",
")",
"data",
"=",
"np",
".",
"array",
"(",
"data",
")",
".",
"T",
"return",
"np",
".",
"histogramdd",
"(",
"data",
",",
"bins",
"=",
"kwargs",
".",
"get",
"(",
"'bins'",
")",
",",
"weights",
"=",
"kwargs",
".",
"get",
"(",
"'weights'",
")",
",",
"range",
"=",
"kwargs",
".",
"get",
"(",
"'range'",
")",
")"
] | Return histogram array, bin_edges | [
"Return",
"bin_edges",
"histogram",
"array"
] | 072288277f807e7e388fdf424c3921c80576f3ab | https://github.com/JelleAalbers/multihist/blob/072288277f807e7e388fdf424c3921c80576f3ab/multihist.py#L302-L345 | train |
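The dask branch follows a general map-reduce recipe: histogram each partition lazily against identical bin edges, wrap each delayed result as a dask array, stack, and sum in a single compute. A reduced sketch of the same pattern with plain NumPy arrays standing in for dataframe partitions (assumes dask is installed):

```python
import numpy as np
import dask
import dask.array as da

bins = np.linspace(0.0, 1.0, 11)
partitions = [np.random.rand(1000) for _ in range(4)]  # stand-in partitions

partial_hists = []
for part in partitions:
    # Histogram one partition lazily; identical bins keep shapes aligned.
    ph = dask.delayed(lambda x: np.histogram(x, bins=bins)[0].astype(np.int64))(part)
    ph = da.from_delayed(ph, shape=(len(bins) - 1,), dtype=np.int64)
    partial_hists.append(ph)

histogram = da.stack(partial_hists, axis=0).sum(axis=0).compute()
print(histogram.sum())  # 4000: every sample counted exactly once
```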
JelleAalbers/multihist | multihist.py | Histdd.axis_names_without | def axis_names_without(self, axis):
"""Return axis names without axis, or None if axis_names is None"""
if self.axis_names is None:
return None
return itemgetter(*self.other_axes(axis))(self.axis_names) | python | def axis_names_without(self, axis):
"""Return axis names without axis, or None if axis_names is None"""
if self.axis_names is None:
return None
return itemgetter(*self.other_axes(axis))(self.axis_names) | [
"def",
"axis_names_without",
"(",
"self",
",",
"axis",
")",
":",
"if",
"self",
".",
"axis_names",
"is",
"None",
":",
"return",
"None",
"return",
"itemgetter",
"(",
"*",
"self",
".",
"other_axes",
"(",
"axis",
")",
")",
"(",
"self",
".",
"axis_names",
")"
] | Return axis names without axis, or None if axis_names is None | [
"Return",
"axis",
"names",
"without",
"axis",
"or",
"None",
"if",
"axis_names",
"is",
"None"
] | 072288277f807e7e388fdf424c3921c80576f3ab | https://github.com/JelleAalbers/multihist/blob/072288277f807e7e388fdf424c3921c80576f3ab/multihist.py#L369-L373 | train |
JelleAalbers/multihist | multihist.py | Histdd.bin_centers | def bin_centers(self, axis=None):
"""Return bin centers along an axis, or if axis=None, list of bin_centers along each axis"""
if axis is None:
return np.array([self.bin_centers(axis=i) for i in range(self.dimensions)])
axis = self.get_axis_number(axis)
return 0.5 * (self.bin_edges[axis][1:] + self.bin_edges[axis][:-1]) | python | def bin_centers(self, axis=None):
"""Return bin centers along an axis, or if axis=None, list of bin_centers along each axis"""
if axis is None:
return np.array([self.bin_centers(axis=i) for i in range(self.dimensions)])
axis = self.get_axis_number(axis)
return 0.5 * (self.bin_edges[axis][1:] + self.bin_edges[axis][:-1]) | [
"def",
"bin_centers",
"(",
"self",
",",
"axis",
"=",
"None",
")",
":",
"if",
"axis",
"is",
"None",
":",
"return",
"np",
".",
"array",
"(",
"[",
"self",
".",
"bin_centers",
"(",
"axis",
"=",
"i",
")",
"for",
"i",
"in",
"range",
"(",
"self",
".",
"dimensions",
")",
"]",
")",
"axis",
"=",
"self",
".",
"get_axis_number",
"(",
"axis",
")",
"return",
"0.5",
"*",
"(",
"self",
".",
"bin_edges",
"[",
"axis",
"]",
"[",
"1",
":",
"]",
"+",
"self",
".",
"bin_edges",
"[",
"axis",
"]",
"[",
":",
"-",
"1",
"]",
")"
] | Return bin centers along an axis, or if axis=None, list of bin_centers along each axis | [
"Return",
"bin",
"centers",
"along",
"an",
"axis",
"or",
"if",
"axis",
"=",
"None",
"list",
"of",
"bin_centers",
"along",
"each",
"axis"
] | 072288277f807e7e388fdf424c3921c80576f3ab | https://github.com/JelleAalbers/multihist/blob/072288277f807e7e388fdf424c3921c80576f3ab/multihist.py#L378-L383 | train |
JelleAalbers/multihist | multihist.py | Histdd.get_axis_bin_index | def get_axis_bin_index(self, value, axis):
"""Returns index along axis of bin in histogram which contains value
Inclusive on both endpoints
"""
axis = self.get_axis_number(axis)
bin_edges = self.bin_edges[axis]
# The right bin edge of np.histogram is inclusive:
if value == bin_edges[-1]:
# Minus two: one for bin edges rather than centers, one for 0-based indexing
return len(bin_edges) - 2
# For all other bins, it is exclusive.
result = np.searchsorted(bin_edges, [value], side='right')[0] - 1
if not 0 <= result <= len(bin_edges) - 1:
raise CoordinateOutOfRangeException("Value %s is not in range (%s-%s) of axis %s" % (
value, bin_edges[0], bin_edges[-1], axis))
return result | python | def get_axis_bin_index(self, value, axis):
"""Returns index along axis of bin in histogram which contains value
Inclusive on both endpoints
"""
axis = self.get_axis_number(axis)
bin_edges = self.bin_edges[axis]
# The right bin edge of np.histogram is inclusive:
if value == bin_edges[-1]:
# Minus two: one for bin edges rather than centers, one for 0-based indexing
return len(bin_edges) - 2
# For all other bins, it is exclusive.
result = np.searchsorted(bin_edges, [value], side='right')[0] - 1
if not 0 <= result <= len(bin_edges) - 1:
raise CoordinateOutOfRangeException("Value %s is not in range (%s-%s) of axis %s" % (
value, bin_edges[0], bin_edges[-1], axis))
return result | [
"def",
"get_axis_bin_index",
"(",
"self",
",",
"value",
",",
"axis",
")",
":",
"axis",
"=",
"self",
".",
"get_axis_number",
"(",
"axis",
")",
"bin_edges",
"=",
"self",
".",
"bin_edges",
"[",
"axis",
"]",
"if",
"value",
"==",
"bin_edges",
"[",
"-",
"1",
"]",
":",
"return",
"len",
"(",
"bin_edges",
")",
"-",
"2",
"result",
"=",
"np",
".",
"searchsorted",
"(",
"bin_edges",
",",
"[",
"value",
"]",
",",
"side",
"=",
"'right'",
")",
"[",
"0",
"]",
"-",
"1",
"if",
"not",
"0",
"<=",
"result",
"<=",
"len",
"(",
"bin_edges",
")",
"-",
"1",
":",
"raise",
"CoordinateOutOfRangeException",
"(",
"\"Value %s is not in range (%s-%s) of axis %s\"",
"%",
"(",
"value",
",",
"bin_edges",
"[",
"0",
"]",
",",
"bin_edges",
"[",
"-",
"1",
"]",
",",
"axis",
")",
")",
"return",
"result"
] | Returns index along axis of bin in histogram which contains value
Inclusive on both endpoints | [
"Returns",
"index",
"along",
"axis",
"of",
"bin",
"in",
"histogram",
"which",
"contains",
"value",
"Inclusive",
"on",
"both",
"endpoints"
] | 072288277f807e7e388fdf424c3921c80576f3ab | https://github.com/JelleAalbers/multihist/blob/072288277f807e7e388fdf424c3921c80576f3ab/multihist.py#L385-L400 | train |
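The `side='right'` / minus-one combination gives left-inclusive, right-exclusive bins, with the special case making the final right edge inclusive, mirroring `np.histogram`. A small table of lookups against invented edges:

```python
import numpy as np

bin_edges = np.array([0.0, 1.0, 2.0, 3.0])

def axis_bin_index(value):
    # Same lookup as above: rightmost edge inclusive, others exclusive.
    if value == bin_edges[-1]:
        return len(bin_edges) - 2
    return np.searchsorted(bin_edges, [value], side='right')[0] - 1

for v in (0.0, 0.5, 1.0, 2.999, 3.0):
    print(v, axis_bin_index(v))
# 0.0 -> 0, 0.5 -> 0, 1.0 -> 1, 2.999 -> 2, 3.0 -> 2
```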
JelleAalbers/multihist | multihist.py | Histdd.get_bin_indices | def get_bin_indices(self, values):
"""Returns index tuple in histogram of bin which contains value"""
return tuple([self.get_axis_bin_index(values[ax_i], ax_i)
for ax_i in range(self.dimensions)]) | python | def get_bin_indices(self, values):
"""Returns index tuple in histogram of bin which contains value"""
return tuple([self.get_axis_bin_index(values[ax_i], ax_i)
for ax_i in range(self.dimensions)]) | [
"def",
"get_bin_indices",
"(",
"self",
",",
"values",
")",
":",
"return",
"tuple",
"(",
"[",
"self",
".",
"get_axis_bin_index",
"(",
"values",
"[",
"ax_i",
"]",
",",
"ax_i",
")",
"for",
"ax_i",
"in",
"range",
"(",
"self",
".",
"dimensions",
")",
"]",
")"
] | Returns index tuple in histogram of bin which contains value | [
"Returns",
"index",
"tuple",
"in",
"histogram",
"of",
"bin",
"which",
"contains",
"value"
] | 072288277f807e7e388fdf424c3921c80576f3ab | https://github.com/JelleAalbers/multihist/blob/072288277f807e7e388fdf424c3921c80576f3ab/multihist.py#L402-L405 | train |
JelleAalbers/multihist | multihist.py | Histdd.all_axis_bin_centers | def all_axis_bin_centers(self, axis):
"""Return ndarray of same shape as histogram containing bin center value along axis at each point"""
# Arcane hack that seems to work, at least in 3d... hope
axis = self.get_axis_number(axis)
return np.meshgrid(*self.bin_centers(), indexing='ij')[axis] | python | def all_axis_bin_centers(self, axis):
"""Return ndarray of same shape as histogram containing bin center value along axis at each point"""
# Arcane hack that seems to work, at least in 3d... hope
axis = self.get_axis_number(axis)
return np.meshgrid(*self.bin_centers(), indexing='ij')[axis] | [
"def",
"all_axis_bin_centers",
"(",
"self",
",",
"axis",
")",
":",
"axis",
"=",
"self",
".",
"get_axis_number",
"(",
"axis",
")",
"return",
"np",
".",
"meshgrid",
"(",
"*",
"self",
".",
"bin_centers",
"(",
")",
",",
"indexing",
"=",
"'ij'",
")",
"[",
"axis",
"]"
] | Return ndarray of same shape as histogram containing bin center value along axis at each point | [
"Return",
"ndarray",
"of",
"same",
"shape",
"as",
"histogram",
"containing",
"bin",
"center",
"value",
"along",
"axis",
"at",
"each",
"point"
] | 072288277f807e7e388fdf424c3921c80576f3ab | https://github.com/JelleAalbers/multihist/blob/072288277f807e7e388fdf424c3921c80576f3ab/multihist.py#L407-L411 | train |
JelleAalbers/multihist | multihist.py | Histdd.sum | def sum(self, axis):
"""Sums all data along axis, returns d-1 dimensional histogram"""
axis = self.get_axis_number(axis)
if self.dimensions == 2:
new_hist = Hist1d
else:
new_hist = Histdd
return new_hist.from_histogram(np.sum(self.histogram, axis=axis),
bin_edges=itemgetter(*self.other_axes(axis))(self.bin_edges),
axis_names=self.axis_names_without(axis)) | python | def sum(self, axis):
"""Sums all data along axis, returns d-1 dimensional histogram"""
axis = self.get_axis_number(axis)
if self.dimensions == 2:
new_hist = Hist1d
else:
new_hist = Histdd
return new_hist.from_histogram(np.sum(self.histogram, axis=axis),
bin_edges=itemgetter(*self.other_axes(axis))(self.bin_edges),
axis_names=self.axis_names_without(axis)) | [
"def",
"sum",
"(",
"self",
",",
"axis",
")",
":",
"axis",
"=",
"self",
".",
"get_axis_number",
"(",
"axis",
")",
"if",
"self",
".",
"dimensions",
"==",
"2",
":",
"new_hist",
"=",
"Hist1d",
"else",
":",
"new_hist",
"=",
"Histdd",
"return",
"new_hist",
".",
"from_histogram",
"(",
"np",
".",
"sum",
"(",
"self",
".",
"histogram",
",",
"axis",
"=",
"axis",
")",
",",
"bin_edges",
"=",
"itemgetter",
"(",
"*",
"self",
".",
"other_axes",
"(",
"axis",
")",
")",
"(",
"self",
".",
"bin_edges",
")",
",",
"axis_names",
"=",
"self",
".",
"axis_names_without",
"(",
"axis",
")",
")"
] | Sums all data along axis, returns d-1 dimensional histogram | [
"Sums",
"all",
"data",
"along",
"axis",
"returns",
"d",
"-",
"1",
"dimensional",
"histogram"
] | 072288277f807e7e388fdf424c3921c80576f3ab | https://github.com/JelleAalbers/multihist/blob/072288277f807e7e388fdf424c3921c80576f3ab/multihist.py#L416-L425 | train |
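A sketch of `Histdd.sum`; the `axis_names` keyword is assumed to be accepted by the constructor, matching the named-axis lookups via `get_axis_number` in the code above:

```python
import numpy as np
from multihist import Histdd

mh = Histdd(np.random.rand(1000), np.random.rand(1000),
            bins=10, axis_names=['x', 'y'])

# Summing a 2d histogram over one axis returns a Hist1d over the other;
# total counts are preserved
h = mh.sum('y')
print(type(h).__name__, h.histogram.sum())  # Hist1d 1000.0
```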
JelleAalbers/multihist | multihist.py | Histdd.slicesum | def slicesum(self, start, stop=None, axis=0):
"""Slices the histogram along axis, then sums over that slice, returning a d-1 dimensional histogram"""
return self.slice(start, stop, axis).sum(axis) | python | def slicesum(self, start, stop=None, axis=0):
"""Slices the histogram along axis, then sums over that slice, returning a d-1 dimensional histogram"""
return self.slice(start, stop, axis).sum(axis) | [
"def",
"slicesum",
"(",
"self",
",",
"start",
",",
"stop",
"=",
"None",
",",
"axis",
"=",
"0",
")",
":",
"return",
"self",
".",
"slice",
"(",
"start",
",",
"stop",
",",
"axis",
")",
".",
"sum",
"(",
"axis",
")"
] | Slices the histogram along axis, then sums over that slice, returning a d-1 dimensional histogram | [
"Slices",
"the",
"histogram",
"along",
"axis",
"then",
"sums",
"over",
"that",
"slice",
"returning",
"a",
"d",
"-",
"1",
"dimensional",
"histogram"
] | 072288277f807e7e388fdf424c3921c80576f3ab | https://github.com/JelleAalbers/multihist/blob/072288277f807e7e388fdf424c3921c80576f3ab/multihist.py#L442-L444 | train |
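A sketch of `slicesum`; per the `get_axis_bin_index` record above, `start` and `stop` are coordinate values (inclusive on both endpoints), not bin indices:

```python
import numpy as np
from multihist import Histdd

mh = Histdd(np.random.rand(1000), np.random.rand(1000), bins=10)

# Select the bins containing x = 0.2 .. 0.5 along axis 0, then sum that
# slice away, leaving a 1d histogram over the remaining axis
h = mh.slicesum(0.2, 0.5, axis=0)
print(h.histogram)
```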
JelleAalbers/multihist | multihist.py | Histdd.projection | def projection(self, axis):
"""Sums all data along all other axes, then return Hist1D"""
axis = self.get_axis_number(axis)
projected_hist = np.sum(self.histogram, axis=self.other_axes(axis))
return Hist1d.from_histogram(projected_hist, bin_edges=self.bin_edges[axis]) | python | def projection(self, axis):
"""Sums all data along all other axes, then return Hist1D"""
axis = self.get_axis_number(axis)
projected_hist = np.sum(self.histogram, axis=self.other_axes(axis))
return Hist1d.from_histogram(projected_hist, bin_edges=self.bin_edges[axis]) | [
"def",
"projection",
"(",
"self",
",",
"axis",
")",
":",
"axis",
"=",
"self",
".",
"get_axis_number",
"(",
"axis",
")",
"projected_hist",
"=",
"np",
".",
"sum",
"(",
"self",
".",
"histogram",
",",
"axis",
"=",
"self",
".",
"other_axes",
"(",
"axis",
")",
")",
"return",
"Hist1d",
".",
"from_histogram",
"(",
"projected_hist",
",",
"bin_edges",
"=",
"self",
".",
"bin_edges",
"[",
"axis",
"]",
")"
] | Sums all data along all other axes, then return Hist1D | [
"Sums",
"all",
"data",
"along",
"all",
"other",
"axes",
"then",
"return",
"Hist1D"
] | 072288277f807e7e388fdf424c3921c80576f3ab | https://github.com/JelleAalbers/multihist/blob/072288277f807e7e388fdf424c3921c80576f3ab/multihist.py#L446-L450 | train |
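A sketch of `projection` on a 3d histogram, same constructor assumption as above:

```python
import numpy as np
from multihist import Histdd

mh = Histdd(np.random.rand(1000), np.random.rand(1000), np.random.rand(1000),
            bins=5)

# Collapse every axis except axis 2; counts are preserved
p = mh.projection(2)
print(p.histogram.sum())  # 1000.0
```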
JelleAalbers/multihist | multihist.py | Histdd.cumulate | def cumulate(self, axis):
"""Returns new histogram with all data cumulated along axis."""
axis = self.get_axis_number(axis)
return Histdd.from_histogram(np.cumsum(self.histogram, axis=axis),
bin_edges=self.bin_edges,
axis_names=self.axis_names) | python | def cumulate(self, axis):
"""Returns new histogram with all data cumulated along axis."""
axis = self.get_axis_number(axis)
return Histdd.from_histogram(np.cumsum(self.histogram, axis=axis),
bin_edges=self.bin_edges,
axis_names=self.axis_names) | [
"def",
"cumulate",
"(",
"self",
",",
"axis",
")",
":",
"axis",
"=",
"self",
".",
"get_axis_number",
"(",
"axis",
")",
"return",
"Histdd",
".",
"from_histogram",
"(",
"np",
".",
"cumsum",
"(",
"self",
".",
"histogram",
",",
"axis",
"=",
"axis",
")",
",",
"bin_edges",
"=",
"self",
".",
"bin_edges",
",",
"axis_names",
"=",
"self",
".",
"axis_names",
")"
] | Returns new histogram with all data cumulated along axis. | [
"Returns",
"new",
"histogram",
"with",
"all",
"data",
"cumulated",
"along",
"axis",
"."
] | 072288277f807e7e388fdf424c3921c80576f3ab | https://github.com/JelleAalbers/multihist/blob/072288277f807e7e388fdf424c3921c80576f3ab/multihist.py#L456-L461 | train |
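A sketch of `cumulate`, which is a plain `np.cumsum` along the chosen axis:

```python
import numpy as np
from multihist import Histdd

mh = Histdd(np.random.rand(1000), np.random.rand(1000), bins=10)

c = mh.cumulate(0)
# After cumulating along axis 0, the last row holds each column's total
print(c.histogram[-1])
```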
JelleAalbers/multihist | multihist.py | Histdd.central_likelihood | def central_likelihood(self, axis):
"""Returns new histogram with all values replaced by their central likelihoods along axis."""
result = self.cumulative_density(axis)
result.histogram = 1 - 2 * np.abs(result.histogram - 0.5)
return result | python | def central_likelihood(self, axis):
"""Returns new histogram with all values replaced by their central likelihoods along axis."""
result = self.cumulative_density(axis)
result.histogram = 1 - 2 * np.abs(result.histogram - 0.5)
return result | [
"def",
"central_likelihood",
"(",
"self",
",",
"axis",
")",
":",
"result",
"=",
"self",
".",
"cumulative_density",
"(",
"axis",
")",
"result",
".",
"histogram",
"=",
"1",
"-",
"2",
"*",
"np",
".",
"abs",
"(",
"result",
".",
"histogram",
"-",
"0.5",
")",
"return",
"result"
] | Returns new histogram with all values replaced by their central likelihoods along axis. | [
"Returns",
"new",
"histogram",
"with",
"all",
"values",
"replaced",
"by",
"their",
"central",
"likelihoods",
"along",
"axis",
"."
] | 072288277f807e7e388fdf424c3921c80576f3ab | https://github.com/JelleAalbers/multihist/blob/072288277f807e7e388fdf424c3921c80576f3ab/multihist.py#L482-L486 | train |
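A sketch of `central_likelihood`, which maps each cumulative density `F` (from `cumulative_density`, defined elsewhere in the class) to `1 - 2|F - 0.5|`: 1 at the median along the axis, falling towards 0 in the tails:

```python
import numpy as np
from multihist import Histdd

mh = Histdd(np.random.rand(1000), np.random.rand(1000), bins=10)

cl = mh.central_likelihood(0)
print(cl.histogram.min() >= 0, cl.histogram.max() <= 1)  # True True
```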
JelleAalbers/multihist | multihist.py | Histdd.lookup_hist | def lookup_hist(self, mh):
"""Return histogram within binning of Histdd mh, with values looked up in this histogram.
This is not rebinning: no interpolation/renormalization is performed.
It's just a lookup.
"""
result = mh.similar_blank_histogram()
points = np.stack([mh.all_axis_bin_centers(i)
for i in range(mh.dimensions)]).reshape(mh.dimensions, -1)
values = self.lookup(*points)
result.histogram = values.reshape(result.histogram.shape)
return result | python | def lookup_hist(self, mh):
"""Return histogram within binning of Histdd mh, with values looked up in this histogram.
This is not rebinning: no interpolation/renormalization is performed.
It's just a lookup.
"""
result = mh.similar_blank_histogram()
points = np.stack([mh.all_axis_bin_centers(i)
for i in range(mh.dimensions)]).reshape(mh.dimensions, -1)
values = self.lookup(*points)
result.histogram = values.reshape(result.histogram.shape)
return result | [
"def",
"lookup_hist",
"(",
"self",
",",
"mh",
")",
":",
"result",
"=",
"mh",
".",
"similar_blank_histogram",
"(",
")",
"points",
"=",
"np",
".",
"stack",
"(",
"[",
"mh",
".",
"all_axis_bin_centers",
"(",
"i",
")",
"for",
"i",
"in",
"range",
"(",
"mh",
".",
"dimensions",
")",
"]",
")",
".",
"reshape",
"(",
"mh",
".",
"dimensions",
",",
"-",
"1",
")",
"values",
"=",
"self",
".",
"lookup",
"(",
"*",
"points",
")",
"result",
".",
"histogram",
"=",
"values",
".",
"reshape",
"(",
"result",
".",
"histogram",
".",
"shape",
")",
"return",
"result"
] | Return histogram within binning of Histdd mh, with values looked up in this histogram.
This is not rebinning: no interpolation/renormalization is performed.
It's just a lookup. | [
"Return",
"histogram",
"within",
"binning",
"of",
"Histdd",
"mh",
"with",
"values",
"looked",
"up",
"in",
"this",
"histogram",
"."
] | 072288277f807e7e388fdf424c3921c80576f3ab | https://github.com/JelleAalbers/multihist/blob/072288277f807e7e388fdf424c3921c80576f3ab/multihist.py#L685-L696 | train |
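A sketch of `lookup_hist`: evaluate one histogram at the bin centers of another histogram's binning. It is a pure lookup, with no rebinning or renormalization:

```python
import numpy as np
from multihist import Histdd

coarse = Histdd(np.random.rand(1000), np.random.rand(1000), bins=5)
fine = Histdd(np.random.rand(10), np.random.rand(10), bins=20)

# Values of `coarse` at the bin centers of `fine`'s binning
looked_up = coarse.lookup_hist(fine)
print(looked_up.histogram.shape)  # (20, 20)
```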
acutesoftware/AIKIF | scripts/examples/doc_roadmap.py | create_roadmap_doc | def create_roadmap_doc(dat, opFile):
"""
takes a dictionary read from a yaml file and converts
it to the roadmap documentation
"""
op = format_title('Roadmap for AIKIF')
for h1 in dat['projects']:
op += format_h1(h1)
if dat[h1] is None:
op += '(No details)\n'
else:
for h2 in dat[h1]:
op += '\n' + format_h2(h2)
if dat[h1][h2] is None:
op += '(blank text)\n'
else:
for txt in dat[h1][h2]:
op += ' - ' + txt + '\n'
op += '\n'
with open(opFile, 'w') as f:
f.write(op) | python | def create_roadmap_doc(dat, opFile):
"""
takes a dictionary read from a yaml file and converts
it to the roadmap documentation
"""
op = format_title('Roadmap for AIKIF')
for h1 in dat['projects']:
op += format_h1(h1)
if dat[h1] is None:
op += '(No details)\n'
else:
for h2 in dat[h1]:
op += '\n' + format_h2(h2)
if dat[h1][h2] is None:
op += '(blank text)\n'
else:
for txt in dat[h1][h2]:
op += ' - ' + txt + '\n'
op += '\n'
with open(opFile, 'w') as f:
f.write(op) | [
"def",
"create_roadmap_doc",
"(",
"dat",
",",
"opFile",
")",
":",
"op",
"=",
"format_title",
"(",
"'Roadmap for AIKIF'",
")",
"for",
"h1",
"in",
"dat",
"[",
"'projects'",
"]",
":",
"op",
"+=",
"format_h1",
"(",
"h1",
")",
"if",
"dat",
"[",
"h1",
"]",
"is",
"None",
":",
"op",
"+=",
"'(No details)\\n'",
"else",
":",
"for",
"h2",
"in",
"dat",
"[",
"h1",
"]",
":",
"op",
"+=",
"'\\n'",
"+",
"format_h2",
"(",
"h2",
")",
"if",
"dat",
"[",
"h1",
"]",
"[",
"h2",
"]",
"is",
"None",
":",
"op",
"+=",
"'(blank text)\\n'",
"else",
":",
"for",
"txt",
"in",
"dat",
"[",
"h1",
"]",
"[",
"h2",
"]",
":",
"op",
"+=",
"' - '",
"+",
"txt",
"+",
"'\\n'",
"op",
"+=",
"'\\n'",
"with",
"open",
"(",
"opFile",
",",
"'w'",
")",
"as",
"f",
":",
"f",
".",
"write",
"(",
"op",
")"
] | takes a dictionary read from a yaml file and converts
it to the roadmap documentation | [
"takes",
"a",
"dictionary",
"read",
"from",
"a",
"yaml",
"file",
"and",
"converts",
"it",
"to",
"the",
"roadmap",
"documentation"
] | fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03 | https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/scripts/examples/doc_roadmap.py#L20-L41 | train |
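The dict shape `create_roadmap_doc` expects, inferred from its loops: `dat['projects']` names the h1 sections, and each section maps sub-headings to lists of bullet lines, or `None`. The import path and file name here are illustrative:

```python
from doc_roadmap import create_roadmap_doc  # scripts/examples/doc_roadmap.py

dat = {
    'projects': ['core', 'web'],                 # h1 headings, in order
    'core': {'logging': ['add log rotation',     # h2 -> bullet lines
                         'unify log format']},
    'web': None,                                 # rendered as '(No details)'
}
create_roadmap_doc(dat, 'roadmap.txt')
```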
acutesoftware/AIKIF | aikif/toolbox/cls_grid.py | Grid.clear | def clear(self):
"""
Clears grid to be EMPTY
"""
self.grid = [[EMPTY for dummy_col in range(self.grid_width)] for dummy_row in range(self.grid_height)] | python | def clear(self):
"""
Clears grid to be EMPTY
"""
self.grid = [[EMPTY for dummy_col in range(self.grid_width)] for dummy_row in range(self.grid_height)] | [
"def",
"clear",
"(",
"self",
")",
":",
"self",
".",
"grid",
"=",
"[",
"[",
"EMPTY",
"for",
"dummy_col",
"in",
"range",
"(",
"self",
".",
"grid_width",
")",
"]",
"for",
"dummy_row",
"in",
"range",
"(",
"self",
".",
"grid_height",
")",
"]"
] | Clears grid to be EMPTY | [
"Clears",
"grid",
"to",
"be",
"EMPTY"
] | fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03 | https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/toolbox/cls_grid.py#L34-L38 | train |
acutesoftware/AIKIF | aikif/toolbox/cls_grid.py | Grid.save | def save(self, fname):
""" saves a grid to file as ASCII text """
try:
with open(fname, "w") as f:
f.write(str(self))
except Exception as ex:
print('ERROR = cant save grid results to ' + fname + str(ex)) | python | def save(self, fname):
""" saves a grid to file as ASCII text """
try:
with open(fname, "w") as f:
f.write(str(self))
except Exception as ex:
print('ERROR = cant save grid results to ' + fname + str(ex)) | [
"def",
"save",
"(",
"self",
",",
"fname",
")",
":",
"try",
":",
"with",
"open",
"(",
"fname",
",",
"\"w\"",
")",
"as",
"f",
":",
"f",
".",
"write",
"(",
"str",
"(",
"self",
")",
")",
"except",
"Exception",
"as",
"ex",
":",
"print",
"(",
"'ERROR = cant save grid results to '",
"+",
"fname",
"+",
"str",
"(",
"ex",
")",
")"
] | saves a grid to file as ASCII text | [
"saves",
"a",
"grid",
"to",
"file",
"as",
"ASCII",
"text"
] | fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03 | https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/toolbox/cls_grid.py#L52-L58 | train |
acutesoftware/AIKIF | aikif/toolbox/cls_grid.py | Grid.load | def load(self, fname):
""" loads a ASCII text file grid to self """
# get height and width of grid from file
self.grid_width = 4
self.grid_height = 4
# re-read the file and load it
self.grid = [[0 for dummy_l in range(self.grid_width)] for dummy_l in range(self.grid_height)]
with open(fname, 'r') as f:
for row_num, row in enumerate(f):
if row.strip('\n') == '':
break
for col_num, col in enumerate(row.strip('\n')):
self.set_tile(row_num, col_num, col) | python | def load(self, fname):
""" loads a ASCII text file grid to self """
# get height and width of grid from file
self.grid_width = 4
self.grid_height = 4
# re-read the file and load it
self.grid = [[0 for dummy_l in range(self.grid_width)] for dummy_l in range(self.grid_height)]
with open(fname, 'r') as f:
for row_num, row in enumerate(f):
if row.strip('\n') == '':
break
for col_num, col in enumerate(row.strip('\n')):
self.set_tile(row_num, col_num, col) | [
"def",
"load",
"(",
"self",
",",
"fname",
")",
":",
"self",
".",
"grid_width",
"=",
"4",
"self",
".",
"grid_height",
"=",
"4",
"self",
".",
"grid",
"=",
"[",
"[",
"0",
"for",
"dummy_l",
"in",
"range",
"(",
"self",
".",
"grid_width",
")",
"]",
"for",
"dummy_l",
"in",
"range",
"(",
"self",
".",
"grid_height",
")",
"]",
"with",
"open",
"(",
"fname",
",",
"'r'",
")",
"as",
"f",
":",
"for",
"row_num",
",",
"row",
"in",
"enumerate",
"(",
"f",
")",
":",
"if",
"row",
".",
"strip",
"(",
"'\\n'",
")",
"==",
"''",
":",
"break",
"for",
"col_num",
",",
"col",
"in",
"enumerate",
"(",
"row",
".",
"strip",
"(",
"'\\n'",
")",
")",
":",
"self",
".",
"set_tile",
"(",
"row_num",
",",
"col_num",
",",
"col",
")"
] | loads an ASCII text file grid to self | [
"loads",
"a",
"ASCII",
"text",
"file",
"grid",
"to",
"self"
] | fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03 | https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/toolbox/cls_grid.py#L62-L76 | train |
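A round-trip sketch for `save`/`load`; the `Grid(height, width, pieces)` constructor call is a hypothetical stand-in (check `cls_grid.Grid.__init__`), and note from the code above that `load()` currently hardcodes a 4x4 grid despite its "read from file" comment:

```python
from aikif.toolbox.cls_grid import Grid

g = Grid(4, 4, ['X', 'O'])   # hypothetical constructor arguments
g.set_tile(0, 0, 'X')
g.save('grid.txt')           # writes str(self) as ASCII text

g2 = Grid(4, 4, ['X', 'O'])
g2.load('grid.txt')          # parses one character per cell, row by row;
                             # width/height are fixed at 4 in the current code
```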
acutesoftware/AIKIF | aikif/toolbox/cls_grid.py | Grid.extract_col | def extract_col(self, col):
"""
get column number 'col'
"""
new_col = [row[col] for row in self.grid]
return new_col | python | def extract_col(self, col):
"""
get column number 'col'
"""
new_col = [row[col] for row in self.grid]
return new_col | [
"def",
"extract_col",
"(",
"self",
",",
"col",
")",
":",
"new_col",
"=",
"[",
"row",
"[",
"col",
"]",
"for",
"row",
"in",
"self",
".",
"grid",
"]",
"return",
"new_col"
] | get column number 'col' | [
"get",
"column",
"number",
"col"
] | fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03 | https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/toolbox/cls_grid.py#L103-L108 | train |
acutesoftware/AIKIF | aikif/toolbox/cls_grid.py | Grid.extract_row | def extract_row(self, row):
"""
get row number 'row'
"""
new_row = []
for col in range(self.get_grid_width()):
new_row.append(self.get_tile(row, col))
return new_row | python | def extract_row(self, row):
"""
get row number 'row'
"""
new_row = []
for col in range(self.get_grid_width()):
new_row.append(self.get_tile(row, col))
return new_row | [
"def",
"extract_row",
"(",
"self",
",",
"row",
")",
":",
"new_row",
"=",
"[",
"]",
"for",
"col",
"in",
"range",
"(",
"self",
".",
"get_grid_width",
"(",
")",
")",
":",
"new_row",
".",
"append",
"(",
"self",
".",
"get_tile",
"(",
"row",
",",
"col",
")",
")",
"return",
"new_row"
] | get row number 'row' | [
"get",
"row",
"number",
"row"
] | fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03 | https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/toolbox/cls_grid.py#L110-L117 | train |
acutesoftware/AIKIF | aikif/toolbox/cls_grid.py | Grid.replace_row | def replace_row(self, line, ndx):
"""
replace a grid's row at index 'ndx' with 'line'
"""
for col in range(len(line)):
self.set_tile(ndx, col, line[col]) | python | def replace_row(self, line, ndx):
"""
replace a grid's row at index 'ndx' with 'line'
"""
for col in range(len(line)):
self.set_tile(ndx, col, line[col]) | [
"def",
"replace_row",
"(",
"self",
",",
"line",
",",
"ndx",
")",
":",
"for",
"col",
"in",
"range",
"(",
"len",
"(",
"line",
")",
")",
":",
"self",
".",
"set_tile",
"(",
"ndx",
",",
"col",
",",
"line",
"[",
"col",
"]",
")"
] | replace a grid's row at index 'ndx' with 'line' | [
"replace",
"a",
"grids",
"row",
"at",
"index",
"ndx",
"with",
"line"
] | fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03 | https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/toolbox/cls_grid.py#L119-L124 | train |
acutesoftware/AIKIF | aikif/toolbox/cls_grid.py | Grid.replace_col | def replace_col(self, line, ndx):
"""
replace a grid's column at index 'ndx' with 'line'
"""
for row in range(len(line)):
self.set_tile(row, ndx, line[row]) | python | def replace_col(self, line, ndx):
"""
replace a grid's column at index 'ndx' with 'line'
"""
for row in range(len(line)):
self.set_tile(row, ndx, line[row]) | [
"def",
"replace_col",
"(",
"self",
",",
"line",
",",
"ndx",
")",
":",
"for",
"row",
"in",
"range",
"(",
"len",
"(",
"line",
")",
")",
":",
"self",
".",
"set_tile",
"(",
"row",
",",
"ndx",
",",
"line",
"[",
"row",
"]",
")"
] | replace a grid's column at index 'ndx' with 'line' | [
"replace",
"a",
"grids",
"column",
"at",
"index",
"ndx",
"with",
"line"
] | fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03 | https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/toolbox/cls_grid.py#L126-L131 | train |
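A combined sketch for the extract/replace row and column helpers above; again the constructor is a hypothetical stand-in:

```python
from aikif.toolbox.cls_grid import Grid

g = Grid(4, 4, ['X', 'O'])    # hypothetical constructor arguments

row = g.extract_row(1)        # list of the 4 tiles in row 1
col = g.extract_col(2)        # list of the 4 tiles in column 2

g.replace_row(['X'] * 4, 1)   # overwrite row 1 in place
g.replace_col(['O'] * 4, 2)   # overwrite column 2 in place
```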
acutesoftware/AIKIF | aikif/toolbox/cls_grid.py | Grid.new_tile | def new_tile(self, num=1):
"""
Create a new tile in a randomly selected empty
square. The tile should be 2 90% of the time and
4 10% of the time.
"""
for _ in range(num):
if random.random() > .5:
new_tile = self.pieces[0]
else:
new_tile = self.pieces[1]
# check for game over
blanks = self.count_blank_positions()
if blanks == 0:
print ("GAME OVER")
else:
res = self.find_random_blank_cell()
row = res[0]
col = res[1]
self.set_tile(row, col, new_tile) | python | def new_tile(self, num=1):
"""
Create a new tile in a randomly selected empty
square. The tile should be 2 90% of the time and
4 10% of the time.
"""
for _ in range(num):
if random.random() > .5:
new_tile = self.pieces[0]
else:
new_tile = self.pieces[1]
# check for game over
blanks = self.count_blank_positions()
if blanks == 0:
print ("GAME OVER")
else:
res = self.find_random_blank_cell()
row = res[0]
col = res[1]
self.set_tile(row, col, new_tile) | [
"def",
"new_tile",
"(",
"self",
",",
"num",
"=",
"1",
")",
":",
"for",
"_",
"in",
"range",
"(",
"num",
")",
":",
"if",
"random",
".",
"random",
"(",
")",
">",
".5",
":",
"new_tile",
"=",
"self",
".",
"pieces",
"[",
"0",
"]",
"else",
":",
"new_tile",
"=",
"self",
".",
"pieces",
"[",
"1",
"]",
"blanks",
"=",
"self",
".",
"count_blank_positions",
"(",
")",
"if",
"blanks",
"==",
"0",
":",
"print",
"(",
"\"GAME OVER\"",
")",
"else",
":",
"res",
"=",
"self",
".",
"find_random_blank_cell",
"(",
")",
"row",
"=",
"res",
"[",
"0",
"]",
"col",
"=",
"res",
"[",
"1",
"]",
"self",
".",
"set_tile",
"(",
"row",
",",
"col",
",",
"new_tile",
")"
] | Create a new tile in a randomly selected empty
square. The tile should be 2 90% of the time and
4 10% of the time. | [
"Create",
"a",
"new",
"tile",
"in",
"a",
"randomly",
"selected",
"empty",
"square",
".",
"The",
"tile",
"should",
"be",
"2",
"90%",
"of",
"the",
"time",
"and",
"4",
"10%",
"of",
"the",
"time",
"."
] | fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03 | https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/toolbox/cls_grid.py#L140-L161 | train |
acutesoftware/AIKIF | aikif/toolbox/cls_grid.py | Grid.set_tile | def set_tile(self, row, col, value):
"""
Set the tile at position row, col to have the given value.
"""
#print('set_tile: y=', row, 'x=', col)
if col < 0:
print("ERROR - x less than zero", col)
col = 0
#return
if col > self.grid_width - 1 :
print("ERROR - x larger than grid", col)
col = self.grid_width - 1
#return
if row < 0:
print("ERROR - y less than zero", row)
row = 0
#return
if row > self.grid_height - 1:
print("ERROR - y larger than grid", row)
row = self.grid_height - 1
self.grid[row][col] = value | python | def set_tile(self, row, col, value):
"""
Set the tile at position row, col to have the given value.
"""
#print('set_tile: y=', row, 'x=', col)
if col < 0:
print("ERROR - x less than zero", col)
col = 0
#return
if col > self.grid_width - 1 :
print("ERROR - x larger than grid", col)
col = self.grid_width - 1
#return
if row < 0:
print("ERROR - y less than zero", row)
row = 0
#return
if row > self.grid_height - 1:
print("ERROR - y larger than grid", row)
row = self.grid_height - 1
self.grid[row][col] = value | [
"def",
"set_tile",
"(",
"self",
",",
"row",
",",
"col",
",",
"value",
")",
":",
"if",
"col",
"<",
"0",
":",
"print",
"(",
"\"ERROR - x less than zero\"",
",",
"col",
")",
"col",
"=",
"0",
"if",
"col",
">",
"self",
".",
"grid_width",
"-",
"1",
":",
"print",
"(",
"\"ERROR - x larger than grid\"",
",",
"col",
")",
"col",
"=",
"self",
".",
"grid_width",
"-",
"1",
"if",
"row",
"<",
"0",
":",
"print",
"(",
"\"ERROR - y less than zero\"",
",",
"row",
")",
"row",
"=",
"0",
"if",
"row",
">",
"self",
".",
"grid_height",
"-",
"1",
":",
"print",
"(",
"\"ERROR - y larger than grid\"",
",",
"row",
")",
"row",
"=",
"self",
".",
"grid_height",
"-",
"1",
"self",
".",
"grid",
"[",
"row",
"]",
"[",
"col",
"]",
"=",
"value"
] | Set the tile at position row, col to have the given value. | [
"Set",
"the",
"tile",
"at",
"position",
"row",
"col",
"to",
"have",
"the",
"given",
"value",
"."
] | fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03 | https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/toolbox/cls_grid.py#L196-L220 | train |
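A sketch of `set_tile`'s clamping behavior, with the same hypothetical constructor:

```python
from aikif.toolbox.cls_grid import Grid

g = Grid(4, 4, ['X', 'O'])  # hypothetical constructor arguments

# Out-of-range coordinates are clamped to the nearest edge (an error is
# printed) rather than raising IndexError, so this writes to cell (3, 3)
g.set_tile(99, 99, 'X')
print(g.get_tile(3, 3))  # 'X'
```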
acutesoftware/AIKIF | aikif/toolbox/cls_grid.py | Grid.replace_grid | def replace_grid(self, updated_grid):
"""
replace all cells in current grid with updated grid
"""
for col in range(self.get_grid_width()):
for row in range(self.get_grid_height()):
if updated_grid[row][col] == EMPTY:
self.set_empty(row, col)
else:
self.set_full(row, col) | python | def replace_grid(self, updated_grid):
"""
replace all cells in current grid with updated grid
"""
for col in range(self.get_grid_width()):
for row in range(self.get_grid_height()):
if updated_grid[row][col] == EMPTY:
self.set_empty(row, col)
else:
self.set_full(row, col) | [
"def",
"replace_grid",
"(",
"self",
",",
"updated_grid",
")",
":",
"for",
"col",
"in",
"range",
"(",
"self",
".",
"get_grid_width",
"(",
")",
")",
":",
"for",
"row",
"in",
"range",
"(",
"self",
".",
"get_grid_height",
"(",
")",
")",
":",
"if",
"updated_grid",
"[",
"row",
"]",
"[",
"col",
"]",
"==",
"EMPTY",
":",
"self",
".",
"set_empty",
"(",
"row",
",",
"col",
")",
"else",
":",
"self",
".",
"set_full",
"(",
"row",
",",
"col",
")"
] | replace all cells in current grid with updated grid | [
"replace",
"all",
"cells",
"in",
"current",
"grid",
"with",
"updated",
"grid"
] | fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03 | https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/toolbox/cls_grid.py#L289-L298 | train |
acutesoftware/AIKIF | aikif/toolbox/cls_grid.py | Grid.find_safe_starting_point | def find_safe_starting_point(self):
"""
finds a place on the grid which is clear on all sides
to avoid starting in the middle of a blockage
"""
y = random.randint(2,self.grid_height-4)
x = random.randint(2,self.grid_width-4)
return y, x | python | def find_safe_starting_point(self):
"""
finds a place on the grid which is clear on all sides
to avoid starting in the middle of a blockage
"""
y = random.randint(2,self.grid_height-4)
x = random.randint(2,self.grid_width-4)
return y, x | [
"def",
"find_safe_starting_point",
"(",
"self",
")",
":",
"y",
"=",
"random",
".",
"randint",
"(",
"2",
",",
"self",
".",
"grid_height",
"-",
"4",
")",
"x",
"=",
"random",
".",
"randint",
"(",
"2",
",",
"self",
".",
"grid_width",
"-",
"4",
")",
"return",
"y",
",",
"x"
] | finds a place on the grid which is clear on all sides
to avoid starting in the middle of a blockage | [
"finds",
"a",
"place",
"on",
"the",
"grid",
"which",
"is",
"clear",
"on",
"all",
"sides",
"to",
"avoid",
"starting",
"in",
"the",
"middle",
"of",
"a",
"blockage"
] | fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03 | https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/toolbox/cls_grid.py#L300-L307 | train |
acutesoftware/AIKIF | aikif/toolbox/image_tools.py | resize | def resize(fname, basewidth, opFilename):
""" resize an image to basewidth """
if basewidth == 0:
basewidth = 300
img = Image.open(fname)
wpercent = (basewidth/float(img.size[0]))
hsize = int((float(img.size[1])*float(wpercent)))
img = img.resize((basewidth,hsize), Image.ANTIALIAS)
img.save(opFilename) | python | def resize(fname, basewidth, opFilename):
""" resize an image to basewidth """
if basewidth == 0:
basewidth = 300
img = Image.open(fname)
wpercent = (basewidth/float(img.size[0]))
hsize = int((float(img.size[1])*float(wpercent)))
img = img.resize((basewidth,hsize), Image.ANTIALIAS)
img.save(opFilename) | [
"def",
"resize",
"(",
"fname",
",",
"basewidth",
",",
"opFilename",
")",
":",
"if",
"basewidth",
"==",
"0",
":",
"basewidth",
"=",
"300",
"img",
"=",
"Image",
".",
"open",
"(",
"fname",
")",
"wpercent",
"=",
"(",
"basewidth",
"/",
"float",
"(",
"img",
".",
"size",
"[",
"0",
"]",
")",
")",
"hsize",
"=",
"int",
"(",
"(",
"float",
"(",
"img",
".",
"size",
"[",
"1",
"]",
")",
"*",
"float",
"(",
"wpercent",
")",
")",
")",
"img",
"=",
"img",
".",
"resize",
"(",
"(",
"basewidth",
",",
"hsize",
")",
",",
"Image",
".",
"ANTIALIAS",
")",
"img",
".",
"save",
"(",
"opFilename",
")"
] | resize an image to basewidth | [
"resize",
"an",
"image",
"to",
"basewidth"
] | fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03 | https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/toolbox/image_tools.py#L103-L111 | train |
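A usage sketch for `resize`; note that `Image.ANTIALIAS` was removed in Pillow 10, so on current Pillow the function body needs `Image.LANCZOS` (its long-standing alias) instead:

```python
from aikif.toolbox.image_tools import resize

# Scale photo.jpg to 300 px wide, keeping the aspect ratio
# (passing basewidth=0 also falls back to 300), and save the result.
resize('photo.jpg', 300, 'small.jpg')
```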
acutesoftware/AIKIF | aikif/toolbox/image_tools.py | print_stats | def print_stats(img):
""" prints stats, remember that img should already have been loaded """
stat = ImageStat.Stat(img)
print("extrema : ", stat.extrema)
print("count : ", stat.count)
print("sum : ", stat.sum)
print("sum2 : ", stat.sum2)
print("mean : ", stat.mean)
print("median : ", stat.median)
print("rms : ", stat.rms)
print("var : ", stat.var)
print("stddev : ", stat.stddev) | python | def print_stats(img):
""" prints stats, remember that img should already have been loaded """
stat = ImageStat.Stat(img)
print("extrema : ", stat.extrema)
print("count : ", stat.count)
print("sum : ", stat.sum)
print("sum2 : ", stat.sum2)
print("mean : ", stat.mean)
print("median : ", stat.median)
print("rms : ", stat.rms)
print("var : ", stat.var)
print("stddev : ", stat.stddev) | [
"def",
"print_stats",
"(",
"img",
")",
":",
"stat",
"=",
"ImageStat",
".",
"Stat",
"(",
"img",
")",
"print",
"(",
"\"extrema : \"",
",",
"stat",
".",
"extrema",
")",
"print",
"(",
"\"count : \"",
",",
"stat",
".",
"count",
")",
"print",
"(",
"\"sum : \"",
",",
"stat",
".",
"sum",
")",
"print",
"(",
"\"sum2 : \"",
",",
"stat",
".",
"sum2",
")",
"print",
"(",
"\"mean : \"",
",",
"stat",
".",
"mean",
")",
"print",
"(",
"\"median : \"",
",",
"stat",
".",
"median",
")",
"print",
"(",
"\"rms : \"",
",",
"stat",
".",
"rms",
")",
"print",
"(",
"\"var : \"",
",",
"stat",
".",
"var",
")",
"print",
"(",
"\"stddev : \"",
",",
"stat",
".",
"stddev",
")"
] | prints stats, remember that img should already have been loaded | [
"prints",
"stats",
"remember",
"that",
"img",
"should",
"already",
"have",
"been",
"loaded"
] | fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03 | https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/toolbox/image_tools.py#L114-L125 | train |
acutesoftware/AIKIF | aikif/toolbox/image_tools.py | print_all_metadata | def print_all_metadata(fname):
""" high level that prints all as long list """
print("Filename :", fname )
print("Basename :", os.path.basename(fname))
print("Path :", os.path.dirname(fname))
print("Size :", os.path.getsize(fname))
img = Image.open(fname)
# get the image's width and height in pixels
width, height = img.size
# get the largest dimension
#max_dim = max(img.size)
print("Width :", width)
print("Height :", height)
print("Format :", img.format)
print("palette :", img.palette )
print_stats(img)
#print_exif_data(img)
exif_data = get_exif_data(img)
(lat, lon) = get_lat_lon(exif_data)
print("GPS Lat :", lat )
print("GPS Long :", lon ) | python | def print_all_metadata(fname):
""" high level that prints all as long list """
print("Filename :", fname )
print("Basename :", os.path.basename(fname))
print("Path :", os.path.dirname(fname))
print("Size :", os.path.getsize(fname))
img = Image.open(fname)
# get the image's width and height in pixels
width, height = img.size
# get the largest dimension
#max_dim = max(img.size)
print("Width :", width)
print("Height :", height)
print("Format :", img.format)
print("palette :", img.palette )
print_stats(img)
#print_exif_data(img)
exif_data = get_exif_data(img)
(lat, lon) = get_lat_lon(exif_data)
print("GPS Lat :", lat )
print("GPS Long :", lon ) | [
"def",
"print_all_metadata",
"(",
"fname",
")",
":",
"print",
"(",
"\"Filename :\"",
",",
"fname",
")",
"print",
"(",
"\"Basename :\"",
",",
"os",
".",
"path",
".",
"basename",
"(",
"fname",
")",
")",
"print",
"(",
"\"Path :\"",
",",
"os",
".",
"path",
".",
"dirname",
"(",
"fname",
")",
")",
"print",
"(",
"\"Size :\"",
",",
"os",
".",
"path",
".",
"getsize",
"(",
"fname",
")",
")",
"img",
"=",
"Image",
".",
"open",
"(",
"fname",
")",
"width",
",",
"height",
"=",
"img",
".",
"size",
"print",
"(",
"\"Width :\"",
",",
"width",
")",
"print",
"(",
"\"Height :\"",
",",
"height",
")",
"print",
"(",
"\"Format :\"",
",",
"img",
".",
"format",
")",
"print",
"(",
"\"palette :\"",
",",
"img",
".",
"palette",
")",
"print_stats",
"(",
"img",
")",
"exif_data",
"=",
"get_exif_data",
"(",
"img",
")",
"(",
"lat",
",",
"lon",
")",
"=",
"get_lat_lon",
"(",
"exif_data",
")",
"print",
"(",
"\"GPS Lat :\"",
",",
"lat",
")",
"print",
"(",
"\"GPS Long :\"",
",",
"lon",
")"
] | high level that prints all as long list | [
"high",
"level",
"that",
"prints",
"all",
"as",
"long",
"list"
] | fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03 | https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/toolbox/image_tools.py#L152-L173 | train |
acutesoftware/AIKIF | aikif/toolbox/image_tools.py | get_metadata_as_dict | def get_metadata_as_dict(fname):
""" Gets all metadata and puts into dictionary """
imgdict = {}
try:
imgdict['filename'] = fname
imgdict['size'] = str(os.path.getsize(fname))
imgdict['basename'] = os.path.basename(fname)
imgdict['path'] = os.path.dirname(fname)
img = Image.open(fname)
# get the image's width and height in pixels
width, height = img.size
imgdict['width'] = str(width)
imgdict['height'] = str(height)
imgdict['format'] = str(img.format)
imgdict['palette'] = str(img.palette)
stat = ImageStat.Stat(img)
#res = res + q + str(stat.extrema) + q + d
imgdict['count'] = List2String(stat.count, ",")
imgdict['sum'] = List2String(stat.sum, ",")
imgdict['sum2'] = List2String(stat.sum2, ",")
imgdict['mean'] = List2String(stat.mean, ",")
imgdict['median'] = List2String(stat.median, ",")
imgdict['rms'] = List2String(stat.rms, ",")
imgdict['var'] = List2String(stat.var, ",")
imgdict['stddev'] = List2String(stat.stddev, ",")
exif_data = get_exif_data(img)
print('exif_data = ', exif_data)
(lat, lon) = get_lat_lon(exif_data)
print('(lat, lon)', (lat, lon))
imgdict['lat'] = str(lat)
imgdict['lon'] = str(lon)
except Exception as ex:
print('problem reading image file metadata in ', fname, str(ex))
imgdict['lat'] = 'ERROR'
imgdict['lon'] = 'ERROR'
return imgdict | python | def get_metadata_as_dict(fname):
""" Gets all metadata and puts into dictionary """
imgdict = {}
try:
imgdict['filename'] = fname
imgdict['size'] = str(os.path.getsize(fname))
imgdict['basename'] = os.path.basename(fname)
imgdict['path'] = os.path.dirname(fname)
img = Image.open(fname)
# get the image's width and height in pixels
width, height = img.size
imgdict['width'] = str(width)
imgdict['height'] = str(height)
imgdict['format'] = str(img.format)
imgdict['palette'] = str(img.palette)
stat = ImageStat.Stat(img)
#res = res + q + str(stat.extrema) + q + d
imgdict['count'] = List2String(stat.count, ",")
imgdict['sum'] = List2String(stat.sum, ",")
imgdict['sum2'] = List2String(stat.sum2, ",")
imgdict['mean'] = List2String(stat.mean, ",")
imgdict['median'] = List2String(stat.median, ",")
imgdict['rms'] = List2String(stat.rms, ",")
imgdict['var'] = List2String(stat.var, ",")
imgdict['stddev'] = List2String(stat.stddev, ",")
exif_data = get_exif_data(img)
print('exif_data = ', exif_data)
(lat, lon) = get_lat_lon(exif_data)
print('(lat, lon)', (lat, lon))
imgdict['lat'] = str(lat)
imgdict['lon'] = str(lon)
except Exception as ex:
print('problem reading image file metadata in ', fname, str(ex))
imgdict['lat'] = 'ERROR'
imgdict['lon'] = 'ERROR'
return imgdict | [
"def",
"get_metadata_as_dict",
"(",
"fname",
")",
":",
"imgdict",
"=",
"{",
"}",
"try",
":",
"imgdict",
"[",
"'filename'",
"]",
"=",
"fname",
"imgdict",
"[",
"'size'",
"]",
"=",
"str",
"(",
"os",
".",
"path",
".",
"getsize",
"(",
"fname",
")",
")",
"imgdict",
"[",
"'basename'",
"]",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"fname",
")",
"imgdict",
"[",
"'path'",
"]",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"fname",
")",
"img",
"=",
"Image",
".",
"open",
"(",
"fname",
")",
"width",
",",
"height",
"=",
"img",
".",
"size",
"imgdict",
"[",
"'width'",
"]",
"=",
"str",
"(",
"width",
")",
"imgdict",
"[",
"'height'",
"]",
"=",
"str",
"(",
"height",
")",
"imgdict",
"[",
"'format'",
"]",
"=",
"str",
"(",
"img",
".",
"format",
")",
"imgdict",
"[",
"'palette'",
"]",
"=",
"str",
"(",
"img",
".",
"palette",
")",
"stat",
"=",
"ImageStat",
".",
"Stat",
"(",
"img",
")",
"imgdict",
"[",
"'count'",
"]",
"=",
"List2String",
"(",
"stat",
".",
"count",
",",
"\",\"",
")",
"imgdict",
"[",
"'sum'",
"]",
"=",
"List2String",
"(",
"stat",
".",
"sum",
",",
"\",\"",
")",
"imgdict",
"[",
"'sum2'",
"]",
"=",
"List2String",
"(",
"stat",
".",
"sum2",
",",
"\",\"",
")",
"imgdict",
"[",
"'mean'",
"]",
"=",
"List2String",
"(",
"stat",
".",
"mean",
",",
"\",\"",
")",
"imgdict",
"[",
"'median'",
"]",
"=",
"List2String",
"(",
"stat",
".",
"median",
",",
"\",\"",
")",
"imgdict",
"[",
"'rms'",
"]",
"=",
"List2String",
"(",
"stat",
".",
"rms",
",",
"\",\"",
")",
"imgdict",
"[",
"'var'",
"]",
"=",
"List2String",
"(",
"stat",
".",
"var",
",",
"\",\"",
")",
"imgdict",
"[",
"'stddev'",
"]",
"=",
"List2String",
"(",
"stat",
".",
"stddev",
",",
"\",\"",
")",
"exif_data",
"=",
"get_exif_data",
"(",
"img",
")",
"print",
"(",
"'exif_data = '",
",",
"exif_data",
")",
"(",
"lat",
",",
"lon",
")",
"=",
"get_lat_lon",
"(",
"exif_data",
")",
"print",
"(",
"'(lat, lon)'",
",",
"(",
"lat",
",",
"lon",
")",
")",
"imgdict",
"[",
"'lat'",
"]",
"=",
"str",
"(",
"lat",
")",
"imgdict",
"[",
"'lon'",
"]",
"=",
"str",
"(",
"lon",
")",
"except",
"Exception",
"as",
"ex",
":",
"print",
"(",
"'problem reading image file metadata in '",
",",
"fname",
",",
"str",
"(",
"ex",
")",
")",
"imgdict",
"[",
"'lat'",
"]",
"=",
"'ERROR'",
"imgdict",
"[",
"'lon'",
"]",
"=",
"'ERROR'",
"return",
"imgdict"
] | Gets all metadata and puts into dictionary | [
"Gets",
"all",
"metadata",
"and",
"puts",
"into",
"dictionary"
] | fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03 | https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/toolbox/image_tools.py#L198-L235 | train |
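A usage sketch for `get_metadata_as_dict`; all values come back as strings, per the code above:

```python
from aikif.toolbox.image_tools import get_metadata_as_dict

meta = get_metadata_as_dict('photo.jpg')  # any readable image file
print(meta['width'], meta['height'], meta['format'])
print(meta['lat'], meta['lon'])  # stringified; 'ERROR' if the file can't be read
```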
acutesoftware/AIKIF | aikif/toolbox/image_tools.py | get_metadata_as_csv | def get_metadata_as_csv(fname):
""" Gets all metadata and puts into CSV format """
q = chr(34)
d = ","
res = q + fname + q + d
res = res + q + os.path.basename(fname) + q + d
res = res + q + os.path.dirname(fname) + q + d
try:
res = res + q + str(os.path.getsize(fname)) + q + d
img = Image.open(fname)
# get the image's width and height in pixels
width, height = img.size
res = res + q + str(width) + q + d
res = res + q + str(height) + q + d
res = res + q + str(img.format) + q + d
res = res + q + str(img.palette) + q + d
stat = ImageStat.Stat(img)
#print(fname, width, height)
#res = res + q + str(stat.extrema) + q + d
res = res + q + List2String(stat.count, ",") + q + d
res = res + q + List2String(stat.sum, ",") + q + d
res = res + q + List2String(stat.sum2, ",") + q + d
res = res + q + List2String(stat.mean, ",") + q + d
res = res + q + List2String(stat.median, ",") + q + d
res = res + q + List2String(stat.rms, ",") + q + d
res = res + q + List2String(stat.var, ",") + q + d
res = res + q + List2String(stat.stddev, ",") + q + d
exif_data = get_exif_data(img)
(lat, lon) = get_lat_lon(exif_data)
res = res + q + str(lat) + q + d
res = res + q + str(lon) + q + d
except Exception as ex:
print('problem reading image file metadata in ', fname, str(ex))
return res | python | def get_metadata_as_csv(fname):
""" Gets all metadata and puts into CSV format """
q = chr(34)
d = ","
res = q + fname + q + d
res = res + q + os.path.basename(fname) + q + d
res = res + q + os.path.dirname(fname) + q + d
try:
res = res + q + str(os.path.getsize(fname)) + q + d
img = Image.open(fname)
# get the image's width and height in pixels
width, height = img.size
res = res + q + str(width) + q + d
res = res + q + str(height) + q + d
res = res + q + str(img.format) + q + d
res = res + q + str(img.palette) + q + d
stat = ImageStat.Stat(img)
#print(fname, width, height)
#res = res + q + str(stat.extrema) + q + d
res = res + q + List2String(stat.count, ",") + q + d
res = res + q + List2String(stat.sum, ",") + q + d
res = res + q + List2String(stat.sum2, ",") + q + d
res = res + q + List2String(stat.mean, ",") + q + d
res = res + q + List2String(stat.median, ",") + q + d
res = res + q + List2String(stat.rms, ",") + q + d
res = res + q + List2String(stat.var, ",") + q + d
res = res + q + List2String(stat.stddev, ",") + q + d
exif_data = get_exif_data(img)
(lat, lon) = get_lat_lon(exif_data)
res = res + q + str(lat) + q + d
res = res + q + str(lon) + q + d
except Exception as ex:
print('problem reading image file metadata in ', fname, str(ex))
return res | [
"def",
"get_metadata_as_csv",
"(",
"fname",
")",
":",
"q",
"=",
"chr",
"(",
"34",
")",
"d",
"=",
"\",\"",
"res",
"=",
"q",
"+",
"fname",
"+",
"q",
"+",
"d",
"res",
"=",
"res",
"+",
"q",
"+",
"os",
".",
"path",
".",
"basename",
"(",
"fname",
")",
"+",
"q",
"+",
"d",
"res",
"=",
"res",
"+",
"q",
"+",
"os",
".",
"path",
".",
"dirname",
"(",
"fname",
")",
"+",
"q",
"+",
"d",
"try",
":",
"res",
"=",
"res",
"+",
"q",
"+",
"str",
"(",
"os",
".",
"path",
".",
"getsize",
"(",
"fname",
")",
")",
"+",
"q",
"+",
"d",
"img",
"=",
"Image",
".",
"open",
"(",
"fname",
")",
"width",
",",
"height",
"=",
"img",
".",
"size",
"res",
"=",
"res",
"+",
"q",
"+",
"str",
"(",
"width",
")",
"+",
"q",
"+",
"d",
"res",
"=",
"res",
"+",
"q",
"+",
"str",
"(",
"height",
")",
"+",
"q",
"+",
"d",
"res",
"=",
"res",
"+",
"q",
"+",
"str",
"(",
"img",
".",
"format",
")",
"+",
"q",
"+",
"d",
"res",
"=",
"res",
"+",
"q",
"+",
"str",
"(",
"img",
".",
"palette",
")",
"+",
"q",
"+",
"d",
"stat",
"=",
"ImageStat",
".",
"Stat",
"(",
"img",
")",
"res",
"=",
"res",
"+",
"q",
"+",
"List2String",
"(",
"stat",
".",
"count",
",",
"\",\"",
")",
"+",
"q",
"+",
"d",
"res",
"=",
"res",
"+",
"q",
"+",
"List2String",
"(",
"stat",
".",
"sum",
",",
"\",\"",
")",
"+",
"q",
"+",
"d",
"res",
"=",
"res",
"+",
"q",
"+",
"List2String",
"(",
"stat",
".",
"sum2",
",",
"\",\"",
")",
"+",
"q",
"+",
"d",
"res",
"=",
"res",
"+",
"q",
"+",
"List2String",
"(",
"stat",
".",
"mean",
",",
"\",\"",
")",
"+",
"q",
"+",
"d",
"res",
"=",
"res",
"+",
"q",
"+",
"List2String",
"(",
"stat",
".",
"median",
",",
"\",\"",
")",
"+",
"q",
"+",
"d",
"res",
"=",
"res",
"+",
"q",
"+",
"List2String",
"(",
"stat",
".",
"rms",
",",
"\",\"",
")",
"+",
"q",
"+",
"d",
"res",
"=",
"res",
"+",
"q",
"+",
"List2String",
"(",
"stat",
".",
"var",
",",
"\",\"",
")",
"+",
"q",
"+",
"d",
"res",
"=",
"res",
"+",
"q",
"+",
"List2String",
"(",
"stat",
".",
"stddev",
",",
"\",\"",
")",
"+",
"q",
"+",
"d",
"exif_data",
"=",
"get_exif_data",
"(",
"img",
")",
"(",
"lat",
",",
"lon",
")",
"=",
"get_lat_lon",
"(",
"exif_data",
")",
"res",
"=",
"res",
"+",
"q",
"+",
"str",
"(",
"lat",
")",
"+",
"q",
"+",
"d",
"res",
"=",
"res",
"+",
"q",
"+",
"str",
"(",
"lon",
")",
"+",
"q",
"+",
"d",
"except",
"Exception",
"as",
"ex",
":",
"print",
"(",
"'problem reading image file metadata in '",
",",
"fname",
",",
"str",
"(",
"ex",
")",
")",
"return",
"res"
] | Gets all metadata and puts into CSV format | [
"Gets",
"all",
"metadata",
"and",
"puts",
"into",
"CSV",
"format"
] | fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03 | https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/toolbox/image_tools.py#L237-L271 | train |