repo (string, 7-55) | path (string, 4-127) | func_name (string, 1-88) | original_string (string, 75-19.8k) | language (stringclasses, 1 value) | code (string, 75-19.8k) | code_tokens (sequence) | docstring (string, 3-17.3k) | docstring_tokens (sequence) | sha (string, 40) | url (string, 87-242) | partition (stringclasses, 1 value) |
---|---|---|---|---|---|---|---|---|---|---|---|
acutesoftware/AIKIF | aikif/toolbox/image_tools.py | add_text_to_image | def add_text_to_image(fname, txt, opFilename):
""" convert an image by adding text """
ft = ImageFont.load("T://user//dev//src//python//_AS_LIB//timR24.pil")
#wh = ft.getsize(txt)
print("Adding text ", txt, " to ", fname, " pixels wide to file " , opFilename)
im = Image.open(fname)
draw = ImageDraw.Draw(im)
draw.text((0, 0), txt, fill=(0, 0, 0), font=ft)
del draw
im.save(opFilename) | python | def add_text_to_image(fname, txt, opFilename):
""" convert an image by adding text """
ft = ImageFont.load("T://user//dev//src//python//_AS_LIB//timR24.pil")
#wh = ft.getsize(txt)
print("Adding text ", txt, " to ", fname, " pixels wide to file " , opFilename)
im = Image.open(fname)
draw = ImageDraw.Draw(im)
draw.text((0, 0), txt, fill=(0, 0, 0), font=ft)
del draw
im.save(opFilename) | [
"def",
"add_text_to_image",
"(",
"fname",
",",
"txt",
",",
"opFilename",
")",
":",
"ft",
"=",
"ImageFont",
".",
"load",
"(",
"\"T://user//dev//src//python//_AS_LIB//timR24.pil\"",
")",
"print",
"(",
"\"Adding text \"",
",",
"txt",
",",
"\" to \"",
",",
"fname",
",",
"\" pixels wide to file \"",
",",
"opFilename",
")",
"im",
"=",
"Image",
".",
"open",
"(",
"fname",
")",
"draw",
"=",
"ImageDraw",
".",
"Draw",
"(",
"im",
")",
"draw",
".",
"text",
"(",
"(",
"0",
",",
"0",
")",
",",
"txt",
",",
"fill",
"=",
"(",
"0",
",",
"0",
",",
"0",
")",
",",
"font",
"=",
"ft",
")",
"del",
"draw",
"im",
".",
"save",
"(",
"opFilename",
")"
] | convert an image by adding text | [
"convert",
"an",
"image",
"by",
"adding",
"text"
] | fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03 | https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/toolbox/image_tools.py#L304-L313 | train |
acutesoftware/AIKIF | aikif/toolbox/image_tools.py | add_crosshair_to_image | def add_crosshair_to_image(fname, opFilename):
""" convert an image by adding a cross hair """
im = Image.open(fname)
draw = ImageDraw.Draw(im)
draw.line((0, 0) + im.size, fill=(255, 255, 255))
draw.line((0, im.size[1], im.size[0], 0), fill=(255, 255, 255))
del draw
im.save(opFilename) | python | def add_crosshair_to_image(fname, opFilename):
""" convert an image by adding a cross hair """
im = Image.open(fname)
draw = ImageDraw.Draw(im)
draw.line((0, 0) + im.size, fill=(255, 255, 255))
draw.line((0, im.size[1], im.size[0], 0), fill=(255, 255, 255))
del draw
im.save(opFilename) | [
"def",
"add_crosshair_to_image",
"(",
"fname",
",",
"opFilename",
")",
":",
"im",
"=",
"Image",
".",
"open",
"(",
"fname",
")",
"draw",
"=",
"ImageDraw",
".",
"Draw",
"(",
"im",
")",
"draw",
".",
"line",
"(",
"(",
"0",
",",
"0",
")",
"+",
"im",
".",
"size",
",",
"fill",
"=",
"(",
"255",
",",
"255",
",",
"255",
")",
")",
"draw",
".",
"line",
"(",
"(",
"0",
",",
"im",
".",
"size",
"[",
"1",
"]",
",",
"im",
".",
"size",
"[",
"0",
"]",
",",
"0",
")",
",",
"fill",
"=",
"(",
"255",
",",
"255",
",",
"255",
")",
")",
"del",
"draw",
"im",
".",
"save",
"(",
"opFilename",
")"
] | convert an image by adding a cross hair | [
"convert",
"an",
"image",
"by",
"adding",
"a",
"cross",
"hair"
] | fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03 | https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/toolbox/image_tools.py#L315-L322 | train |
acutesoftware/AIKIF | aikif/toolbox/image_tools.py | filter_contour | def filter_contour(imageFile, opFile):
""" convert an image by applying a contour """
im = Image.open(imageFile)
im1 = im.filter(ImageFilter.CONTOUR)
im1.save(opFile) | python | def filter_contour(imageFile, opFile):
""" convert an image by applying a contour """
im = Image.open(imageFile)
im1 = im.filter(ImageFilter.CONTOUR)
im1.save(opFile) | [
"def",
"filter_contour",
"(",
"imageFile",
",",
"opFile",
")",
":",
"im",
"=",
"Image",
".",
"open",
"(",
"imageFile",
")",
"im1",
"=",
"im",
".",
"filter",
"(",
"ImageFilter",
".",
"CONTOUR",
")",
"im1",
".",
"save",
"(",
"opFile",
")"
] | convert an image by applying a contour | [
"convert",
"an",
"image",
"by",
"applying",
"a",
"contour"
] | fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03 | https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/toolbox/image_tools.py#L324-L328 | train |
acutesoftware/AIKIF | aikif/toolbox/image_tools.py | get_img_hash | def get_img_hash(image, hash_size = 8):
""" Grayscale and shrink the image in one step """
image = image.resize((hash_size + 1, hash_size), Image.ANTIALIAS, )
pixels = list(image.getdata())
#print('get_img_hash: pixels=', pixels)
# Compare adjacent pixels.
difference = []
for row in range(hash_size):
for col in range(hash_size):
pixel_left = image.getpixel((col, row))
pixel_right = image.getpixel((col + 1, row))
difference.append(pixel_left > pixel_right)
# Convert the binary array to a hexadecimal string.
decimal_value = 0
hex_string = []
for index, value in enumerate(difference):
if value:
decimal_value += 2**(index % 8)
if (index % 8) == 7:
hex_string.append(hex(decimal_value)[2:].rjust(2, '0'))
decimal_value = 0
return ''.join(hex_string) | python | def get_img_hash(image, hash_size = 8):
""" Grayscale and shrink the image in one step """
image = image.resize((hash_size + 1, hash_size), Image.ANTIALIAS, )
pixels = list(image.getdata())
#print('get_img_hash: pixels=', pixels)
# Compare adjacent pixels.
difference = []
for row in range(hash_size):
for col in range(hash_size):
pixel_left = image.getpixel((col, row))
pixel_right = image.getpixel((col + 1, row))
difference.append(pixel_left > pixel_right)
# Convert the binary array to a hexadecimal string.
decimal_value = 0
hex_string = []
for index, value in enumerate(difference):
if value:
decimal_value += 2**(index % 8)
if (index % 8) == 7:
hex_string.append(hex(decimal_value)[2:].rjust(2, '0'))
decimal_value = 0
return ''.join(hex_string) | [
"def",
"get_img_hash",
"(",
"image",
",",
"hash_size",
"=",
"8",
")",
":",
"image",
"=",
"image",
".",
"resize",
"(",
"(",
"hash_size",
"+",
"1",
",",
"hash_size",
")",
",",
"Image",
".",
"ANTIALIAS",
",",
")",
"pixels",
"=",
"list",
"(",
"image",
".",
"getdata",
"(",
")",
")",
"difference",
"=",
"[",
"]",
"for",
"row",
"in",
"range",
"(",
"hash_size",
")",
":",
"for",
"col",
"in",
"range",
"(",
"hash_size",
")",
":",
"pixel_left",
"=",
"image",
".",
"getpixel",
"(",
"(",
"col",
",",
"row",
")",
")",
"pixel_right",
"=",
"image",
".",
"getpixel",
"(",
"(",
"col",
"+",
"1",
",",
"row",
")",
")",
"difference",
".",
"append",
"(",
"pixel_left",
">",
"pixel_right",
")",
"decimal_value",
"=",
"0",
"hex_string",
"=",
"[",
"]",
"for",
"index",
",",
"value",
"in",
"enumerate",
"(",
"difference",
")",
":",
"if",
"value",
":",
"decimal_value",
"+=",
"2",
"**",
"(",
"index",
"%",
"8",
")",
"if",
"(",
"index",
"%",
"8",
")",
"==",
"7",
":",
"hex_string",
".",
"append",
"(",
"hex",
"(",
"decimal_value",
")",
"[",
"2",
":",
"]",
".",
"rjust",
"(",
"2",
",",
"'0'",
")",
")",
"decimal_value",
"=",
"0",
"return",
"''",
".",
"join",
"(",
"hex_string",
")"
] | Grayscale and shrink the image in one step | [
"Grayscale",
"and",
"shrink",
"the",
"image",
"in",
"one",
"step"
] | fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03 | https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/toolbox/image_tools.py#L374-L400 | train |
acutesoftware/AIKIF | aikif/toolbox/image_tools.py | load_image | def load_image(fname):
""" read an image from file - PIL doesnt close nicely """
with open(fname, "rb") as f:
i = Image.open(fname)
#i.load()
return i | python | def load_image(fname):
""" read an image from file - PIL doesnt close nicely """
with open(fname, "rb") as f:
i = Image.open(fname)
#i.load()
return i | [
"def",
"load_image",
"(",
"fname",
")",
":",
"with",
"open",
"(",
"fname",
",",
"\"rb\"",
")",
"as",
"f",
":",
"i",
"=",
"Image",
".",
"open",
"(",
"fname",
")",
"return",
"i"
] | read an image from file - PIL doesnt close nicely | [
"read",
"an",
"image",
"from",
"file",
"-",
"PIL",
"doesnt",
"close",
"nicely"
] | fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03 | https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/toolbox/image_tools.py#L402-L407 | train |
acutesoftware/AIKIF | aikif/toolbox/image_tools.py | dump_img | def dump_img(fname):
""" output the image as text """
img = Image.open(fname)
width, _ = img.size
txt = ''
pixels = list(img.getdata())
for col in range(width):
txt += str(pixels[col:col+width])
return txt | python | def dump_img(fname):
""" output the image as text """
img = Image.open(fname)
width, _ = img.size
txt = ''
pixels = list(img.getdata())
for col in range(width):
txt += str(pixels[col:col+width])
return txt | [
"def",
"dump_img",
"(",
"fname",
")",
":",
"img",
"=",
"Image",
".",
"open",
"(",
"fname",
")",
"width",
",",
"_",
"=",
"img",
".",
"size",
"txt",
"=",
"''",
"pixels",
"=",
"list",
"(",
"img",
".",
"getdata",
"(",
")",
")",
"for",
"col",
"in",
"range",
"(",
"width",
")",
":",
"txt",
"+=",
"str",
"(",
"pixels",
"[",
"col",
":",
"col",
"+",
"width",
"]",
")",
"return",
"txt"
] | output the image as text | [
"output",
"the",
"image",
"as",
"text"
] | fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03 | https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/toolbox/image_tools.py#L409-L417 | train |
mpg-age-bioinformatics/AGEpy | AGEpy/plots.py | NormInt | def NormInt(df,sampleA,sampleB):
"""
Normalizes intensities of a gene in two samples
:param df: dataframe output of GetData()
:param sampleA: column header of sample A
:param sampleB: column header of sample B
:returns: normalized intensities
"""
c1=df[sampleA]
c2=df[sampleB]
return np.log10(np.sqrt(c1*c2)) | python | def NormInt(df,sampleA,sampleB):
"""
Normalizes intensities of a gene in two samples
:param df: dataframe output of GetData()
:param sampleA: column header of sample A
:param sampleB: column header of sample B
:returns: normalized intensities
"""
c1=df[sampleA]
c2=df[sampleB]
return np.log10(np.sqrt(c1*c2)) | [
"def",
"NormInt",
"(",
"df",
",",
"sampleA",
",",
"sampleB",
")",
":",
"c1",
"=",
"df",
"[",
"sampleA",
"]",
"c2",
"=",
"df",
"[",
"sampleB",
"]",
"return",
"np",
".",
"log10",
"(",
"np",
".",
"sqrt",
"(",
"c1",
"*",
"c2",
")",
")"
] | Normalizes intensities of a gene in two samples
:param df: dataframe output of GetData()
:param sampleA: column header of sample A
:param sampleB: column header of sample B
:returns: normalized intensities | [
"Normalizes",
"intensities",
"of",
"a",
"gene",
"in",
"two",
"samples"
] | 887808a7a2c1504f39ce8d8cb36c15c1721cd29f | https://github.com/mpg-age-bioinformatics/AGEpy/blob/887808a7a2c1504f39ce8d8cb36c15c1721cd29f/AGEpy/plots.py#L341-L354 | train |
Nachtfeuer/pipeline | examples/python/primes/demo/primes.py | is_prime | def is_prime(number):
"""
Testing given number to be a prime.
>>> [n for n in range(100+1) if is_prime(n)]
[2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97]
"""
if number < 2:
return False
if number % 2 == 0:
return number == 2
limit = int(math.sqrt(number))
for divisor in range(3, limit + 1, 2):
if number % divisor == 0:
return False
return True | python | def is_prime(number):
"""
Testing given number to be a prime.
>>> [n for n in range(100+1) if is_prime(n)]
[2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97]
"""
if number < 2:
return False
if number % 2 == 0:
return number == 2
limit = int(math.sqrt(number))
for divisor in range(3, limit + 1, 2):
if number % divisor == 0:
return False
return True | [
"def",
"is_prime",
"(",
"number",
")",
":",
"if",
"number",
"<",
"2",
":",
"return",
"False",
"if",
"number",
"%",
"2",
"==",
"0",
":",
"return",
"number",
"==",
"2",
"limit",
"=",
"int",
"(",
"math",
".",
"sqrt",
"(",
"number",
")",
")",
"for",
"divisor",
"in",
"range",
"(",
"3",
",",
"limit",
"+",
"1",
",",
"2",
")",
":",
"if",
"number",
"%",
"divisor",
"==",
"0",
":",
"return",
"False",
"return",
"True"
] | Testing given number to be a prime.
>>> [n for n in range(100+1) if is_prime(n)]
[2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97] | [
"Testing",
"given",
"number",
"to",
"be",
"a",
"prime",
"."
] | 04ca18c4e95e4349532bb45b768206393e1f2c13 | https://github.com/Nachtfeuer/pipeline/blob/04ca18c4e95e4349532bb45b768206393e1f2c13/examples/python/primes/demo/primes.py#L32-L49 | train |
OpenHydrology/floodestimation | floodestimation/analysis.py | QmedAnalysis.qmed_all_methods | def qmed_all_methods(self):
"""
Returns a dict of QMED methods using all available methods.
Available methods are defined in :attr:`qmed_methods`. The returned dict keys contain the method name, e.g.
`amax_record` with value representing the corresponding QMED estimate in m³/s.
:return: dict of QMED estimates
:rtype: dict
"""
result = {}
for method in self.methods:
try:
result[method] = getattr(self, '_qmed_from_' + method)()
except:
result[method] = None
return result | python | def qmed_all_methods(self):
"""
Returns a dict of QMED methods using all available methods.
Available methods are defined in :attr:`qmed_methods`. The returned dict keys contain the method name, e.g.
`amax_record` with value representing the corresponding QMED estimate in m³/s.
:return: dict of QMED estimates
:rtype: dict
"""
result = {}
for method in self.methods:
try:
result[method] = getattr(self, '_qmed_from_' + method)()
except:
result[method] = None
return result | [
"def",
"qmed_all_methods",
"(",
"self",
")",
":",
"result",
"=",
"{",
"}",
"for",
"method",
"in",
"self",
".",
"methods",
":",
"try",
":",
"result",
"[",
"method",
"]",
"=",
"getattr",
"(",
"self",
",",
"'_qmed_from_'",
"+",
"method",
")",
"(",
")",
"except",
":",
"result",
"[",
"method",
"]",
"=",
"None",
"return",
"result"
] | Returns a dict of QMED methods using all available methods.
Available methods are defined in :attr:`qmed_methods`. The returned dict keys contain the method name, e.g.
`amax_record` with value representing the corresponding QMED estimate in m³/s.
:return: dict of QMED estimates
:rtype: dict | [
"Returns",
"a",
"dict",
"of",
"QMED",
"methods",
"using",
"all",
"available",
"methods",
"."
] | 782da7c5abd1348923129efe89fb70003ebb088c | https://github.com/OpenHydrology/floodestimation/blob/782da7c5abd1348923129efe89fb70003ebb088c/floodestimation/analysis.py#L169-L185 | train |
OpenHydrology/floodestimation | floodestimation/analysis.py | QmedAnalysis._qmed_from_amax_records | def _qmed_from_amax_records(self):
"""
Return QMED estimate based on annual maximum flow records.
:return: QMED in m³/s
:rtype: float
"""
valid_flows = valid_flows_array(self.catchment)
n = len(valid_flows)
if n < 2:
raise InsufficientDataError("Insufficient annual maximum flow records available for catchment {}."
.format(self.catchment.id))
return np.median(valid_flows) | python | def _qmed_from_amax_records(self):
"""
Return QMED estimate based on annual maximum flow records.
:return: QMED in m³/s
:rtype: float
"""
valid_flows = valid_flows_array(self.catchment)
n = len(valid_flows)
if n < 2:
raise InsufficientDataError("Insufficient annual maximum flow records available for catchment {}."
.format(self.catchment.id))
return np.median(valid_flows) | [
"def",
"_qmed_from_amax_records",
"(",
"self",
")",
":",
"valid_flows",
"=",
"valid_flows_array",
"(",
"self",
".",
"catchment",
")",
"n",
"=",
"len",
"(",
"valid_flows",
")",
"if",
"n",
"<",
"2",
":",
"raise",
"InsufficientDataError",
"(",
"\"Insufficient annual maximum flow records available for catchment {}.\"",
".",
"format",
"(",
"self",
".",
"catchment",
".",
"id",
")",
")",
"return",
"np",
".",
"median",
"(",
"valid_flows",
")"
] | Return QMED estimate based on annual maximum flow records.
:return: QMED in m³/s
:rtype: float | [
"Return",
"QMED",
"estimate",
"based",
"on",
"annual",
"maximum",
"flow",
"records",
"."
] | 782da7c5abd1348923129efe89fb70003ebb088c | https://github.com/OpenHydrology/floodestimation/blob/782da7c5abd1348923129efe89fb70003ebb088c/floodestimation/analysis.py#L187-L199 | train |
OpenHydrology/floodestimation | floodestimation/analysis.py | QmedAnalysis._pot_month_counts | def _pot_month_counts(self, pot_dataset):
"""
Return a list of 12 sets. Each sets contains the years included in the POT record period.
:param pot_dataset: POT dataset (records and meta data)
:type pot_dataset: :class:`floodestimation.entities.PotDataset`
"""
periods = pot_dataset.continuous_periods()
result = [set() for x in range(12)]
for period in periods:
year = period.start_date.year
month = period.start_date.month
while True:
# Month by month, add the year
result[month - 1].add(year)
# If at end of period, break loop
if year == period.end_date.year and month == period.end_date.month:
break
# Next month (and year if needed)
month += 1
if month == 13:
month = 1
year += 1
return result | python | def _pot_month_counts(self, pot_dataset):
"""
Return a list of 12 sets. Each sets contains the years included in the POT record period.
:param pot_dataset: POT dataset (records and meta data)
:type pot_dataset: :class:`floodestimation.entities.PotDataset`
"""
periods = pot_dataset.continuous_periods()
result = [set() for x in range(12)]
for period in periods:
year = period.start_date.year
month = period.start_date.month
while True:
# Month by month, add the year
result[month - 1].add(year)
# If at end of period, break loop
if year == period.end_date.year and month == period.end_date.month:
break
# Next month (and year if needed)
month += 1
if month == 13:
month = 1
year += 1
return result | [
"def",
"_pot_month_counts",
"(",
"self",
",",
"pot_dataset",
")",
":",
"periods",
"=",
"pot_dataset",
".",
"continuous_periods",
"(",
")",
"result",
"=",
"[",
"set",
"(",
")",
"for",
"x",
"in",
"range",
"(",
"12",
")",
"]",
"for",
"period",
"in",
"periods",
":",
"year",
"=",
"period",
".",
"start_date",
".",
"year",
"month",
"=",
"period",
".",
"start_date",
".",
"month",
"while",
"True",
":",
"result",
"[",
"month",
"-",
"1",
"]",
".",
"add",
"(",
"year",
")",
"if",
"year",
"==",
"period",
".",
"end_date",
".",
"year",
"and",
"month",
"==",
"period",
".",
"end_date",
".",
"month",
":",
"break",
"month",
"+=",
"1",
"if",
"month",
"==",
"13",
":",
"month",
"=",
"1",
"year",
"+=",
"1",
"return",
"result"
] | Return a list of 12 sets. Each sets contains the years included in the POT record period.
:param pot_dataset: POT dataset (records and meta data)
:type pot_dataset: :class:`floodestimation.entities.PotDataset` | [
"Return",
"a",
"list",
"of",
"12",
"sets",
".",
"Each",
"sets",
"contains",
"the",
"years",
"included",
"in",
"the",
"POT",
"record",
"period",
"."
] | 782da7c5abd1348923129efe89fb70003ebb088c | https://github.com/OpenHydrology/floodestimation/blob/782da7c5abd1348923129efe89fb70003ebb088c/floodestimation/analysis.py#L229-L252 | train |
OpenHydrology/floodestimation | floodestimation/analysis.py | QmedAnalysis._qmed_from_area | def _qmed_from_area(self):
"""
Return QMED estimate based on catchment area.
TODO: add source of method
:return: QMED in m³/s
:rtype: float
"""
try:
return 1.172 * self.catchment.descriptors.dtm_area ** self._area_exponent() # Area in km²
except (TypeError, KeyError):
raise InsufficientDataError("Catchment `descriptors` attribute must be set first.") | python | def _qmed_from_area(self):
"""
Return QMED estimate based on catchment area.
TODO: add source of method
:return: QMED in m³/s
:rtype: float
"""
try:
return 1.172 * self.catchment.descriptors.dtm_area ** self._area_exponent() # Area in km²
except (TypeError, KeyError):
raise InsufficientDataError("Catchment `descriptors` attribute must be set first.") | [
"def",
"_qmed_from_area",
"(",
"self",
")",
":",
"try",
":",
"return",
"1.172",
"*",
"self",
".",
"catchment",
".",
"descriptors",
".",
"dtm_area",
"**",
"self",
".",
"_area_exponent",
"(",
")",
"except",
"(",
"TypeError",
",",
"KeyError",
")",
":",
"raise",
"InsufficientDataError",
"(",
"\"Catchment `descriptors` attribute must be set first.\"",
")"
] | Return QMED estimate based on catchment area.
TODO: add source of method
:return: QMED in m³/s
:rtype: float | [
"Return",
"QMED",
"estimate",
"based",
"on",
"catchment",
"area",
"."
] | 782da7c5abd1348923129efe89fb70003ebb088c | https://github.com/OpenHydrology/floodestimation/blob/782da7c5abd1348923129efe89fb70003ebb088c/floodestimation/analysis.py#L310-L322 | train |
OpenHydrology/floodestimation | floodestimation/analysis.py | QmedAnalysis._qmed_from_descriptors_1999 | def _qmed_from_descriptors_1999(self, as_rural=False):
"""
Return QMED estimation based on FEH catchment descriptors, 1999 methodology.
Methodology source: FEH, Vol. 3, p. 14
:param as_rural: assume catchment is fully rural. Default: false.
:type as_rural: bool
:return: QMED in m³/s
:rtype: float
"""
try:
qmed_rural = 1.172 * self.catchment.descriptors.dtm_area ** self._area_exponent() \
* (self.catchment.descriptors.saar / 1000.0) ** 1.560 \
* self.catchment.descriptors.farl ** 2.642 \
* (self.catchment.descriptors.sprhost / 100.0) ** 1.211 * \
0.0198 ** self._residual_soil()
if as_rural:
return qmed_rural
else:
return qmed_rural * self.urban_adj_factor()
except (TypeError, KeyError):
raise InsufficientDataError("Catchment `descriptors` attribute must be set first.") | python | def _qmed_from_descriptors_1999(self, as_rural=False):
"""
Return QMED estimation based on FEH catchment descriptors, 1999 methodology.
Methodology source: FEH, Vol. 3, p. 14
:param as_rural: assume catchment is fully rural. Default: false.
:type as_rural: bool
:return: QMED in m³/s
:rtype: float
"""
try:
qmed_rural = 1.172 * self.catchment.descriptors.dtm_area ** self._area_exponent() \
* (self.catchment.descriptors.saar / 1000.0) ** 1.560 \
* self.catchment.descriptors.farl ** 2.642 \
* (self.catchment.descriptors.sprhost / 100.0) ** 1.211 * \
0.0198 ** self._residual_soil()
if as_rural:
return qmed_rural
else:
return qmed_rural * self.urban_adj_factor()
except (TypeError, KeyError):
raise InsufficientDataError("Catchment `descriptors` attribute must be set first.") | [
"def",
"_qmed_from_descriptors_1999",
"(",
"self",
",",
"as_rural",
"=",
"False",
")",
":",
"try",
":",
"qmed_rural",
"=",
"1.172",
"*",
"self",
".",
"catchment",
".",
"descriptors",
".",
"dtm_area",
"**",
"self",
".",
"_area_exponent",
"(",
")",
"*",
"(",
"self",
".",
"catchment",
".",
"descriptors",
".",
"saar",
"/",
"1000.0",
")",
"**",
"1.560",
"*",
"self",
".",
"catchment",
".",
"descriptors",
".",
"farl",
"**",
"2.642",
"*",
"(",
"self",
".",
"catchment",
".",
"descriptors",
".",
"sprhost",
"/",
"100.0",
")",
"**",
"1.211",
"*",
"0.0198",
"**",
"self",
".",
"_residual_soil",
"(",
")",
"if",
"as_rural",
":",
"return",
"qmed_rural",
"else",
":",
"return",
"qmed_rural",
"*",
"self",
".",
"urban_adj_factor",
"(",
")",
"except",
"(",
"TypeError",
",",
"KeyError",
")",
":",
"raise",
"InsufficientDataError",
"(",
"\"Catchment `descriptors` attribute must be set first.\"",
")"
] | Return QMED estimation based on FEH catchment descriptors, 1999 methodology.
Methodology source: FEH, Vol. 3, p. 14
:param as_rural: assume catchment is fully rural. Default: false.
:type as_rural: bool
:return: QMED in m³/s
:rtype: float | [
"Return",
"QMED",
"estimation",
"based",
"on",
"FEH",
"catchment",
"descriptors",
"1999",
"methodology",
"."
] | 782da7c5abd1348923129efe89fb70003ebb088c | https://github.com/OpenHydrology/floodestimation/blob/782da7c5abd1348923129efe89fb70003ebb088c/floodestimation/analysis.py#L324-L346 | train |
OpenHydrology/floodestimation | floodestimation/analysis.py | QmedAnalysis._qmed_from_descriptors_2008 | def _qmed_from_descriptors_2008(self, as_rural=False, donor_catchments=None):
"""
Return QMED estimation based on FEH catchment descriptors, 2008 methodology.
Methodology source: Science Report SC050050, p. 36
:param as_rural: assume catchment is fully rural. Default: false.
:type as rural: bool
:param donor_catchments: override donor catchment to improve QMED catchment. If `None` (default),
donor catchment will be searched automatically, if empty list, no donors will be used.
:type donor_catchments: :class:`Catchment`
:return: QMED in m³/s
:rtype: float
"""
try:
# Basis rural QMED from descriptors
lnqmed_rural = 2.1170 \
+ 0.8510 * log(self.catchment.descriptors.dtm_area) \
- 1.8734 * 1000 / self.catchment.descriptors.saar \
+ 3.4451 * log(self.catchment.descriptors.farl) \
- 3.0800 * self.catchment.descriptors.bfihost ** 2.0
qmed_rural = exp(lnqmed_rural)
# Log intermediate results
self.results_log['qmed_descr_rural'] = qmed_rural
if donor_catchments is None:
# If no donor catchments are provided, find the nearest 25
donor_catchments = self.find_donor_catchments()
if donor_catchments:
# If found multiply rural estimate with weighted adjustment factors from all donors
weights = self._vec_alpha(donor_catchments)
errors = self._vec_lnqmed_residuals(donor_catchments)
correction = np.dot(weights, errors)
lnqmed_rural += correction
qmed_rural = exp(lnqmed_rural)
# Log intermediate results
self.results_log['donors'] = donor_catchments
for i, donor in enumerate(self.results_log['donors']):
donor.weight = weights[i]
donor.factor = exp(errors[i])
self.results_log['donor_adj_factor'] = exp(correction)
self.results_log['qmed_adj_rural'] = qmed_rural
if as_rural:
return qmed_rural
else:
# Apply urbanisation adjustment
urban_adj_factor = self.urban_adj_factor()
# Log intermediate results
self.results_log['qmed_descr_urban'] = self.results_log['qmed_descr_rural'] * urban_adj_factor
return qmed_rural * urban_adj_factor
except (TypeError, KeyError):
raise InsufficientDataError("Catchment `descriptors` attribute must be set first.") | python | def _qmed_from_descriptors_2008(self, as_rural=False, donor_catchments=None):
"""
Return QMED estimation based on FEH catchment descriptors, 2008 methodology.
Methodology source: Science Report SC050050, p. 36
:param as_rural: assume catchment is fully rural. Default: false.
:type as rural: bool
:param donor_catchments: override donor catchment to improve QMED catchment. If `None` (default),
donor catchment will be searched automatically, if empty list, no donors will be used.
:type donor_catchments: :class:`Catchment`
:return: QMED in m³/s
:rtype: float
"""
try:
# Basis rural QMED from descriptors
lnqmed_rural = 2.1170 \
+ 0.8510 * log(self.catchment.descriptors.dtm_area) \
- 1.8734 * 1000 / self.catchment.descriptors.saar \
+ 3.4451 * log(self.catchment.descriptors.farl) \
- 3.0800 * self.catchment.descriptors.bfihost ** 2.0
qmed_rural = exp(lnqmed_rural)
# Log intermediate results
self.results_log['qmed_descr_rural'] = qmed_rural
if donor_catchments is None:
# If no donor catchments are provided, find the nearest 25
donor_catchments = self.find_donor_catchments()
if donor_catchments:
# If found multiply rural estimate with weighted adjustment factors from all donors
weights = self._vec_alpha(donor_catchments)
errors = self._vec_lnqmed_residuals(donor_catchments)
correction = np.dot(weights, errors)
lnqmed_rural += correction
qmed_rural = exp(lnqmed_rural)
# Log intermediate results
self.results_log['donors'] = donor_catchments
for i, donor in enumerate(self.results_log['donors']):
donor.weight = weights[i]
donor.factor = exp(errors[i])
self.results_log['donor_adj_factor'] = exp(correction)
self.results_log['qmed_adj_rural'] = qmed_rural
if as_rural:
return qmed_rural
else:
# Apply urbanisation adjustment
urban_adj_factor = self.urban_adj_factor()
# Log intermediate results
self.results_log['qmed_descr_urban'] = self.results_log['qmed_descr_rural'] * urban_adj_factor
return qmed_rural * urban_adj_factor
except (TypeError, KeyError):
raise InsufficientDataError("Catchment `descriptors` attribute must be set first.") | [
"def",
"_qmed_from_descriptors_2008",
"(",
"self",
",",
"as_rural",
"=",
"False",
",",
"donor_catchments",
"=",
"None",
")",
":",
"try",
":",
"lnqmed_rural",
"=",
"2.1170",
"+",
"0.8510",
"*",
"log",
"(",
"self",
".",
"catchment",
".",
"descriptors",
".",
"dtm_area",
")",
"-",
"1.8734",
"*",
"1000",
"/",
"self",
".",
"catchment",
".",
"descriptors",
".",
"saar",
"+",
"3.4451",
"*",
"log",
"(",
"self",
".",
"catchment",
".",
"descriptors",
".",
"farl",
")",
"-",
"3.0800",
"*",
"self",
".",
"catchment",
".",
"descriptors",
".",
"bfihost",
"**",
"2.0",
"qmed_rural",
"=",
"exp",
"(",
"lnqmed_rural",
")",
"self",
".",
"results_log",
"[",
"'qmed_descr_rural'",
"]",
"=",
"qmed_rural",
"if",
"donor_catchments",
"is",
"None",
":",
"donor_catchments",
"=",
"self",
".",
"find_donor_catchments",
"(",
")",
"if",
"donor_catchments",
":",
"weights",
"=",
"self",
".",
"_vec_alpha",
"(",
"donor_catchments",
")",
"errors",
"=",
"self",
".",
"_vec_lnqmed_residuals",
"(",
"donor_catchments",
")",
"correction",
"=",
"np",
".",
"dot",
"(",
"weights",
",",
"errors",
")",
"lnqmed_rural",
"+=",
"correction",
"qmed_rural",
"=",
"exp",
"(",
"lnqmed_rural",
")",
"self",
".",
"results_log",
"[",
"'donors'",
"]",
"=",
"donor_catchments",
"for",
"i",
",",
"donor",
"in",
"enumerate",
"(",
"self",
".",
"results_log",
"[",
"'donors'",
"]",
")",
":",
"donor",
".",
"weight",
"=",
"weights",
"[",
"i",
"]",
"donor",
".",
"factor",
"=",
"exp",
"(",
"errors",
"[",
"i",
"]",
")",
"self",
".",
"results_log",
"[",
"'donor_adj_factor'",
"]",
"=",
"exp",
"(",
"correction",
")",
"self",
".",
"results_log",
"[",
"'qmed_adj_rural'",
"]",
"=",
"qmed_rural",
"if",
"as_rural",
":",
"return",
"qmed_rural",
"else",
":",
"urban_adj_factor",
"=",
"self",
".",
"urban_adj_factor",
"(",
")",
"self",
".",
"results_log",
"[",
"'qmed_descr_urban'",
"]",
"=",
"self",
".",
"results_log",
"[",
"'qmed_descr_rural'",
"]",
"*",
"urban_adj_factor",
"return",
"qmed_rural",
"*",
"urban_adj_factor",
"except",
"(",
"TypeError",
",",
"KeyError",
")",
":",
"raise",
"InsufficientDataError",
"(",
"\"Catchment `descriptors` attribute must be set first.\"",
")"
] | Return QMED estimation based on FEH catchment descriptors, 2008 methodology.
Methodology source: Science Report SC050050, p. 36
:param as_rural: assume catchment is fully rural. Default: false.
:type as rural: bool
:param donor_catchments: override donor catchment to improve QMED catchment. If `None` (default),
donor catchment will be searched automatically, if empty list, no donors will be used.
:type donor_catchments: :class:`Catchment`
:return: QMED in m³/s
:rtype: float | [
"Return",
"QMED",
"estimation",
"based",
"on",
"FEH",
"catchment",
"descriptors",
"2008",
"methodology",
"."
] | 782da7c5abd1348923129efe89fb70003ebb088c | https://github.com/OpenHydrology/floodestimation/blob/782da7c5abd1348923129efe89fb70003ebb088c/floodestimation/analysis.py#L359-L416 | train |
OpenHydrology/floodestimation | floodestimation/analysis.py | QmedAnalysis._pruaf | def _pruaf(self):
"""
Return percentage runoff urban adjustment factor.
Methodology source: eqn. 6, Kjeldsen 2010
"""
return 1 + 0.47 * self.catchment.descriptors.urbext(self.year) \
* self.catchment.descriptors.bfihost / (1 - self.catchment.descriptors.bfihost) | python | def _pruaf(self):
"""
Return percentage runoff urban adjustment factor.
Methodology source: eqn. 6, Kjeldsen 2010
"""
return 1 + 0.47 * self.catchment.descriptors.urbext(self.year) \
* self.catchment.descriptors.bfihost / (1 - self.catchment.descriptors.bfihost) | [
"def",
"_pruaf",
"(",
"self",
")",
":",
"return",
"1",
"+",
"0.47",
"*",
"self",
".",
"catchment",
".",
"descriptors",
".",
"urbext",
"(",
"self",
".",
"year",
")",
"*",
"self",
".",
"catchment",
".",
"descriptors",
".",
"bfihost",
"/",
"(",
"1",
"-",
"self",
".",
"catchment",
".",
"descriptors",
".",
"bfihost",
")"
] | Return percentage runoff urban adjustment factor.
Methodology source: eqn. 6, Kjeldsen 2010 | [
"Return",
"percentage",
"runoff",
"urban",
"adjustment",
"factor",
"."
] | 782da7c5abd1348923129efe89fb70003ebb088c | https://github.com/OpenHydrology/floodestimation/blob/782da7c5abd1348923129efe89fb70003ebb088c/floodestimation/analysis.py#L418-L425 | train |
OpenHydrology/floodestimation | floodestimation/analysis.py | QmedAnalysis._dist_corr | def _dist_corr(dist, phi1, phi2, phi3):
"""
Generic distance-decaying correlation function
:param dist: Distance between catchment centrolds in km
:type dist: float
:param phi1: Decay function parameters 1
:type phi1: float
:param phi2: Decay function parameters 2
:type phi2: float
:param phi3: Decay function parameters 3
:type phi3: float
:return: Correlation coefficient, r
:rtype: float
"""
return phi1 * exp(-phi2 * dist) + (1 - phi1) * exp(-phi3 * dist) | python | def _dist_corr(dist, phi1, phi2, phi3):
"""
Generic distance-decaying correlation function
:param dist: Distance between catchment centrolds in km
:type dist: float
:param phi1: Decay function parameters 1
:type phi1: float
:param phi2: Decay function parameters 2
:type phi2: float
:param phi3: Decay function parameters 3
:type phi3: float
:return: Correlation coefficient, r
:rtype: float
"""
return phi1 * exp(-phi2 * dist) + (1 - phi1) * exp(-phi3 * dist) | [
"def",
"_dist_corr",
"(",
"dist",
",",
"phi1",
",",
"phi2",
",",
"phi3",
")",
":",
"return",
"phi1",
"*",
"exp",
"(",
"-",
"phi2",
"*",
"dist",
")",
"+",
"(",
"1",
"-",
"phi1",
")",
"*",
"exp",
"(",
"-",
"phi3",
"*",
"dist",
")"
] | Generic distance-decaying correlation function
:param dist: Distance between catchment centrolds in km
:type dist: float
:param phi1: Decay function parameters 1
:type phi1: float
:param phi2: Decay function parameters 2
:type phi2: float
:param phi3: Decay function parameters 3
:type phi3: float
:return: Correlation coefficient, r
:rtype: float | [
"Generic",
"distance",
"-",
"decaying",
"correlation",
"function"
] | 782da7c5abd1348923129efe89fb70003ebb088c | https://github.com/OpenHydrology/floodestimation/blob/782da7c5abd1348923129efe89fb70003ebb088c/floodestimation/analysis.py#L443-L458 | train |
OpenHydrology/floodestimation | floodestimation/analysis.py | QmedAnalysis._vec_b | def _vec_b(self, donor_catchments):
"""
Return vector ``b`` of model error covariances to estimate weights
Methodology source: Kjeldsen, Jones and Morris, 2009, eqs 3 and 10
:param donor_catchments: Catchments to use as donors
:type donor_catchments: list of :class:`Catchment`
:return: Model error covariance vector
:rtype: :class:`numpy.ndarray`
"""
p = len(donor_catchments)
b = 0.1175 * np.ones(p)
for i in range(p):
b[i] *= self._model_error_corr(self.catchment, donor_catchments[i])
return b | python | def _vec_b(self, donor_catchments):
"""
Return vector ``b`` of model error covariances to estimate weights
Methodology source: Kjeldsen, Jones and Morris, 2009, eqs 3 and 10
:param donor_catchments: Catchments to use as donors
:type donor_catchments: list of :class:`Catchment`
:return: Model error covariance vector
:rtype: :class:`numpy.ndarray`
"""
p = len(donor_catchments)
b = 0.1175 * np.ones(p)
for i in range(p):
b[i] *= self._model_error_corr(self.catchment, donor_catchments[i])
return b | [
"def",
"_vec_b",
"(",
"self",
",",
"donor_catchments",
")",
":",
"p",
"=",
"len",
"(",
"donor_catchments",
")",
"b",
"=",
"0.1175",
"*",
"np",
".",
"ones",
"(",
"p",
")",
"for",
"i",
"in",
"range",
"(",
"p",
")",
":",
"b",
"[",
"i",
"]",
"*=",
"self",
".",
"_model_error_corr",
"(",
"self",
".",
"catchment",
",",
"donor_catchments",
"[",
"i",
"]",
")",
"return",
"b"
] | Return vector ``b`` of model error covariances to estimate weights
Methodology source: Kjeldsen, Jones and Morris, 2009, eqs 3 and 10
:param donor_catchments: Catchments to use as donors
:type donor_catchments: list of :class:`Catchment`
:return: Model error covariance vector
:rtype: :class:`numpy.ndarray` | [
"Return",
"vector",
"b",
"of",
"model",
"error",
"covariances",
"to",
"estimate",
"weights"
] | 782da7c5abd1348923129efe89fb70003ebb088c | https://github.com/OpenHydrology/floodestimation/blob/782da7c5abd1348923129efe89fb70003ebb088c/floodestimation/analysis.py#L492-L507 | train |
OpenHydrology/floodestimation | floodestimation/analysis.py | QmedAnalysis._beta | def _beta(catchment):
"""
Return beta, the GLO scale parameter divided by loc parameter estimated using simple regression model
Methodology source: Kjeldsen & Jones, 2009, table 2
:param catchment: Catchment to estimate beta for
:type catchment: :class:`Catchment`
:return: beta
:rtype: float
"""
lnbeta = -1.1221 \
- 0.0816 * log(catchment.descriptors.dtm_area) \
- 0.4580 * log(catchment.descriptors.saar / 1000) \
+ 0.1065 * log(catchment.descriptors.bfihost)
return exp(lnbeta) | python | def _beta(catchment):
"""
Return beta, the GLO scale parameter divided by loc parameter estimated using simple regression model
Methodology source: Kjeldsen & Jones, 2009, table 2
:param catchment: Catchment to estimate beta for
:type catchment: :class:`Catchment`
:return: beta
:rtype: float
"""
lnbeta = -1.1221 \
- 0.0816 * log(catchment.descriptors.dtm_area) \
- 0.4580 * log(catchment.descriptors.saar / 1000) \
+ 0.1065 * log(catchment.descriptors.bfihost)
return exp(lnbeta) | [
"def",
"_beta",
"(",
"catchment",
")",
":",
"lnbeta",
"=",
"-",
"1.1221",
"-",
"0.0816",
"*",
"log",
"(",
"catchment",
".",
"descriptors",
".",
"dtm_area",
")",
"-",
"0.4580",
"*",
"log",
"(",
"catchment",
".",
"descriptors",
".",
"saar",
"/",
"1000",
")",
"+",
"0.1065",
"*",
"log",
"(",
"catchment",
".",
"descriptors",
".",
"bfihost",
")",
"return",
"exp",
"(",
"lnbeta",
")"
] | Return beta, the GLO scale parameter divided by loc parameter estimated using simple regression model
Methodology source: Kjeldsen & Jones, 2009, table 2
:param catchment: Catchment to estimate beta for
:type catchment: :class:`Catchment`
:return: beta
:rtype: float | [
"Return",
"beta",
"the",
"GLO",
"scale",
"parameter",
"divided",
"by",
"loc",
"parameter",
"estimated",
"using",
"simple",
"regression",
"model"
] | 782da7c5abd1348923129efe89fb70003ebb088c | https://github.com/OpenHydrology/floodestimation/blob/782da7c5abd1348923129efe89fb70003ebb088c/floodestimation/analysis.py#L510-L525 | train |
OpenHydrology/floodestimation | floodestimation/analysis.py | QmedAnalysis._matrix_sigma_eta | def _matrix_sigma_eta(self, donor_catchments):
"""
Return model error coveriance matrix Sigma eta
Methodology source: Kjelsen, Jones & Morris 2014, eqs 2 and 3
:param donor_catchments: Catchments to use as donors
:type donor_catchments: list of :class:`Catchment`
:return: 2-Dimensional, symmetric covariance matrix
:rtype: :class:`numpy.ndarray`
"""
p = len(donor_catchments)
sigma = 0.1175 * np.ones((p, p))
for i in range(p):
for j in range(p):
if i != j:
sigma[i, j] *= self._model_error_corr(donor_catchments[i], donor_catchments[j])
return sigma | python | def _matrix_sigma_eta(self, donor_catchments):
"""
Return model error coveriance matrix Sigma eta
Methodology source: Kjelsen, Jones & Morris 2014, eqs 2 and 3
:param donor_catchments: Catchments to use as donors
:type donor_catchments: list of :class:`Catchment`
:return: 2-Dimensional, symmetric covariance matrix
:rtype: :class:`numpy.ndarray`
"""
p = len(donor_catchments)
sigma = 0.1175 * np.ones((p, p))
for i in range(p):
for j in range(p):
if i != j:
sigma[i, j] *= self._model_error_corr(donor_catchments[i], donor_catchments[j])
return sigma | [
"def",
"_matrix_sigma_eta",
"(",
"self",
",",
"donor_catchments",
")",
":",
"p",
"=",
"len",
"(",
"donor_catchments",
")",
"sigma",
"=",
"0.1175",
"*",
"np",
".",
"ones",
"(",
"(",
"p",
",",
"p",
")",
")",
"for",
"i",
"in",
"range",
"(",
"p",
")",
":",
"for",
"j",
"in",
"range",
"(",
"p",
")",
":",
"if",
"i",
"!=",
"j",
":",
"sigma",
"[",
"i",
",",
"j",
"]",
"*=",
"self",
".",
"_model_error_corr",
"(",
"donor_catchments",
"[",
"i",
"]",
",",
"donor_catchments",
"[",
"j",
"]",
")",
"return",
"sigma"
] | Return model error coveriance matrix Sigma eta
Methodology source: Kjelsen, Jones & Morris 2014, eqs 2 and 3
:param donor_catchments: Catchments to use as donors
:type donor_catchments: list of :class:`Catchment`
:return: 2-Dimensional, symmetric covariance matrix
:rtype: :class:`numpy.ndarray` | [
"Return",
"model",
"error",
"coveriance",
"matrix",
"Sigma",
"eta"
] | 782da7c5abd1348923129efe89fb70003ebb088c | https://github.com/OpenHydrology/floodestimation/blob/782da7c5abd1348923129efe89fb70003ebb088c/floodestimation/analysis.py#L527-L544 | train |
OpenHydrology/floodestimation | floodestimation/analysis.py | QmedAnalysis._matrix_sigma_eps | def _matrix_sigma_eps(self, donor_catchments):
"""
Return sampling error coveriance matrix Sigma eta
Methodology source: Kjeldsen & Jones 2009, eq 9
:param donor_catchments: Catchments to use as donors
:type donor_catchments: list of :class:`Catchment`
:return: 2-Dimensional, symmetric covariance matrix
:rtype: :class:`numpy.ndarray`
"""
p = len(donor_catchments)
sigma = np.empty((p, p))
for i in range(p):
beta_i = self._beta(donor_catchments[i])
n_i = donor_catchments[i].amax_records_end() - donor_catchments[i].amax_records_start() + 1
for j in range(p):
beta_j = self._beta(donor_catchments[j])
n_j = donor_catchments[j].amax_records_end() - donor_catchments[j].amax_records_start() + 1
rho_ij = self._lnqmed_corr(donor_catchments[i], donor_catchments[j])
n_ij = min(donor_catchments[i].amax_records_end(), donor_catchments[j].amax_records_end()) - \
max(donor_catchments[i].amax_records_start(), donor_catchments[j].amax_records_start()) + 1
sigma[i, j] = 4 * beta_i * beta_j * n_ij / n_i / n_j * rho_ij
return sigma | python | def _matrix_sigma_eps(self, donor_catchments):
"""
Return sampling error coveriance matrix Sigma eta
Methodology source: Kjeldsen & Jones 2009, eq 9
:param donor_catchments: Catchments to use as donors
:type donor_catchments: list of :class:`Catchment`
:return: 2-Dimensional, symmetric covariance matrix
:rtype: :class:`numpy.ndarray`
"""
p = len(donor_catchments)
sigma = np.empty((p, p))
for i in range(p):
beta_i = self._beta(donor_catchments[i])
n_i = donor_catchments[i].amax_records_end() - donor_catchments[i].amax_records_start() + 1
for j in range(p):
beta_j = self._beta(donor_catchments[j])
n_j = donor_catchments[j].amax_records_end() - donor_catchments[j].amax_records_start() + 1
rho_ij = self._lnqmed_corr(donor_catchments[i], donor_catchments[j])
n_ij = min(donor_catchments[i].amax_records_end(), donor_catchments[j].amax_records_end()) - \
max(donor_catchments[i].amax_records_start(), donor_catchments[j].amax_records_start()) + 1
sigma[i, j] = 4 * beta_i * beta_j * n_ij / n_i / n_j * rho_ij
return sigma | [
"def",
"_matrix_sigma_eps",
"(",
"self",
",",
"donor_catchments",
")",
":",
"p",
"=",
"len",
"(",
"donor_catchments",
")",
"sigma",
"=",
"np",
".",
"empty",
"(",
"(",
"p",
",",
"p",
")",
")",
"for",
"i",
"in",
"range",
"(",
"p",
")",
":",
"beta_i",
"=",
"self",
".",
"_beta",
"(",
"donor_catchments",
"[",
"i",
"]",
")",
"n_i",
"=",
"donor_catchments",
"[",
"i",
"]",
".",
"amax_records_end",
"(",
")",
"-",
"donor_catchments",
"[",
"i",
"]",
".",
"amax_records_start",
"(",
")",
"+",
"1",
"for",
"j",
"in",
"range",
"(",
"p",
")",
":",
"beta_j",
"=",
"self",
".",
"_beta",
"(",
"donor_catchments",
"[",
"j",
"]",
")",
"n_j",
"=",
"donor_catchments",
"[",
"j",
"]",
".",
"amax_records_end",
"(",
")",
"-",
"donor_catchments",
"[",
"j",
"]",
".",
"amax_records_start",
"(",
")",
"+",
"1",
"rho_ij",
"=",
"self",
".",
"_lnqmed_corr",
"(",
"donor_catchments",
"[",
"i",
"]",
",",
"donor_catchments",
"[",
"j",
"]",
")",
"n_ij",
"=",
"min",
"(",
"donor_catchments",
"[",
"i",
"]",
".",
"amax_records_end",
"(",
")",
",",
"donor_catchments",
"[",
"j",
"]",
".",
"amax_records_end",
"(",
")",
")",
"-",
"max",
"(",
"donor_catchments",
"[",
"i",
"]",
".",
"amax_records_start",
"(",
")",
",",
"donor_catchments",
"[",
"j",
"]",
".",
"amax_records_start",
"(",
")",
")",
"+",
"1",
"sigma",
"[",
"i",
",",
"j",
"]",
"=",
"4",
"*",
"beta_i",
"*",
"beta_j",
"*",
"n_ij",
"/",
"n_i",
"/",
"n_j",
"*",
"rho_ij",
"return",
"sigma"
] | Return sampling error coveriance matrix Sigma eta
Methodology source: Kjeldsen & Jones 2009, eq 9
:param donor_catchments: Catchments to use as donors
:type donor_catchments: list of :class:`Catchment`
:return: 2-Dimensional, symmetric covariance matrix
:rtype: :class:`numpy.ndarray` | [
"Return",
"sampling",
"error",
"coveriance",
"matrix",
"Sigma",
"eta"
] | 782da7c5abd1348923129efe89fb70003ebb088c | https://github.com/OpenHydrology/floodestimation/blob/782da7c5abd1348923129efe89fb70003ebb088c/floodestimation/analysis.py#L546-L569 | train |
OpenHydrology/floodestimation | floodestimation/analysis.py | QmedAnalysis._vec_alpha | def _vec_alpha(self, donor_catchments):
"""
Return vector alpha which is the weights for donor model errors
Methodology source: Kjeldsen, Jones & Morris 2014, eq 10
:param donor_catchments: Catchments to use as donors
:type donor_catchments: list of :class:`Catchment`
:return: Vector of donor weights
:rtype: :class:`numpy.ndarray`
"""
return np.dot(linalg.inv(self._matrix_omega(donor_catchments)), self._vec_b(donor_catchments)) | python | def _vec_alpha(self, donor_catchments):
"""
Return vector alpha which is the weights for donor model errors
Methodology source: Kjeldsen, Jones & Morris 2014, eq 10
:param donor_catchments: Catchments to use as donors
:type donor_catchments: list of :class:`Catchment`
:return: Vector of donor weights
:rtype: :class:`numpy.ndarray`
"""
return np.dot(linalg.inv(self._matrix_omega(donor_catchments)), self._vec_b(donor_catchments)) | [
"def",
"_vec_alpha",
"(",
"self",
",",
"donor_catchments",
")",
":",
"return",
"np",
".",
"dot",
"(",
"linalg",
".",
"inv",
"(",
"self",
".",
"_matrix_omega",
"(",
"donor_catchments",
")",
")",
",",
"self",
".",
"_vec_b",
"(",
"donor_catchments",
")",
")"
] | Return vector alpha which is the weights for donor model errors
Methodology source: Kjeldsen, Jones & Morris 2014, eq 10
:param donor_catchments: Catchments to use as donors
:type donor_catchments: list of :class:`Catchment`
:return: Vector of donor weights
:rtype: :class:`numpy.ndarray` | [
"Return",
"vector",
"alpha",
"which",
"is",
"the",
"weights",
"for",
"donor",
"model",
"errors"
] | 782da7c5abd1348923129efe89fb70003ebb088c | https://github.com/OpenHydrology/floodestimation/blob/782da7c5abd1348923129efe89fb70003ebb088c/floodestimation/analysis.py#L574-L585 | train |
OpenHydrology/floodestimation | floodestimation/analysis.py | QmedAnalysis.find_donor_catchments | def find_donor_catchments(self, limit=6, dist_limit=500):
"""
Return a suitable donor catchment to improve a QMED estimate based on catchment descriptors alone.
:param limit: maximum number of catchments to return. Default: 6. Set to `None` to return all available
catchments.
:type limit: int
:param dist_limit: maximum distance in km. between subject and donor catchment. Default: 500 km. Increasing the
maximum distance will increase computation time!
:type dist_limit: float or int
:return: list of nearby catchments
:rtype: :class:`floodestimation.entities.Catchment`
"""
if self.gauged_catchments:
return self.gauged_catchments.nearest_qmed_catchments(self.catchment, limit, dist_limit)
else:
return [] | python | def find_donor_catchments(self, limit=6, dist_limit=500):
"""
Return a suitable donor catchment to improve a QMED estimate based on catchment descriptors alone.
:param limit: maximum number of catchments to return. Default: 6. Set to `None` to return all available
catchments.
:type limit: int
:param dist_limit: maximum distance in km. between subject and donor catchment. Default: 500 km. Increasing the
maximum distance will increase computation time!
:type dist_limit: float or int
:return: list of nearby catchments
:rtype: :class:`floodestimation.entities.Catchment`
"""
if self.gauged_catchments:
return self.gauged_catchments.nearest_qmed_catchments(self.catchment, limit, dist_limit)
else:
return [] | [
"def",
"find_donor_catchments",
"(",
"self",
",",
"limit",
"=",
"6",
",",
"dist_limit",
"=",
"500",
")",
":",
"if",
"self",
".",
"gauged_catchments",
":",
"return",
"self",
".",
"gauged_catchments",
".",
"nearest_qmed_catchments",
"(",
"self",
".",
"catchment",
",",
"limit",
",",
"dist_limit",
")",
"else",
":",
"return",
"[",
"]"
] | Return a suitable donor catchment to improve a QMED estimate based on catchment descriptors alone.
:param limit: maximum number of catchments to return. Default: 6. Set to `None` to return all available
catchments.
:type limit: int
:param dist_limit: maximum distance in km. between subject and donor catchment. Default: 500 km. Increasing the
maximum distance will increase computation time!
:type dist_limit: float or int
:return: list of nearby catchments
:rtype: :class:`floodestimation.entities.Catchment` | [
"Return",
"a",
"suitable",
"donor",
"catchment",
"to",
"improve",
"a",
"QMED",
"estimate",
"based",
"on",
"catchment",
"descriptors",
"alone",
"."
] | 782da7c5abd1348923129efe89fb70003ebb088c | https://github.com/OpenHydrology/floodestimation/blob/782da7c5abd1348923129efe89fb70003ebb088c/floodestimation/analysis.py#L616-L632 | train |
OpenHydrology/floodestimation | floodestimation/analysis.py | GrowthCurveAnalysis._var_and_skew | def _var_and_skew(self, catchments, as_rural=False):
"""
Calculate L-CV and L-SKEW from a single catchment or a pooled group of catchments.
Methodology source: Science Report SC050050, para. 6.4.1-6.4.2
"""
if not hasattr(catchments, '__getitem__'): # In case of a single catchment
l_cv, l_skew = self._l_cv_and_skew(self.catchment)
self.results_log['donors'] = []
else:
# Prepare arrays for donor L-CVs and L-SKEWs and their weights
n = len(catchments)
l_cvs = np.empty(n)
l_skews = np.empty(n)
l_cv_weights = np.empty(n)
l_skew_weights = np.empty(n)
# Fill arrays
for index, donor in enumerate(catchments):
l_cvs[index], l_skews[index] = self._l_cv_and_skew(donor)
l_cv_weights[index] = self._l_cv_weight(donor)
l_skew_weights[index] = self._l_skew_weight(donor)
# Weighted averages of L-CV and l-SKEW
l_cv_weights /= sum(l_cv_weights) # Weights sum to 1
# Special case if the first donor is the subject catchment itself, assumed if similarity distance == 0.
if self._similarity_distance(self.catchment, catchments[0]) == 0:
l_cv_weights *= self._l_cv_weight_factor() # Reduce weights of all donor catchments
l_cv_weights[0] += 1 - sum(l_cv_weights) # But increase the weight of the subject catchment
l_cv_rural = sum(l_cv_weights * l_cvs)
l_skew_weights /= sum(l_skew_weights) # Weights sum to 1
l_skew_rural = sum(l_skew_weights * l_skews)
self.results_log['l_cv_rural'] = l_cv_rural
self.results_log['l_skew_rural'] = l_skew_rural
if as_rural:
l_cv = l_cv_rural
l_skew = l_skew_rural
else:
# Method source: eqns. 10 and 11, Kjeldsen 2010
l_cv = l_cv_rural * 0.5547 ** self.catchment.descriptors.urbext(self.year)
l_skew = (l_skew_rural + 1) * 1.1545 ** self.catchment.descriptors.urbext(self.year) - 1
# Record intermediate results (donors)
self.results_log['donors'] = catchments
total_record_length = 0
for index, donor in enumerate(self.results_log['donors']):
donor.l_cv = l_cvs[index]
donor.l_cv_weight = l_cv_weights[index]
donor.l_skew = l_skews[index]
donor.l_skew_weight = l_skew_weights[index]
total_record_length += donor.record_length
self.results_log['donors_record_length'] = total_record_length
# Record intermediate results
self.results_log['l_cv'] = l_cv
self.results_log['l_skew'] = l_skew
return l_cv, l_skew | python | def _var_and_skew(self, catchments, as_rural=False):
"""
Calculate L-CV and L-SKEW from a single catchment or a pooled group of catchments.
Methodology source: Science Report SC050050, para. 6.4.1-6.4.2
"""
if not hasattr(catchments, '__getitem__'): # In case of a single catchment
l_cv, l_skew = self._l_cv_and_skew(self.catchment)
self.results_log['donors'] = []
else:
# Prepare arrays for donor L-CVs and L-SKEWs and their weights
n = len(catchments)
l_cvs = np.empty(n)
l_skews = np.empty(n)
l_cv_weights = np.empty(n)
l_skew_weights = np.empty(n)
# Fill arrays
for index, donor in enumerate(catchments):
l_cvs[index], l_skews[index] = self._l_cv_and_skew(donor)
l_cv_weights[index] = self._l_cv_weight(donor)
l_skew_weights[index] = self._l_skew_weight(donor)
# Weighted averages of L-CV and l-SKEW
l_cv_weights /= sum(l_cv_weights) # Weights sum to 1
# Special case if the first donor is the subject catchment itself, assumed if similarity distance == 0.
if self._similarity_distance(self.catchment, catchments[0]) == 0:
l_cv_weights *= self._l_cv_weight_factor() # Reduce weights of all donor catchments
l_cv_weights[0] += 1 - sum(l_cv_weights) # But increase the weight of the subject catchment
l_cv_rural = sum(l_cv_weights * l_cvs)
l_skew_weights /= sum(l_skew_weights) # Weights sum to 1
l_skew_rural = sum(l_skew_weights * l_skews)
self.results_log['l_cv_rural'] = l_cv_rural
self.results_log['l_skew_rural'] = l_skew_rural
if as_rural:
l_cv = l_cv_rural
l_skew = l_skew_rural
else:
# Method source: eqns. 10 and 11, Kjeldsen 2010
l_cv = l_cv_rural * 0.5547 ** self.catchment.descriptors.urbext(self.year)
l_skew = (l_skew_rural + 1) * 1.1545 ** self.catchment.descriptors.urbext(self.year) - 1
# Record intermediate results (donors)
self.results_log['donors'] = catchments
total_record_length = 0
for index, donor in enumerate(self.results_log['donors']):
donor.l_cv = l_cvs[index]
donor.l_cv_weight = l_cv_weights[index]
donor.l_skew = l_skews[index]
donor.l_skew_weight = l_skew_weights[index]
total_record_length += donor.record_length
self.results_log['donors_record_length'] = total_record_length
# Record intermediate results
self.results_log['l_cv'] = l_cv
self.results_log['l_skew'] = l_skew
return l_cv, l_skew | [
"def",
"_var_and_skew",
"(",
"self",
",",
"catchments",
",",
"as_rural",
"=",
"False",
")",
":",
"if",
"not",
"hasattr",
"(",
"catchments",
",",
"'__getitem__'",
")",
":",
"l_cv",
",",
"l_skew",
"=",
"self",
".",
"_l_cv_and_skew",
"(",
"self",
".",
"catchment",
")",
"self",
".",
"results_log",
"[",
"'donors'",
"]",
"=",
"[",
"]",
"else",
":",
"n",
"=",
"len",
"(",
"catchments",
")",
"l_cvs",
"=",
"np",
".",
"empty",
"(",
"n",
")",
"l_skews",
"=",
"np",
".",
"empty",
"(",
"n",
")",
"l_cv_weights",
"=",
"np",
".",
"empty",
"(",
"n",
")",
"l_skew_weights",
"=",
"np",
".",
"empty",
"(",
"n",
")",
"for",
"index",
",",
"donor",
"in",
"enumerate",
"(",
"catchments",
")",
":",
"l_cvs",
"[",
"index",
"]",
",",
"l_skews",
"[",
"index",
"]",
"=",
"self",
".",
"_l_cv_and_skew",
"(",
"donor",
")",
"l_cv_weights",
"[",
"index",
"]",
"=",
"self",
".",
"_l_cv_weight",
"(",
"donor",
")",
"l_skew_weights",
"[",
"index",
"]",
"=",
"self",
".",
"_l_skew_weight",
"(",
"donor",
")",
"l_cv_weights",
"/=",
"sum",
"(",
"l_cv_weights",
")",
"if",
"self",
".",
"_similarity_distance",
"(",
"self",
".",
"catchment",
",",
"catchments",
"[",
"0",
"]",
")",
"==",
"0",
":",
"l_cv_weights",
"*=",
"self",
".",
"_l_cv_weight_factor",
"(",
")",
"l_cv_weights",
"[",
"0",
"]",
"+=",
"1",
"-",
"sum",
"(",
"l_cv_weights",
")",
"l_cv_rural",
"=",
"sum",
"(",
"l_cv_weights",
"*",
"l_cvs",
")",
"l_skew_weights",
"/=",
"sum",
"(",
"l_skew_weights",
")",
"l_skew_rural",
"=",
"sum",
"(",
"l_skew_weights",
"*",
"l_skews",
")",
"self",
".",
"results_log",
"[",
"'l_cv_rural'",
"]",
"=",
"l_cv_rural",
"self",
".",
"results_log",
"[",
"'l_skew_rural'",
"]",
"=",
"l_skew_rural",
"if",
"as_rural",
":",
"l_cv",
"=",
"l_cv_rural",
"l_skew",
"=",
"l_skew_rural",
"else",
":",
"l_cv",
"=",
"l_cv_rural",
"*",
"0.5547",
"**",
"self",
".",
"catchment",
".",
"descriptors",
".",
"urbext",
"(",
"self",
".",
"year",
")",
"l_skew",
"=",
"(",
"l_skew_rural",
"+",
"1",
")",
"*",
"1.1545",
"**",
"self",
".",
"catchment",
".",
"descriptors",
".",
"urbext",
"(",
"self",
".",
"year",
")",
"-",
"1",
"self",
".",
"results_log",
"[",
"'donors'",
"]",
"=",
"catchments",
"total_record_length",
"=",
"0",
"for",
"index",
",",
"donor",
"in",
"enumerate",
"(",
"self",
".",
"results_log",
"[",
"'donors'",
"]",
")",
":",
"donor",
".",
"l_cv",
"=",
"l_cvs",
"[",
"index",
"]",
"donor",
".",
"l_cv_weight",
"=",
"l_cv_weights",
"[",
"index",
"]",
"donor",
".",
"l_skew",
"=",
"l_skews",
"[",
"index",
"]",
"donor",
".",
"l_skew_weight",
"=",
"l_skew_weights",
"[",
"index",
"]",
"total_record_length",
"+=",
"donor",
".",
"record_length",
"self",
".",
"results_log",
"[",
"'donors_record_length'",
"]",
"=",
"total_record_length",
"self",
".",
"results_log",
"[",
"'l_cv'",
"]",
"=",
"l_cv",
"self",
".",
"results_log",
"[",
"'l_skew'",
"]",
"=",
"l_skew",
"return",
"l_cv",
",",
"l_skew"
] | Calculate L-CV and L-SKEW from a single catchment or a pooled group of catchments.
Methodology source: Science Report SC050050, para. 6.4.1-6.4.2 | [
"Calculate",
"L",
"-",
"CV",
"and",
"L",
"-",
"SKEW",
"from",
"a",
"single",
"catchment",
"or",
"a",
"pooled",
"group",
"of",
"catchments",
"."
] | 782da7c5abd1348923129efe89fb70003ebb088c | https://github.com/OpenHydrology/floodestimation/blob/782da7c5abd1348923129efe89fb70003ebb088c/floodestimation/analysis.py#L713-L770 | train |
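The urban adjustment applied at the end of _var_and_skew is a pair of simple formulas and can be checked by hand. In the sketch below the rural L-moment ratios and the URBEXT value are invented for illustration; the constants 0.5547 and 1.1545 come from the code above.

# Hypothetical rural L-moment ratios and urban extent (URBEXT) of the subject catchment
l_cv_rural, l_skew_rural = 0.20, 0.15
urbext = 0.10

# Urban adjustment as applied in _var_and_skew when as_rural is False
l_cv = l_cv_rural * 0.5547 ** urbext                 # about 0.189: urbanisation lowers L-CV
l_skew = (l_skew_rural + 1) * 1.1545 ** urbext - 1   # about 0.167: urbanisation raises L-SKEW
print(l_cv, l_skew)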
OpenHydrology/floodestimation | floodestimation/analysis.py | GrowthCurveAnalysis._l_cv_and_skew | def _l_cv_and_skew(self, catchment):
"""
Calculate L-CV and L-SKEW for a gauged catchment. Uses `lmoments3` library.
Methodology source: Science Report SC050050, para. 6.7.5
"""
z = self._dimensionless_flows(catchment)
l1, l2, t3 = lm.lmom_ratios(z, nmom=3)
return l2 / l1, t3 | python | def _l_cv_and_skew(self, catchment):
"""
Calculate L-CV and L-SKEW for a gauged catchment. Uses `lmoments3` library.
Methodology source: Science Report SC050050, para. 6.7.5
"""
z = self._dimensionless_flows(catchment)
l1, l2, t3 = lm.lmom_ratios(z, nmom=3)
return l2 / l1, t3 | [
"def",
"_l_cv_and_skew",
"(",
"self",
",",
"catchment",
")",
":",
"z",
"=",
"self",
".",
"_dimensionless_flows",
"(",
"catchment",
")",
"l1",
",",
"l2",
",",
"t3",
"=",
"lm",
".",
"lmom_ratios",
"(",
"z",
",",
"nmom",
"=",
"3",
")",
"return",
"l2",
"/",
"l1",
",",
"t3"
] | Calculate L-CV and L-SKEW for a gauged catchment. Uses `lmoments3` library.
Methodology source: Science Report SC050050, para. 6.7.5 | [
"Calculate",
"L",
"-",
"CV",
"and",
"L",
"-",
"SKEW",
"for",
"a",
"gauged",
"catchment",
".",
"Uses",
"lmoments3",
"library",
"."
] | 782da7c5abd1348923129efe89fb70003ebb088c | https://github.com/OpenHydrology/floodestimation/blob/782da7c5abd1348923129efe89fb70003ebb088c/floodestimation/analysis.py#L772-L780 | train |
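A minimal sketch of the lmoments3 call used above. The flow series is invented, and dividing by the median is only an assumption about what _dimensionless_flows does, since that method is not shown in this record.

import numpy as np
import lmoments3 as lm

# Hypothetical annual maximum flow series (m3/s), rescaled to dimensionless values
flows = np.array([42.0, 55.3, 38.1, 61.7, 49.9, 45.2, 70.4, 52.6])
z = flows / np.median(flows)        # assumed QMED-style scaling

l1, l2, t3 = lm.lmom_ratios(z, nmom=3)
l_cv, l_skew = l2 / l1, t3          # the same ratios returned by _l_cv_and_skew
print(l_cv, l_skew)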
OpenHydrology/floodestimation | floodestimation/analysis.py | GrowthCurveAnalysis._l_cv_weight | def _l_cv_weight(self, donor_catchment):
"""
Return L-CV weighting for a donor catchment.
Methodology source: Science Report SC050050, eqn. 6.18 and 6.22a
"""
try:
dist = donor_catchment.similarity_dist
except AttributeError:
dist = self._similarity_distance(self.catchment, donor_catchment)
b = 0.0047 * sqrt(dist) + 0.0023 / 2
c = 0.02609 / (donor_catchment.record_length - 1)
return 1 / (b + c) | python | def _l_cv_weight(self, donor_catchment):
"""
Return L-CV weighting for a donor catchment.
Methodology source: Science Report SC050050, eqn. 6.18 and 6.22a
"""
try:
dist = donor_catchment.similarity_dist
except AttributeError:
dist = self._similarity_distance(self.catchment, donor_catchment)
b = 0.0047 * sqrt(dist) + 0.0023 / 2
c = 0.02609 / (donor_catchment.record_length - 1)
return 1 / (b + c) | [
"def",
"_l_cv_weight",
"(",
"self",
",",
"donor_catchment",
")",
":",
"try",
":",
"dist",
"=",
"donor_catchment",
".",
"similarity_dist",
"except",
"AttributeError",
":",
"dist",
"=",
"self",
".",
"_similarity_distance",
"(",
"self",
".",
"catchment",
",",
"donor_catchment",
")",
"b",
"=",
"0.0047",
"*",
"sqrt",
"(",
"dist",
")",
"+",
"0.0023",
"/",
"2",
"c",
"=",
"0.02609",
"/",
"(",
"donor_catchment",
".",
"record_length",
"-",
"1",
")",
"return",
"1",
"/",
"(",
"b",
"+",
"c",
")"
] | Return L-CV weighting for a donor catchment.
Methodology source: Science Report SC050050, eqn. 6.18 and 6.22a | [
"Return",
"L",
"-",
"CV",
"weighting",
"for",
"a",
"donor",
"catchment",
"."
] | 782da7c5abd1348923129efe89fb70003ebb088c | https://github.com/OpenHydrology/floodestimation/blob/782da7c5abd1348923129efe89fb70003ebb088c/floodestimation/analysis.py#L782-L794 | train |
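The L-CV weighting formula above is easy to evaluate for a single donor; the similarity distance and record length below are hypothetical.

from math import sqrt

dist = 0.3           # hypothetical similarity distance between subject and donor
record_length = 40   # hypothetical length of the donor's AMAX record, in years

b = 0.0047 * sqrt(dist) + 0.0023 / 2   # grows with similarity distance
c = 0.02609 / (record_length - 1)      # shrinks as the donor record gets longer
weight = 1 / (b + c)
print(weight)        # about 228: closer donors with longer records receive larger weights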
OpenHydrology/floodestimation | floodestimation/analysis.py | GrowthCurveAnalysis._l_cv_weight_factor | def _l_cv_weight_factor(self):
"""
Return multiplier for L-CV weightings in case of enhanced single site analysis.
Methodology source: Science Report SC050050, eqn. 6.15a and 6.15b
"""
b = 0.0047 * sqrt(0) + 0.0023 / 2
c = 0.02609 / (self.catchment.record_length - 1)
return c / (b + c) | python | def _l_cv_weight_factor(self):
"""
Return multiplier for L-CV weightings in case of enhanced single site analysis.
Methodology source: Science Report SC050050, eqn. 6.15a and 6.15b
"""
b = 0.0047 * sqrt(0) + 0.0023 / 2
c = 0.02609 / (self.catchment.record_length - 1)
return c / (b + c) | [
"def",
"_l_cv_weight_factor",
"(",
"self",
")",
":",
"b",
"=",
"0.0047",
"*",
"sqrt",
"(",
"0",
")",
"+",
"0.0023",
"/",
"2",
"c",
"=",
"0.02609",
"/",
"(",
"self",
".",
"catchment",
".",
"record_length",
"-",
"1",
")",
"return",
"c",
"/",
"(",
"b",
"+",
"c",
")"
] | Return multiplier for L-CV weightings in case of enhanced single site analysis.
Methodology source: Science Report SC050050, eqn. 6.15a and 6.15b | [
"Return",
"multiplier",
"for",
"L",
"-",
"CV",
"weightings",
"in",
"case",
"of",
"enhanced",
"single",
"site",
"analysis",
"."
] | 782da7c5abd1348923129efe89fb70003ebb088c | https://github.com/OpenHydrology/floodestimation/blob/782da7c5abd1348923129efe89fb70003ebb088c/floodestimation/analysis.py#L796-L804 | train |
OpenHydrology/floodestimation | floodestimation/analysis.py | GrowthCurveAnalysis._l_skew_weight | def _l_skew_weight(self, donor_catchment):
"""
Return L-SKEW weighting for donor catchment.
Methodology source: Science Report SC050050, eqn. 6.19 and 6.22b
"""
try:
dist = donor_catchment.similarity_dist
except AttributeError:
dist = self._similarity_distance(self.catchment, donor_catchment)
b = 0.0219 * (1 - exp(-dist / 0.2360))
c = 0.2743 / (donor_catchment.record_length - 2)
return 1 / (b + c) | python | def _l_skew_weight(self, donor_catchment):
"""
Return L-SKEW weighting for donor catchment.
Methodology source: Science Report SC050050, eqn. 6.19 and 6.22b
"""
try:
dist = donor_catchment.similarity_dist
except AttributeError:
dist = self._similarity_distance(self.catchment, donor_catchment)
b = 0.0219 * (1 - exp(-dist / 0.2360))
c = 0.2743 / (donor_catchment.record_length - 2)
return 1 / (b + c) | [
"def",
"_l_skew_weight",
"(",
"self",
",",
"donor_catchment",
")",
":",
"try",
":",
"dist",
"=",
"donor_catchment",
".",
"similarity_dist",
"except",
"AttributeError",
":",
"dist",
"=",
"self",
".",
"_similarity_distance",
"(",
"self",
".",
"catchment",
",",
"donor_catchment",
")",
"b",
"=",
"0.0219",
"*",
"(",
"1",
"-",
"exp",
"(",
"-",
"dist",
"/",
"0.2360",
")",
")",
"c",
"=",
"0.2743",
"/",
"(",
"donor_catchment",
".",
"record_length",
"-",
"2",
")",
"return",
"1",
"/",
"(",
"b",
"+",
"c",
")"
] | Return L-SKEW weighting for donor catchment.
Methodology source: Science Report SC050050, eqn. 6.19 and 6.22b | [
"Return",
"L",
"-",
"SKEW",
"weighting",
"for",
"donor",
"catchment",
"."
] | 782da7c5abd1348923129efe89fb70003ebb088c | https://github.com/OpenHydrology/floodestimation/blob/782da7c5abd1348923129efe89fb70003ebb088c/floodestimation/analysis.py#L806-L818 | train |
OpenHydrology/floodestimation | floodestimation/analysis.py | GrowthCurveAnalysis._growth_curve_single_site | def _growth_curve_single_site(self, distr='glo'):
"""
Return flood growth curve function based on `amax_records` from the subject catchment only.
:return: Inverse cumulative distribution function with one parameter `aep` (annual exceedance probability)
:type: :class:`.GrowthCurve`
"""
if self.catchment.amax_records:
self.donor_catchments = []
return GrowthCurve(distr, *self._var_and_skew(self.catchment))
else:
raise InsufficientDataError("Catchment's `amax_records` must be set for a single site analysis.") | python | def _growth_curve_single_site(self, distr='glo'):
"""
Return flood growth curve function based on `amax_records` from the subject catchment only.
:return: Inverse cumulative distribution function with one parameter `aep` (annual exceedance probability)
:type: :class:`.GrowthCurve`
"""
if self.catchment.amax_records:
self.donor_catchments = []
return GrowthCurve(distr, *self._var_and_skew(self.catchment))
else:
raise InsufficientDataError("Catchment's `amax_records` must be set for a single site analysis.") | [
"def",
"_growth_curve_single_site",
"(",
"self",
",",
"distr",
"=",
"'glo'",
")",
":",
"if",
"self",
".",
"catchment",
".",
"amax_records",
":",
"self",
".",
"donor_catchments",
"=",
"[",
"]",
"return",
"GrowthCurve",
"(",
"distr",
",",
"*",
"self",
".",
"_var_and_skew",
"(",
"self",
".",
"catchment",
")",
")",
"else",
":",
"raise",
"InsufficientDataError",
"(",
"\"Catchment's `amax_records` must be set for a single site analysis.\"",
")"
] | Return flood growth curve function based on `amax_records` from the subject catchment only.
:return: Inverse cumulative distribution function with one parameter `aep` (annual exceedance probability)
:type: :class:`.GrowthCurve` | [
"Return",
"flood",
"growth",
"curve",
"function",
"based",
"on",
"amax_records",
"from",
"the",
"subject",
"catchment",
"only",
"."
] | 782da7c5abd1348923129efe89fb70003ebb088c | https://github.com/OpenHydrology/floodestimation/blob/782da7c5abd1348923129efe89fb70003ebb088c/floodestimation/analysis.py#L820-L831 | train |
OpenHydrology/floodestimation | floodestimation/analysis.py | GrowthCurveAnalysis._growth_curve_pooling_group | def _growth_curve_pooling_group(self, distr='glo', as_rural=False):
"""
Return flood growth curve function based on `amax_records` from a pooling group.
:return: Inverse cumulative distribution function with one parameter `aep` (annual exceedance probability)
:type: :class:`.GrowthCurve`
:param as_rural: assume catchment is fully rural. Default: false.
:type as rural: bool
"""
if not self.donor_catchments:
self.find_donor_catchments()
gc = GrowthCurve(distr, *self._var_and_skew(self.donor_catchments))
# Record intermediate results
self.results_log['distr_name'] = distr.upper()
self.results_log['distr_params'] = gc.params
return gc | python | def _growth_curve_pooling_group(self, distr='glo', as_rural=False):
"""
Return flood growth curve function based on `amax_records` from a pooling group.
:return: Inverse cumulative distribution function with one parameter `aep` (annual exceedance probability)
:type: :class:`.GrowthCurve`
:param as_rural: assume catchment is fully rural. Default: false.
:type as rural: bool
"""
if not self.donor_catchments:
self.find_donor_catchments()
gc = GrowthCurve(distr, *self._var_and_skew(self.donor_catchments))
# Record intermediate results
self.results_log['distr_name'] = distr.upper()
self.results_log['distr_params'] = gc.params
return gc | [
"def",
"_growth_curve_pooling_group",
"(",
"self",
",",
"distr",
"=",
"'glo'",
",",
"as_rural",
"=",
"False",
")",
":",
"if",
"not",
"self",
".",
"donor_catchments",
":",
"self",
".",
"find_donor_catchments",
"(",
")",
"gc",
"=",
"GrowthCurve",
"(",
"distr",
",",
"*",
"self",
".",
"_var_and_skew",
"(",
"self",
".",
"donor_catchments",
")",
")",
"self",
".",
"results_log",
"[",
"'distr_name'",
"]",
"=",
"distr",
".",
"upper",
"(",
")",
"self",
".",
"results_log",
"[",
"'distr_params'",
"]",
"=",
"gc",
".",
"params",
"return",
"gc"
] | Return flood growth curve function based on `amax_records` from a pooling group.
:return: Inverse cumulative distribution function with one parameter `aep` (annual exceedance probability)
:type: :class:`.GrowthCurve`
:param as_rural: assume catchment is fully rural. Default: false.
:type as rural: bool | [
"Return",
"flood",
"growth",
"curve",
"function",
"based",
"on",
"amax_records",
"from",
"a",
"pooling",
"group",
"."
] | 782da7c5abd1348923129efe89fb70003ebb088c | https://github.com/OpenHydrology/floodestimation/blob/782da7c5abd1348923129efe89fb70003ebb088c/floodestimation/analysis.py#L833-L849 | train |
Nachtfeuer/pipeline | spline/tools/version.py | VersionsCheck.process | def process(self, document):
"""Logging versions of required tools."""
content = json.dumps(document)
versions = {}
versions.update({'Spline': Version(VERSION)})
versions.update(self.get_version("Bash", self.BASH_VERSION))
if content.find('"docker(container)":') >= 0 or content.find('"docker(image)":') >= 0:
versions.update(VersionsCheck.get_version("Docker", self.DOCKER_VERSION))
if content.find('"packer":') >= 0:
versions.update(VersionsCheck.get_version("Packer", self.PACKER_VERSION))
if content.find('"ansible(simple)":') >= 0:
versions.update(VersionsCheck.get_version('Ansible', self.ANSIBLE_VERSION))
return versions | python | def process(self, document):
"""Logging versions of required tools."""
content = json.dumps(document)
versions = {}
versions.update({'Spline': Version(VERSION)})
versions.update(self.get_version("Bash", self.BASH_VERSION))
if content.find('"docker(container)":') >= 0 or content.find('"docker(image)":') >= 0:
versions.update(VersionsCheck.get_version("Docker", self.DOCKER_VERSION))
if content.find('"packer":') >= 0:
versions.update(VersionsCheck.get_version("Packer", self.PACKER_VERSION))
if content.find('"ansible(simple)":') >= 0:
versions.update(VersionsCheck.get_version('Ansible', self.ANSIBLE_VERSION))
return versions | [
"def",
"process",
"(",
"self",
",",
"document",
")",
":",
"content",
"=",
"json",
".",
"dumps",
"(",
"document",
")",
"versions",
"=",
"{",
"}",
"versions",
".",
"update",
"(",
"{",
"'Spline'",
":",
"Version",
"(",
"VERSION",
")",
"}",
")",
"versions",
".",
"update",
"(",
"self",
".",
"get_version",
"(",
"\"Bash\"",
",",
"self",
".",
"BASH_VERSION",
")",
")",
"if",
"content",
".",
"find",
"(",
"'\"docker(container)\":'",
")",
">=",
"0",
"or",
"content",
".",
"find",
"(",
"'\"docker(image)\":'",
")",
">=",
"0",
":",
"versions",
".",
"update",
"(",
"VersionsCheck",
".",
"get_version",
"(",
"\"Docker\"",
",",
"self",
".",
"DOCKER_VERSION",
")",
")",
"if",
"content",
".",
"find",
"(",
"'\"packer\":'",
")",
">=",
"0",
":",
"versions",
".",
"update",
"(",
"VersionsCheck",
".",
"get_version",
"(",
"\"Packer\"",
",",
"self",
".",
"PACKER_VERSION",
")",
")",
"if",
"content",
".",
"find",
"(",
"'\"ansible(simple)\":'",
")",
">=",
"0",
":",
"versions",
".",
"update",
"(",
"VersionsCheck",
".",
"get_version",
"(",
"'Ansible'",
",",
"self",
".",
"ANSIBLE_VERSION",
")",
")",
"return",
"versions"
] | Logging versions of required tools. | [
"Logging",
"versions",
"of",
"required",
"tools",
"."
] | 04ca18c4e95e4349532bb45b768206393e1f2c13 | https://github.com/Nachtfeuer/pipeline/blob/04ca18c4e95e4349532bb45b768206393e1f2c13/spline/tools/version.py#L70-L85 | train |
Nachtfeuer/pipeline | spline/tools/version.py | VersionsCheck.get_version | def get_version(tool_name, tool_command):
"""
Get name and version of a tool defined by given command.
Args:
tool_name (str): name of the tool.
tool_command (str): Bash one line command to get the version of the tool.
Returns:
dict: tool name and version or empty when no line has been found
"""
result = {}
for line in Bash(ShellConfig(script=tool_command, internal=True)).process():
if line.find("command not found") >= 0:
VersionsCheck.LOGGER.error("Required tool '%s' not found (stopping pipeline)!", tool_name)
sys.exit(1)
else:
version = list(re.findall(r'(\d+(\.\d+)+)+', line))[0][0]
result = {tool_name: Version(str(version))}
break
return result | python | def get_version(tool_name, tool_command):
"""
Get name and version of a tool defined by given command.
Args:
tool_name (str): name of the tool.
tool_command (str): Bash one line command to get the version of the tool.
Returns:
dict: tool name and version or empty when no line has been found
"""
result = {}
for line in Bash(ShellConfig(script=tool_command, internal=True)).process():
if line.find("command not found") >= 0:
VersionsCheck.LOGGER.error("Required tool '%s' not found (stopping pipeline)!", tool_name)
sys.exit(1)
else:
version = list(re.findall(r'(\d+(\.\d+)+)+', line))[0][0]
result = {tool_name: Version(str(version))}
break
return result | [
"def",
"get_version",
"(",
"tool_name",
",",
"tool_command",
")",
":",
"result",
"=",
"{",
"}",
"for",
"line",
"in",
"Bash",
"(",
"ShellConfig",
"(",
"script",
"=",
"tool_command",
",",
"internal",
"=",
"True",
")",
")",
".",
"process",
"(",
")",
":",
"if",
"line",
".",
"find",
"(",
"\"command not found\"",
")",
">=",
"0",
":",
"VersionsCheck",
".",
"LOGGER",
".",
"error",
"(",
"\"Required tool '%s' not found (stopping pipeline)!\"",
",",
"tool_name",
")",
"sys",
".",
"exit",
"(",
"1",
")",
"else",
":",
"version",
"=",
"list",
"(",
"re",
".",
"findall",
"(",
"r'(\\d+(\\.\\d+)+)+'",
",",
"line",
")",
")",
"[",
"0",
"]",
"[",
"0",
"]",
"result",
"=",
"{",
"tool_name",
":",
"Version",
"(",
"str",
"(",
"version",
")",
")",
"}",
"break",
"return",
"result"
] | Get name and version of a tool defined by given command.
Args:
tool_name (str): name of the tool.
tool_command (str): Bash one line command to get the version of the tool.
Returns:
dict: tool name and version or empty when no line has been found | [
"Get",
"name",
"and",
"version",
"of",
"a",
"tool",
"defined",
"by",
"given",
"command",
"."
] | 04ca18c4e95e4349532bb45b768206393e1f2c13 | https://github.com/Nachtfeuer/pipeline/blob/04ca18c4e95e4349532bb45b768206393e1f2c13/spline/tools/version.py#L88-L108 | train |
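The version-extraction regex in get_version can be tried on its own; the sample output line below is invented.

import re

line = "Docker version 20.10.7, build f0df350"          # hypothetical tool output
version = list(re.findall(r'(\d+(\.\d+)+)+', line))[0][0]
print(version)  # '20.10.7' - the first dotted version number found on the line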
Nachtfeuer/pipeline | spline/tools/version.py | VersionsReport.process | def process(self, versions):
"""Logging version sorted ascending by tool name."""
for tool_name in sorted(versions.keys()):
version = versions[tool_name]
self._log("Using tool '%s', %s" % (tool_name, version)) | python | def process(self, versions):
"""Logging version sorted ascending by tool name."""
for tool_name in sorted(versions.keys()):
version = versions[tool_name]
self._log("Using tool '%s', %s" % (tool_name, version)) | [
"def",
"process",
"(",
"self",
",",
"versions",
")",
":",
"for",
"tool_name",
"in",
"sorted",
"(",
"versions",
".",
"keys",
"(",
")",
")",
":",
"version",
"=",
"versions",
"[",
"tool_name",
"]",
"self",
".",
"_log",
"(",
"\"Using tool '%s', %s\"",
"%",
"(",
"tool_name",
",",
"version",
")",
")"
] | Logging version sorted ascending by tool name. | [
"Logging",
"version",
"sorted",
"ascending",
"by",
"tool",
"name",
"."
] | 04ca18c4e95e4349532bb45b768206393e1f2c13 | https://github.com/Nachtfeuer/pipeline/blob/04ca18c4e95e4349532bb45b768206393e1f2c13/spline/tools/version.py#L117-L121 | train |
nocarryr/python-dispatch | pydispatch/dispatch.py | Dispatcher.register_event | def register_event(self, *names):
"""Registers new events after instance creation
Args:
*names (str): Name or names of the events to register
"""
for name in names:
if name in self.__events:
continue
self.__events[name] = Event(name) | python | def register_event(self, *names):
"""Registers new events after instance creation
Args:
*names (str): Name or names of the events to register
"""
for name in names:
if name in self.__events:
continue
self.__events[name] = Event(name) | [
"def",
"register_event",
"(",
"self",
",",
"*",
"names",
")",
":",
"for",
"name",
"in",
"names",
":",
"if",
"name",
"in",
"self",
".",
"__events",
":",
"continue",
"self",
".",
"__events",
"[",
"name",
"]",
"=",
"Event",
"(",
"name",
")"
] | Registers new events after instance creation
Args:
*names (str): Name or names of the events to register | [
"Registers",
"new",
"events",
"after",
"instance",
"creation"
] | 7c5ca03835c922cbfdfd62772c9e560062c954c7 | https://github.com/nocarryr/python-dispatch/blob/7c5ca03835c922cbfdfd62772c9e560062c954c7/pydispatch/dispatch.py#L125-L134 | train |
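A minimal sketch of registering an extra event at runtime and firing it, assuming the package is imported as from pydispatch import Dispatcher; the class, event and callback names are hypothetical.

from pydispatch import Dispatcher

class Player(Dispatcher):
    _events_ = ['started']            # events declared at class definition time

def on_stopped():
    print('stopped fired')

player = Player()
player.register_event('stopped')      # add another event after instance creation
player.bind(stopped=on_stopped)
player.emit('stopped')                # -> prints 'stopped fired'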
nocarryr/python-dispatch | pydispatch/dispatch.py | Dispatcher.emit | def emit(self, name, *args, **kwargs):
"""Dispatches an event to any subscribed listeners
Note:
If a listener returns :obj:`False`, the event will stop dispatching to
other listeners. Any other return value is ignored.
Args:
name (str): The name of the :class:`Event` to dispatch
*args (Optional): Positional arguments to be sent to listeners
**kwargs (Optional): Keyword arguments to be sent to listeners
"""
e = self.__property_events.get(name)
if e is None:
e = self.__events[name]
return e(*args, **kwargs) | python | def emit(self, name, *args, **kwargs):
"""Dispatches an event to any subscribed listeners
Note:
If a listener returns :obj:`False`, the event will stop dispatching to
other listeners. Any other return value is ignored.
Args:
name (str): The name of the :class:`Event` to dispatch
*args (Optional): Positional arguments to be sent to listeners
**kwargs (Optional): Keyword arguments to be sent to listeners
"""
e = self.__property_events.get(name)
if e is None:
e = self.__events[name]
return e(*args, **kwargs) | [
"def",
"emit",
"(",
"self",
",",
"name",
",",
"*",
"args",
",",
"**",
"kwargs",
")",
":",
"e",
"=",
"self",
".",
"__property_events",
".",
"get",
"(",
"name",
")",
"if",
"e",
"is",
"None",
":",
"e",
"=",
"self",
".",
"__events",
"[",
"name",
"]",
"return",
"e",
"(",
"*",
"args",
",",
"**",
"kwargs",
")"
] | Dispatches an event to any subscribed listeners
Note:
If a listener returns :obj:`False`, the event will stop dispatching to
other listeners. Any other return value is ignored.
Args:
name (str): The name of the :class:`Event` to dispatch
*args (Optional): Positional arguments to be sent to listeners
**kwargs (Optional): Keyword arguments to be sent to listeners | [
"Dispatches",
"an",
"event",
"to",
"any",
"subscribed",
"listeners"
] | 7c5ca03835c922cbfdfd62772c9e560062c954c7 | https://github.com/nocarryr/python-dispatch/blob/7c5ca03835c922cbfdfd62772c9e560062c954c7/pydispatch/dispatch.py#L236-L251 | train |
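A small sketch of the short-circuit behaviour noted in the emit docstring: a listener that returns False stops the event from reaching the remaining listeners. Class and event names are made up.

from pydispatch import Dispatcher

class Sensor(Dispatcher):
    _events_ = ['value_changed']

def gate(value):
    print('gate saw', value)
    return False                      # stops dispatching to listeners not yet called

def logger(value):
    print('logger saw', value)

sensor = Sensor()
sensor.bind(value_changed=gate)
sensor.bind(value_changed=logger)
sensor.emit('value_changed', 42)      # logger is skipped if gate runs first and returns False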
nocarryr/python-dispatch | pydispatch/dispatch.py | Dispatcher.get_dispatcher_event | def get_dispatcher_event(self, name):
"""Retrieves an Event object by name
Args:
name (str): The name of the :class:`Event` or
:class:`~pydispatch.properties.Property` object to retrieve
Returns:
The :class:`Event` instance for the event or property definition
.. versionadded:: 0.1.0
"""
e = self.__property_events.get(name)
if e is None:
e = self.__events[name]
return e | python | def get_dispatcher_event(self, name):
"""Retrieves an Event object by name
Args:
name (str): The name of the :class:`Event` or
:class:`~pydispatch.properties.Property` object to retrieve
Returns:
The :class:`Event` instance for the event or property definition
.. versionadded:: 0.1.0
"""
e = self.__property_events.get(name)
if e is None:
e = self.__events[name]
return e | [
"def",
"get_dispatcher_event",
"(",
"self",
",",
"name",
")",
":",
"e",
"=",
"self",
".",
"__property_events",
".",
"get",
"(",
"name",
")",
"if",
"e",
"is",
"None",
":",
"e",
"=",
"self",
".",
"__events",
"[",
"name",
"]",
"return",
"e"
] | Retrieves an Event object by name
Args:
name (str): The name of the :class:`Event` or
:class:`~pydispatch.properties.Property` object to retrieve
Returns:
The :class:`Event` instance for the event or property definition
.. versionadded:: 0.1.0 | [
"Retrieves",
"an",
"Event",
"object",
"by",
"name"
] | 7c5ca03835c922cbfdfd62772c9e560062c954c7 | https://github.com/nocarryr/python-dispatch/blob/7c5ca03835c922cbfdfd62772c9e560062c954c7/pydispatch/dispatch.py#L252-L267 | train |
nocarryr/python-dispatch | pydispatch/dispatch.py | Dispatcher.emission_lock | def emission_lock(self, name):
"""Holds emission of events and dispatches the last event on release
The context manager returned will store the last event data called by
:meth:`emit` and prevent callbacks until it exits. On exit, it will
dispatch the last event captured (if any)::
class Foo(Dispatcher):
_events_ = ['my_event']
def on_my_event(value):
print(value)
foo = Foo()
foo.bind(my_event=on_my_event)
with foo.emission_lock('my_event'):
foo.emit('my_event', 1)
foo.emit('my_event', 2)
>>> 2
Args:
name (str): The name of the :class:`Event` or
:class:`~pydispatch.properties.Property`
Returns:
A context manager to be used by the :keyword:`with` statement.
If available, this will also be an async context manager to be used
with the :keyword:`async with` statement (see `PEP 492`_).
Note:
The context manager is re-entrant, meaning that multiple calls to
this method within nested context scopes are possible.
.. _PEP 492: https://www.python.org/dev/peps/pep-0492/#asynchronous-context-managers-and-async-with
"""
e = self.__property_events.get(name)
if e is None:
e = self.__events[name]
return e.emission_lock | python | def emission_lock(self, name):
"""Holds emission of events and dispatches the last event on release
The context manager returned will store the last event data called by
:meth:`emit` and prevent callbacks until it exits. On exit, it will
dispatch the last event captured (if any)::
class Foo(Dispatcher):
_events_ = ['my_event']
def on_my_event(value):
print(value)
foo = Foo()
foo.bind(my_event=on_my_event)
with foo.emission_lock('my_event'):
foo.emit('my_event', 1)
foo.emit('my_event', 2)
>>> 2
Args:
name (str): The name of the :class:`Event` or
:class:`~pydispatch.properties.Property`
Returns:
A context manager to be used by the :keyword:`with` statement.
If available, this will also be an async context manager to be used
with the :keyword:`async with` statement (see `PEP 492`_).
Note:
The context manager is re-entrant, meaning that multiple calls to
this method within nested context scopes are possible.
.. _PEP 492: https://www.python.org/dev/peps/pep-0492/#asynchronous-context-managers-and-async-with
"""
e = self.__property_events.get(name)
if e is None:
e = self.__events[name]
return e.emission_lock | [
"def",
"emission_lock",
"(",
"self",
",",
"name",
")",
":",
"e",
"=",
"self",
".",
"__property_events",
".",
"get",
"(",
"name",
")",
"if",
"e",
"is",
"None",
":",
"e",
"=",
"self",
".",
"__events",
"[",
"name",
"]",
"return",
"e",
".",
"emission_lock"
] | Holds emission of events and dispatches the last event on release
The context manager returned will store the last event data called by
:meth:`emit` and prevent callbacks until it exits. On exit, it will
dispatch the last event captured (if any)::
class Foo(Dispatcher):
_events_ = ['my_event']
def on_my_event(value):
print(value)
foo = Foo()
foo.bind(my_event=on_my_event)
with foo.emission_lock('my_event'):
foo.emit('my_event', 1)
foo.emit('my_event', 2)
>>> 2
Args:
name (str): The name of the :class:`Event` or
:class:`~pydispatch.properties.Property`
Returns:
A context manager to be used by the :keyword:`with` statement.
If available, this will also be an async context manager to be used
with the :keyword:`async with` statement (see `PEP 492`_).
Note:
The context manager is re-entrant, meaning that multiple calls to
this method within nested context scopes are possible.
.. _PEP 492: https://www.python.org/dev/peps/pep-0492/#asynchronous-context-managers-and-async-with | [
"Holds",
"emission",
"of",
"events",
"and",
"dispatches",
"the",
"last",
"event",
"on",
"release"
] | 7c5ca03835c922cbfdfd62772c9e560062c954c7 | https://github.com/nocarryr/python-dispatch/blob/7c5ca03835c922cbfdfd62772c9e560062c954c7/pydispatch/dispatch.py#L268-L309 | train |
acutesoftware/AIKIF | aikif/toolbox/image_detection_tools.py | TEST | def TEST(fname):
"""
Test function to step through all functions in
order to try and identify all features on a map
This test function should be placed in a main
section later
"""
#fname = os.path.join(os.getcwd(), '..','..', # os.path.join(os.path.getcwd(), '
m = MapObject(fname, os.path.join(os.getcwd(), 'img_prog_results'))
m.add_layer(ImagePathFollow('border'))
m.add_layer(ImagePathFollow('river'))
m.add_layer(ImagePathFollow('road'))
m.add_layer(ImageArea('sea', col='Blue', density='light'))
m.add_layer(ImageArea('desert', col='Yellow', density='med'))
m.add_layer(ImageArea('forest', col='Drak Green', density='light'))
m.add_layer(ImageArea('fields', col='Green', density='light'))
m.add_layer(ImageObject('mountains'))
m.add_layer(ImageObject('trees'))
m.add_layer(ImageObject('towns')) | python | def TEST(fname):
"""
Test function to step through all functions in
order to try and identify all features on a map
This test function should be placed in a main
section later
"""
#fname = os.path.join(os.getcwd(), '..','..', # os.path.join(os.path.getcwd(), '
m = MapObject(fname, os.path.join(os.getcwd(), 'img_prog_results'))
m.add_layer(ImagePathFollow('border'))
m.add_layer(ImagePathFollow('river'))
m.add_layer(ImagePathFollow('road'))
m.add_layer(ImageArea('sea', col='Blue', density='light'))
m.add_layer(ImageArea('desert', col='Yellow', density='med'))
m.add_layer(ImageArea('forest', col='Drak Green', density='light'))
m.add_layer(ImageArea('fields', col='Green', density='light'))
m.add_layer(ImageObject('mountains'))
m.add_layer(ImageObject('trees'))
m.add_layer(ImageObject('towns')) | [
"def",
"TEST",
"(",
"fname",
")",
":",
"m",
"=",
"MapObject",
"(",
"fname",
",",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"getcwd",
"(",
")",
",",
"'img_prog_results'",
")",
")",
"m",
".",
"add_layer",
"(",
"ImagePathFollow",
"(",
"'border'",
")",
")",
"m",
".",
"add_layer",
"(",
"ImagePathFollow",
"(",
"'river'",
")",
")",
"m",
".",
"add_layer",
"(",
"ImagePathFollow",
"(",
"'road'",
")",
")",
"m",
".",
"add_layer",
"(",
"ImageArea",
"(",
"'sea'",
",",
"col",
"=",
"'Blue'",
",",
"density",
"=",
"'light'",
")",
")",
"m",
".",
"add_layer",
"(",
"ImageArea",
"(",
"'desert'",
",",
"col",
"=",
"'Yellow'",
",",
"density",
"=",
"'med'",
")",
")",
"m",
".",
"add_layer",
"(",
"ImageArea",
"(",
"'forest'",
",",
"col",
"=",
"'Drak Green'",
",",
"density",
"=",
"'light'",
")",
")",
"m",
".",
"add_layer",
"(",
"ImageArea",
"(",
"'fields'",
",",
"col",
"=",
"'Green'",
",",
"density",
"=",
"'light'",
")",
")",
"m",
".",
"add_layer",
"(",
"ImageObject",
"(",
"'mountains'",
")",
")",
"m",
".",
"add_layer",
"(",
"ImageObject",
"(",
"'trees'",
")",
")",
"m",
".",
"add_layer",
"(",
"ImageObject",
"(",
"'towns'",
")",
")"
] | Test function to step through all functions in
order to try and identify all features on a map
This test function should be placed in a main
section later | [
"Test",
"function",
"to",
"step",
"through",
"all",
"functions",
"in",
"order",
"to",
"try",
"and",
"identify",
"all",
"features",
"on",
"a",
"map",
"This",
"test",
"function",
"should",
"be",
"placed",
"in",
"a",
"main",
"section",
"later"
] | fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03 | https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/toolbox/image_detection_tools.py#L41-L61 | train |
acutesoftware/AIKIF | aikif/dataTools/cls_datatable.py | DataTable.describe_contents | def describe_contents(self):
""" describes various contents of data table """
print('======================================================================')
print(self)
print('Table = ', str(len(self.header)) + ' cols x ' + str(len(self.arr)) + ' rows')
print('HEADER = ', self.get_header())
print('arr = ', self.arr[0:2]) | python | def describe_contents(self):
""" describes various contents of data table """
print('======================================================================')
print(self)
print('Table = ', str(len(self.header)) + ' cols x ' + str(len(self.arr)) + ' rows')
print('HEADER = ', self.get_header())
print('arr = ', self.arr[0:2]) | [
"def",
"describe_contents",
"(",
"self",
")",
":",
"print",
"(",
"'======================================================================'",
")",
"print",
"(",
"self",
")",
"print",
"(",
"'Table = '",
",",
"str",
"(",
"len",
"(",
"self",
".",
"header",
")",
")",
"+",
"' cols x '",
"+",
"str",
"(",
"len",
"(",
"self",
".",
"arr",
")",
")",
"+",
"' rows'",
")",
"print",
"(",
"'HEADER = '",
",",
"self",
".",
"get_header",
"(",
")",
")",
"print",
"(",
"'arr = '",
",",
"self",
".",
"arr",
"[",
"0",
":",
"2",
"]",
")"
] | describes various contents of data table | [
"describes",
"various",
"contents",
"of",
"data",
"table"
] | fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03 | https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/dataTools/cls_datatable.py#L68-L74 | train |
acutesoftware/AIKIF | aikif/dataTools/cls_datatable.py | DataTable.get_distinct_values_from_cols | def get_distinct_values_from_cols(self, l_col_list):
"""
returns the list of distinct combinations in a dataset
based on the columns in the list. Note that this is
currently implemented as MAX permutations of the combo
so it is not guaranteed to have values in each case.
"""
uniq_vals = []
for l_col_name in l_col_list:
#print('col_name: ' + l_col_name)
uniq_vals.append(set(self.get_col_data_by_name(l_col_name)))
#print(' unique values = ', uniq_vals)
#print(' unique values[0] = ', uniq_vals[0])
#print(' unique values[1] = ', uniq_vals[1])
if len(l_col_list) == 0:
return []
elif len(l_col_list) == 1:
return sorted([v for v in uniq_vals])
elif len(l_col_list) == 2:
res = []
res = [(a, b) for a in uniq_vals[0] for b in uniq_vals[1]]
return res
else:
print ("TODO ")
return -44 | python | def get_distinct_values_from_cols(self, l_col_list):
"""
returns the list of distinct combinations in a dataset
based on the columns in the list. Note that this is
currently implemented as MAX permutations of the combo
so it is not guaranteed to have values in each case.
"""
uniq_vals = []
for l_col_name in l_col_list:
#print('col_name: ' + l_col_name)
uniq_vals.append(set(self.get_col_data_by_name(l_col_name)))
#print(' unique values = ', uniq_vals)
#print(' unique values[0] = ', uniq_vals[0])
#print(' unique values[1] = ', uniq_vals[1])
if len(l_col_list) == 0:
return []
elif len(l_col_list) == 1:
return sorted([v for v in uniq_vals])
elif len(l_col_list) == 2:
res = []
res = [(a, b) for a in uniq_vals[0] for b in uniq_vals[1]]
return res
else:
print ("TODO ")
return -44 | [
"def",
"get_distinct_values_from_cols",
"(",
"self",
",",
"l_col_list",
")",
":",
"uniq_vals",
"=",
"[",
"]",
"for",
"l_col_name",
"in",
"l_col_list",
":",
"uniq_vals",
".",
"append",
"(",
"set",
"(",
"self",
".",
"get_col_data_by_name",
"(",
"l_col_name",
")",
")",
")",
"if",
"len",
"(",
"l_col_list",
")",
"==",
"0",
":",
"return",
"[",
"]",
"elif",
"len",
"(",
"l_col_list",
")",
"==",
"1",
":",
"return",
"sorted",
"(",
"[",
"v",
"for",
"v",
"in",
"uniq_vals",
"]",
")",
"elif",
"len",
"(",
"l_col_list",
")",
"==",
"2",
":",
"res",
"=",
"[",
"]",
"res",
"=",
"[",
"(",
"a",
",",
"b",
")",
"for",
"a",
"in",
"uniq_vals",
"[",
"0",
"]",
"for",
"b",
"in",
"uniq_vals",
"[",
"1",
"]",
"]",
"return",
"res",
"else",
":",
"print",
"(",
"\"TODO \"",
")",
"return",
"-",
"44"
] | returns the list of distinct combinations in a dataset
based on the columns in the list. Note that this is
currently implemented as MAX permutations of the combo
so it is not guaranteed to have values in each case. | [
"returns",
"the",
"list",
"of",
"distinct",
"combinations",
"in",
"a",
"dataset",
"based",
"on",
"the",
"columns",
"in",
"the",
"list",
".",
"Note",
"that",
"this",
"is",
"currently",
"implemented",
"as",
"MAX",
"permutations",
"of",
"the",
"combo",
"so",
"it",
"is",
"not",
"guarenteed",
"to",
"have",
"values",
"in",
"each",
"case",
"."
] | fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03 | https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/dataTools/cls_datatable.py#L79-L104 | train |
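For the two-column case the nested comprehension above is simply a cartesian product of the distinct values, which is why some returned combinations may never occur together in the data. A standalone sketch with invented columns:

col_a = ['red', 'blue', 'red']    # hypothetical values of the first column
col_b = [1, 2]                    # hypothetical values of the second column

uniq_a, uniq_b = set(col_a), set(col_b)
combos = [(a, b) for a in uniq_a for b in uniq_b]
print(combos)  # e.g. [('blue', 1), ('blue', 2), ('red', 1), ('red', 2)]; order follows set iteration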
acutesoftware/AIKIF | aikif/dataTools/cls_datatable.py | DataTable.select_where | def select_where(self, where_col_list, where_value_list, col_name=''):
"""
selects rows from the array where col_list == val_list
"""
res = [] # list of rows to be returned
col_ids = [] # ids of the columns to check
#print('select_where : arr = ', len(self.arr), 'where_value_list = ', where_value_list)
for col_id, col in enumerate(self.header):
if col in where_col_list:
col_ids.append([col_id, col])
#print('select_where : col_ids = ', col_ids) # correctly prints [[0, 'TERM'], [2, 'ID']]
for row_num, row in enumerate(self.arr):
keep_this_row = True
#print('col_ids=', col_ids, ' row = ', row_num, row)
for ndx, where_col in enumerate(col_ids):
#print('type where_value_list[ndx] = ', type(where_value_list[ndx]))
#print('type row[where_col[0]] = ', type(row[where_col[0]]))
if row[where_col[0]] != where_value_list[ndx]:
keep_this_row = False
if keep_this_row is True:
if col_name == '':
res.append([row_num, row])
else: # extracting a single column only
l_dat = self.get_col_by_name(col_name)
if l_dat is not None:
res.append(row[l_dat])
return res | python | def select_where(self, where_col_list, where_value_list, col_name=''):
"""
selects rows from the array where col_list == val_list
"""
res = [] # list of rows to be returned
col_ids = [] # ids of the columns to check
#print('select_where : arr = ', len(self.arr), 'where_value_list = ', where_value_list)
for col_id, col in enumerate(self.header):
if col in where_col_list:
col_ids.append([col_id, col])
#print('select_where : col_ids = ', col_ids) # correctly prints [[0, 'TERM'], [2, 'ID']]
for row_num, row in enumerate(self.arr):
keep_this_row = True
#print('col_ids=', col_ids, ' row = ', row_num, row)
for ndx, where_col in enumerate(col_ids):
#print('type where_value_list[ndx] = ', type(where_value_list[ndx]))
#print('type row[where_col[0]] = ', type(row[where_col[0]]))
if row[where_col[0]] != where_value_list[ndx]:
keep_this_row = False
if keep_this_row is True:
if col_name == '':
res.append([row_num, row])
else: # extracting a single column only
l_dat = self.get_col_by_name(col_name)
if l_dat is not None:
res.append(row[l_dat])
return res | [
"def",
"select_where",
"(",
"self",
",",
"where_col_list",
",",
"where_value_list",
",",
"col_name",
"=",
"''",
")",
":",
"res",
"=",
"[",
"]",
"col_ids",
"=",
"[",
"]",
"for",
"col_id",
",",
"col",
"in",
"enumerate",
"(",
"self",
".",
"header",
")",
":",
"if",
"col",
"in",
"where_col_list",
":",
"col_ids",
".",
"append",
"(",
"[",
"col_id",
",",
"col",
"]",
")",
"for",
"row_num",
",",
"row",
"in",
"enumerate",
"(",
"self",
".",
"arr",
")",
":",
"keep_this_row",
"=",
"True",
"for",
"ndx",
",",
"where_col",
"in",
"enumerate",
"(",
"col_ids",
")",
":",
"if",
"row",
"[",
"where_col",
"[",
"0",
"]",
"]",
"!=",
"where_value_list",
"[",
"ndx",
"]",
":",
"keep_this_row",
"=",
"False",
"if",
"keep_this_row",
"is",
"True",
":",
"if",
"col_name",
"==",
"''",
":",
"res",
".",
"append",
"(",
"[",
"row_num",
",",
"row",
"]",
")",
"else",
":",
"l_dat",
"=",
"self",
".",
"get_col_by_name",
"(",
"col_name",
")",
"if",
"l_dat",
"is",
"not",
"None",
":",
"res",
".",
"append",
"(",
"row",
"[",
"l_dat",
"]",
")",
"return",
"res"
] | selects rows from the array where col_list == val_list | [
"selects",
"rows",
"from",
"the",
"array",
"where",
"col_list",
"==",
"val_list"
] | fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03 | https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/dataTools/cls_datatable.py#L117-L145 | train |
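The row filter in select_where can be reproduced with plain lists. The header, rows and where-clause below are hypothetical, and as in the method the where-values must be listed in the same column order as they appear in the header.

header = ['TERM', 'GENDER', 'ID']
rows = [['2023', 'F', 7], ['2023', 'M', 9], ['2024', 'F', 7]]

where_cols, where_vals = ['TERM', 'ID'], ['2023', 7]
col_ids = [i for i, c in enumerate(header) if c in where_cols]   # -> [0, 2]

matches = [(n, row) for n, row in enumerate(rows)
           if all(row[c] == v for c, v in zip(col_ids, where_vals))]
print(matches)  # [(0, ['2023', 'F', 7])]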
acutesoftware/AIKIF | aikif/dataTools/cls_datatable.py | DataTable.update_where | def update_where(self, col, value, where_col_list, where_value_list):
"""
updates the array to set cell = value where col_list == val_list
"""
if type(col) is str:
col_ndx = self.get_col_by_name(col)
else:
col_ndx = col
#print('col_ndx = ', col_ndx )
#print("updating " + col + " to " , value, " where " , where_col_list , " = " , where_value_list)
new_arr = self.select_where(where_col_list, where_value_list)
#print('new_arr', new_arr)
for r in new_arr:
self.arr[r[0]][col_ndx] = value | python | def update_where(self, col, value, where_col_list, where_value_list):
"""
updates the array to set cell = value where col_list == val_list
"""
if type(col) is str:
col_ndx = self.get_col_by_name(col)
else:
col_ndx = col
#print('col_ndx = ', col_ndx )
#print("updating " + col + " to " , value, " where " , where_col_list , " = " , where_value_list)
new_arr = self.select_where(where_col_list, where_value_list)
#print('new_arr', new_arr)
for r in new_arr:
self.arr[r[0]][col_ndx] = value | [
"def",
"update_where",
"(",
"self",
",",
"col",
",",
"value",
",",
"where_col_list",
",",
"where_value_list",
")",
":",
"if",
"type",
"(",
"col",
")",
"is",
"str",
":",
"col_ndx",
"=",
"self",
".",
"get_col_by_name",
"(",
"col",
")",
"else",
":",
"col_ndx",
"=",
"col",
"new_arr",
"=",
"self",
".",
"select_where",
"(",
"where_col_list",
",",
"where_value_list",
")",
"for",
"r",
"in",
"new_arr",
":",
"self",
".",
"arr",
"[",
"r",
"[",
"0",
"]",
"]",
"[",
"col_ndx",
"]",
"=",
"value"
] | updates the array to set cell = value where col_list == val_list | [
"updates",
"the",
"array",
"to",
"set",
"cell",
"=",
"value",
"where",
"col_list",
"==",
"val_list"
] | fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03 | https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/dataTools/cls_datatable.py#L171-L184 | train |
acutesoftware/AIKIF | aikif/dataTools/cls_datatable.py | DataTable.percentile | def percentile(self, lst_data, percent , key=lambda x:x):
""" calculates the 'num' percentile of the items in the list """
new_list = sorted(lst_data)
#print('new list = ' , new_list)
#n = float(len(lst_data))
k = (len(new_list)-1) * percent
f = math.floor(k)
c = math.ceil(k)
if f == c:
#print(key(new_list[int(k)]))
return key(new_list[int(k)])
d0 = float(key(new_list[int(f)])) * (c-k)
d1 = float(key(new_list[int(c)])) * (k-f)
return d0+d1 | python | def percentile(self, lst_data, percent , key=lambda x:x):
""" calculates the 'num' percentile of the items in the list """
new_list = sorted(lst_data)
#print('new list = ' , new_list)
#n = float(len(lst_data))
k = (len(new_list)-1) * percent
f = math.floor(k)
c = math.ceil(k)
if f == c:
#print(key(new_list[int(k)]))
return key(new_list[int(k)])
d0 = float(key(new_list[int(f)])) * (c-k)
d1 = float(key(new_list[int(c)])) * (k-f)
return d0+d1 | [
"def",
"percentile",
"(",
"self",
",",
"lst_data",
",",
"percent",
",",
"key",
"=",
"lambda",
"x",
":",
"x",
")",
":",
"new_list",
"=",
"sorted",
"(",
"lst_data",
")",
"k",
"=",
"(",
"len",
"(",
"new_list",
")",
"-",
"1",
")",
"*",
"percent",
"f",
"=",
"math",
".",
"floor",
"(",
"k",
")",
"c",
"=",
"math",
".",
"ceil",
"(",
"k",
")",
"if",
"f",
"==",
"c",
":",
"return",
"key",
"(",
"new_list",
"[",
"int",
"(",
"k",
")",
"]",
")",
"d0",
"=",
"float",
"(",
"key",
"(",
"new_list",
"[",
"int",
"(",
"f",
")",
"]",
")",
")",
"*",
"(",
"c",
"-",
"k",
")",
"d1",
"=",
"float",
"(",
"key",
"(",
"new_list",
"[",
"int",
"(",
"c",
")",
"]",
")",
")",
"*",
"(",
"k",
"-",
"f",
")",
"return",
"d0",
"+",
"d1"
] | calculates the 'num' percentile of the items in the list | [
"calculates",
"the",
"num",
"percentile",
"of",
"the",
"items",
"in",
"the",
"list"
] | fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03 | https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/dataTools/cls_datatable.py#L206-L219 | train |
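The linear interpolation in percentile is easiest to see with a worked example; below is a standalone copy of the same logic applied to an invented list.

import math

def percentile(lst_data, percent):
    # standalone copy of the interpolation used in DataTable.percentile
    new_list = sorted(lst_data)
    k = (len(new_list) - 1) * percent
    f, c = math.floor(k), math.ceil(k)
    if f == c:                       # k falls exactly on an element
        return new_list[int(k)]
    d0 = float(new_list[int(f)]) * (c - k)
    d1 = float(new_list[int(c)]) * (k - f)
    return d0 + d1

print(percentile([1, 2, 3, 4], 0.5))     # k = 1.5 -> halfway between 2 and 3 -> 2.5
print(percentile([1, 2, 3, 4, 5], 0.5))  # k = 2 exactly -> 3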
acutesoftware/AIKIF | aikif/dataTools/cls_datatable.py | DataTable.save | def save(self, filename, content):
"""
default is to save a file from list of lines
"""
with open(filename, "w") as f:
if hasattr(content, '__iter__'):
f.write('\n'.join([row for row in content]))
else:
print('WRINGI CONTWETESWREWR')
f.write(str(content)) | python | def save(self, filename, content):
"""
default is to save a file from list of lines
"""
with open(filename, "w") as f:
if hasattr(content, '__iter__'):
f.write('\n'.join([row for row in content]))
else:
print('WRINGI CONTWETESWREWR')
f.write(str(content)) | [
"def",
"save",
"(",
"self",
",",
"filename",
",",
"content",
")",
":",
"with",
"open",
"(",
"filename",
",",
"\"w\"",
")",
"as",
"f",
":",
"if",
"hasattr",
"(",
"content",
",",
"'__iter__'",
")",
":",
"f",
".",
"write",
"(",
"'\\n'",
".",
"join",
"(",
"[",
"row",
"for",
"row",
"in",
"content",
"]",
")",
")",
"else",
":",
"print",
"(",
"'WRINGI CONTWETESWREWR'",
")",
"f",
".",
"write",
"(",
"str",
"(",
"content",
")",
")"
] | default is to save a file from list of lines | [
"default",
"is",
"to",
"save",
"a",
"file",
"from",
"list",
"of",
"lines"
] | fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03 | https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/dataTools/cls_datatable.py#L230-L239 | train |
acutesoftware/AIKIF | aikif/dataTools/cls_datatable.py | DataTable.save_csv | def save_csv(self, filename, write_header_separately=True):
"""
save the default array as a CSV file
"""
txt = ''
#print("SAVING arr = ", self.arr)
with open(filename, "w") as f:
if write_header_separately:
f.write(','.join([c for c in self.header]) + '\n')
for row in self.arr:
#print('save_csv: saving row = ', row)
txt = ','.join([self.force_to_string(col) for col in row])
#print(txt)
f.write(txt + '\n')
f.write('\n') | python | def save_csv(self, filename, write_header_separately=True):
"""
save the default array as a CSV file
"""
txt = ''
#print("SAVING arr = ", self.arr)
with open(filename, "w") as f:
if write_header_separately:
f.write(','.join([c for c in self.header]) + '\n')
for row in self.arr:
#print('save_csv: saving row = ', row)
txt = ','.join([self.force_to_string(col) for col in row])
#print(txt)
f.write(txt + '\n')
f.write('\n') | [
"def",
"save_csv",
"(",
"self",
",",
"filename",
",",
"write_header_separately",
"=",
"True",
")",
":",
"txt",
"=",
"''",
"with",
"open",
"(",
"filename",
",",
"\"w\"",
")",
"as",
"f",
":",
"if",
"write_header_separately",
":",
"f",
".",
"write",
"(",
"','",
".",
"join",
"(",
"[",
"c",
"for",
"c",
"in",
"self",
".",
"header",
"]",
")",
"+",
"'\\n'",
")",
"for",
"row",
"in",
"self",
".",
"arr",
":",
"txt",
"=",
"','",
".",
"join",
"(",
"[",
"self",
".",
"force_to_string",
"(",
"col",
")",
"for",
"col",
"in",
"row",
"]",
")",
"f",
".",
"write",
"(",
"txt",
"+",
"'\\n'",
")",
"f",
".",
"write",
"(",
"'\\n'",
")"
] | save the default array as a CSV file | [
"save",
"the",
"default",
"array",
"as",
"a",
"CSV",
"file"
] | fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03 | https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/dataTools/cls_datatable.py#L241-L256 | train |
acutesoftware/AIKIF | aikif/dataTools/cls_datatable.py | DataTable.drop | def drop(self, fname):
"""
drop the table, view or delete the file
"""
if self.dataset_type == 'file':
import os
try:
os.remove(fname)
except Exception as ex:
print('cant drop file "' + fname + '" : ' + str(ex)) | python | def drop(self, fname):
"""
drop the table, view or delete the file
"""
if self.dataset_type == 'file':
import os
try:
os.remove(fname)
except Exception as ex:
print('cant drop file "' + fname + '" : ' + str(ex)) | [
"def",
"drop",
"(",
"self",
",",
"fname",
")",
":",
"if",
"self",
".",
"dataset_type",
"==",
"'file'",
":",
"import",
"os",
"try",
":",
"os",
".",
"remove",
"(",
"fname",
")",
"except",
"Exception",
"as",
"ex",
":",
"print",
"(",
"'cant drop file \"'",
"+",
"fname",
"+",
"'\" : '",
"+",
"str",
"(",
"ex",
")",
")"
] | drop the table, view or delete the file | [
"drop",
"the",
"table",
"view",
"or",
"delete",
"the",
"file"
] | fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03 | https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/dataTools/cls_datatable.py#L258-L267 | train |
acutesoftware/AIKIF | aikif/dataTools/cls_datatable.py | DataTable.get_col_data_by_name | def get_col_data_by_name(self, col_name, WHERE_Clause=''):
""" returns the values of col_name according to where """
#print('get_col_data_by_name: col_name = ', col_name, ' WHERE = ', WHERE_Clause)
col_key = self.get_col_by_name(col_name)
if col_key is None:
print('get_col_data_by_name: col_name = ', col_name, ' NOT FOUND')
return []
#print('get_col_data_by_name: col_key =', col_key)
res = []
for row in self.arr:
#print('col_key=',col_key, ' len(row)=', len(row), ' row=', row)
res.append(row[col_key]) # need to convert to int for calcs but leave as string for lookups
return res | python | def get_col_data_by_name(self, col_name, WHERE_Clause=''):
""" returns the values of col_name according to where """
#print('get_col_data_by_name: col_name = ', col_name, ' WHERE = ', WHERE_Clause)
col_key = self.get_col_by_name(col_name)
if col_key is None:
print('get_col_data_by_name: col_name = ', col_name, ' NOT FOUND')
return []
#print('get_col_data_by_name: col_key =', col_key)
res = []
for row in self.arr:
#print('col_key=',col_key, ' len(row)=', len(row), ' row=', row)
res.append(row[col_key]) # need to convert to int for calcs but leave as string for lookups
return res | [
"def",
"get_col_data_by_name",
"(",
"self",
",",
"col_name",
",",
"WHERE_Clause",
"=",
"''",
")",
":",
"col_key",
"=",
"self",
".",
"get_col_by_name",
"(",
"col_name",
")",
"if",
"col_key",
"is",
"None",
":",
"print",
"(",
"'get_col_data_by_name: col_name = '",
",",
"col_name",
",",
"' NOT FOUND'",
")",
"return",
"[",
"]",
"res",
"=",
"[",
"]",
"for",
"row",
"in",
"self",
".",
"arr",
":",
"res",
".",
"append",
"(",
"row",
"[",
"col_key",
"]",
")",
"return",
"res"
] | returns the values of col_name according to where | [
"returns",
"the",
"values",
"of",
"col_name",
"according",
"to",
"where"
] | fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03 | https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/dataTools/cls_datatable.py#L311-L323 | train |
acutesoftware/AIKIF | aikif/dataTools/cls_datatable.py | DataTable.format_rst | def format_rst(self):
"""
return table in RST format
"""
res = ''
num_cols = len(self.header)
col_width = 25
for _ in range(num_cols):
res += ''.join(['=' for _ in range(col_width - 1)]) + ' '
res += '\n'
for c in self.header:
res += c.ljust(col_width)
res += '\n'
for _ in range(num_cols):
res += ''.join(['=' for _ in range(col_width - 1)]) + ' '
res += '\n'
for row in self.arr:
for c in row:
res += self.force_to_string(c).ljust(col_width)
res += '\n'
for _ in range(num_cols):
res += ''.join(['=' for _ in range(col_width - 1)]) + ' '
res += '\n'
return res | python | def format_rst(self):
"""
return table in RST format
"""
res = ''
num_cols = len(self.header)
col_width = 25
for _ in range(num_cols):
res += ''.join(['=' for _ in range(col_width - 1)]) + ' '
res += '\n'
for c in self.header:
res += c.ljust(col_width)
res += '\n'
for _ in range(num_cols):
res += ''.join(['=' for _ in range(col_width - 1)]) + ' '
res += '\n'
for row in self.arr:
for c in row:
res += self.force_to_string(c).ljust(col_width)
res += '\n'
for _ in range(num_cols):
res += ''.join(['=' for _ in range(col_width - 1)]) + ' '
res += '\n'
return res | [
"def",
"format_rst",
"(",
"self",
")",
":",
"res",
"=",
"''",
"num_cols",
"=",
"len",
"(",
"self",
".",
"header",
")",
"col_width",
"=",
"25",
"for",
"_",
"in",
"range",
"(",
"num_cols",
")",
":",
"res",
"+=",
"''",
".",
"join",
"(",
"[",
"'='",
"for",
"_",
"in",
"range",
"(",
"col_width",
"-",
"1",
")",
"]",
")",
"+",
"' '",
"res",
"+=",
"'\\n'",
"for",
"c",
"in",
"self",
".",
"header",
":",
"res",
"+=",
"c",
".",
"ljust",
"(",
"col_width",
")",
"res",
"+=",
"'\\n'",
"for",
"_",
"in",
"range",
"(",
"num_cols",
")",
":",
"res",
"+=",
"''",
".",
"join",
"(",
"[",
"'='",
"for",
"_",
"in",
"range",
"(",
"col_width",
"-",
"1",
")",
"]",
")",
"+",
"' '",
"res",
"+=",
"'\\n'",
"for",
"row",
"in",
"self",
".",
"arr",
":",
"for",
"c",
"in",
"row",
":",
"res",
"+=",
"self",
".",
"force_to_string",
"(",
"c",
")",
".",
"ljust",
"(",
"col_width",
")",
"res",
"+=",
"'\\n'",
"for",
"_",
"in",
"range",
"(",
"num_cols",
")",
":",
"res",
"+=",
"''",
".",
"join",
"(",
"[",
"'='",
"for",
"_",
"in",
"range",
"(",
"col_width",
"-",
"1",
")",
"]",
")",
"+",
"' '",
"res",
"+=",
"'\\n'",
"return",
"res"
] | return table in RST format | [
"return",
"table",
"in",
"RST",
"format"
] | fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03 | https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/dataTools/cls_datatable.py#L325-L348 | train |
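With the fixed 25-character column width used above, format_rst emits an RST simple-table grid. A standalone sketch of the same layout applied to an invented table:

header = ['name', 'value']
rows = [['alpha', '1'], ['beta', '22']]
col_width = 25

rule = ('=' * (col_width - 1) + ' ') * len(header)
lines = [rule, ''.join(c.ljust(col_width) for c in header), rule]
lines += [''.join(str(c).ljust(col_width) for c in row) for row in rows]
lines.append(rule)
print('\n'.join(lines))   # '=' rules above and below the header and after the last row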
mpg-age-bioinformatics/AGEpy | AGEpy/homology.py | getHomoloGene | def getHomoloGene(taxfile="build_inputs/taxid_taxname",\
genefile="homologene.data",\
proteinsfile="build_inputs/all_proteins.data",\
proteinsclusterfile="build_inputs/proteins_for_clustering.data",\
baseURL="http://ftp.ncbi.nih.gov/pub/HomoloGene/current/"):
"""
Returns NCBI's Homolog Gene tables.
:param taxfile: path to local file or to baseURL/taxfile
:param genefile: path to local file or to baseURL/genefile
:param proteinsfile: path to local file or to baseURL/proteinsfile
:param proteinsclusterfile: path to local file or to baseURL/proteinsclusterfile
:param baseURL: baseURL for downloading files
:returns genedf: Homolog gene Pandas dataframe
:returns protclusdf: Pandas dataframe. Lists one protein per gene that was used for homologene clustering.
If a gene has multiple protein accessions derived from alternative splicing,
only the protein isoform that gives the most protein alignment to proteins in other species
was selected for clustering, and it is listed in this file.
:returns proteinsdf: Pandas dataframe. Lists all proteins and their gene information.
If a gene has multiple protein accessions derived from an alternative splicing event,
each protein accession is listed on a separate line.
"""
def getDf(inputfile):
if os.path.isfile(inputfile):
df=pd.read_table(inputfile,header=None)
else:
df = urllib2.urlopen(baseURL+inputfile)
df=df.read().split("\n")
df=[ s for s in df if len(s) > 0 ]
df=[s.split("\t") for s in df]
df=pd.DataFrame(df)
return df
taxdf=getDf(taxfile)
taxdf.set_index([0],inplace=True)
taxdi=taxdf.to_dict().get(1)
genedf=getDf(genefile)
genecols=["HID","Taxonomy ID","Gene ID","Gene Symbol","Protein gi","Protein accession"]
genedf.columns=genecols
genedf["organism"]=genedf["Taxonomy ID"].apply(lambda x:taxdi.get(x))
proteinsdf=getDf(proteinsfile)
proteinscols=["taxid","entrez GeneID","gene symbol","gene description","protein accession.ver","mrna accession.ver",\
"length of protein listed in column 5","-11) contains data about gene location on the genome",\
"starting position of gene in 0-based coordinate",\
"end position of the gene in 0-based coordinate","strand","nucleotide gi of genomic sequence where this gene is annotated"]
proteinsdf.columns=proteinscols
proteinsdf["organism"]=proteinsdf["taxid"].apply(lambda x:taxdi.get(x))
protclusdf=getDf(proteinsclusterfile)
protclustercols=["taxid","entrez GeneID","gene symbol","gene description","protein accession.ver","mrna accession.ver",\
"length of protein listed in column 5","-11) contains data about gene location on the genome",\
"starting position of gene in 0-based coordinate",\
"end position of the gene in 0-based coordinate","strand","nucleotide gi of genomic sequence where this gene is annotated"]
protclusdf.columns=proteinscols
protclusdf["organism"]=protclusdf["taxid"].apply(lambda x:taxdi.get(x))
return genedf, protclusdf, proteinsdf | python | def getHomoloGene(taxfile="build_inputs/taxid_taxname",\
genefile="homologene.data",\
proteinsfile="build_inputs/all_proteins.data",\
proteinsclusterfile="build_inputs/proteins_for_clustering.data",\
baseURL="http://ftp.ncbi.nih.gov/pub/HomoloGene/current/"):
"""
Returns NCBI's Homolog Gene tables.
:param taxfile: path to local file or to baseURL/taxfile
:param genefile: path to local file or to baseURL/genefile
:param proteinsfile: path to local file or to baseURL/proteinsfile
:param proteinsclusterfile: path to local file or to baseURL/proteinsclusterfile
:param baseURL: baseURL for downloading files
:returns genedf: Homolog gene Pandas dataframe
:returns protclusdf: Pandas dataframe. Lists one protein per gene that was used for homologene clustering.
If a gene has multiple protein accessions derived from alternative splicing,
only the protein isoform that gives the most protein alignment to proteins in other species
was selected for clustering, and it is listed in this file.
:returns proteinsdf: Pandas dataframe. Lists all proteins and their gene information.
If a gene has multiple protein accessions derived from an alternative splicing event,
each protein accession is listed on a separate line.
"""
def getDf(inputfile):
if os.path.isfile(inputfile):
df=pd.read_table(inputfile,header=None)
else:
df = urllib2.urlopen(baseURL+inputfile)
df=df.read().split("\n")
df=[ s for s in df if len(s) > 0 ]
df=[s.split("\t") for s in df]
df=pd.DataFrame(df)
return df
taxdf=getDf(taxfile)
taxdf.set_index([0],inplace=True)
taxdi=taxdf.to_dict().get(1)
genedf=getDf(genefile)
genecols=["HID","Taxonomy ID","Gene ID","Gene Symbol","Protein gi","Protein accession"]
genedf.columns=genecols
genedf["organism"]=genedf["Taxonomy ID"].apply(lambda x:taxdi.get(x))
proteinsdf=getDf(proteinsfile)
proteinscols=["taxid","entrez GeneID","gene symbol","gene description","protein accession.ver","mrna accession.ver",\
"length of protein listed in column 5","-11) contains data about gene location on the genome",\
"starting position of gene in 0-based coordinate",\
"end position of the gene in 0-based coordinate","strand","nucleotide gi of genomic sequence where this gene is annotated"]
proteinsdf.columns=proteinscols
proteinsdf["organism"]=proteinsdf["taxid"].apply(lambda x:taxdi.get(x))
protclusdf=getDf(proteinsclusterfile)
protclustercols=["taxid","entrez GeneID","gene symbol","gene description","protein accession.ver","mrna accession.ver",\
"length of protein listed in column 5","-11) contains data about gene location on the genome",\
"starting position of gene in 0-based coordinate",\
"end position of the gene in 0-based coordinate","strand","nucleotide gi of genomic sequence where this gene is annotated"]
protclusdf.columns=proteinscols
protclusdf["organism"]=protclusdf["taxid"].apply(lambda x:taxdi.get(x))
return genedf, protclusdf, proteinsdf | [
"def",
"getHomoloGene",
"(",
"taxfile",
"=",
"\"build_inputs/taxid_taxname\"",
",",
"genefile",
"=",
"\"homologene.data\"",
",",
"proteinsfile",
"=",
"\"build_inputs/all_proteins.data\"",
",",
"proteinsclusterfile",
"=",
"\"build_inputs/proteins_for_clustering.data\"",
",",
"baseURL",
"=",
"\"http://ftp.ncbi.nih.gov/pub/HomoloGene/current/\"",
")",
":",
"def",
"getDf",
"(",
"inputfile",
")",
":",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"inputfile",
")",
":",
"df",
"=",
"pd",
".",
"read_table",
"(",
"inputfile",
",",
"header",
"=",
"None",
")",
"else",
":",
"df",
"=",
"urllib2",
".",
"urlopen",
"(",
"baseURL",
"+",
"inputfile",
")",
"df",
"=",
"df",
".",
"read",
"(",
")",
".",
"split",
"(",
"\"\\n\"",
")",
"df",
"=",
"[",
"s",
"for",
"s",
"in",
"df",
"if",
"len",
"(",
"s",
")",
">",
"0",
"]",
"df",
"=",
"[",
"s",
".",
"split",
"(",
"\"\\t\"",
")",
"for",
"s",
"in",
"df",
"]",
"df",
"=",
"pd",
".",
"DataFrame",
"(",
"df",
")",
"return",
"df",
"taxdf",
"=",
"getDf",
"(",
"taxfile",
")",
"taxdf",
".",
"set_index",
"(",
"[",
"0",
"]",
",",
"inplace",
"=",
"True",
")",
"taxdi",
"=",
"taxdf",
".",
"to_dict",
"(",
")",
".",
"get",
"(",
"1",
")",
"genedf",
"=",
"getDf",
"(",
"genefile",
")",
"genecols",
"=",
"[",
"\"HID\"",
",",
"\"Taxonomy ID\"",
",",
"\"Gene ID\"",
",",
"\"Gene Symbol\"",
",",
"\"Protein gi\"",
",",
"\"Protein accession\"",
"]",
"genedf",
".",
"columns",
"=",
"genecols",
"genedf",
"[",
"\"organism\"",
"]",
"=",
"genedf",
"[",
"\"Taxonomy ID\"",
"]",
".",
"apply",
"(",
"lambda",
"x",
":",
"taxdi",
".",
"get",
"(",
"x",
")",
")",
"proteinsdf",
"=",
"getDf",
"(",
"proteinsfile",
")",
"proteinscols",
"=",
"[",
"\"taxid\"",
",",
"\"entrez GeneID\"",
",",
"\"gene symbol\"",
",",
"\"gene description\"",
",",
"\"protein accession.ver\"",
",",
"\"mrna accession.ver\"",
",",
"\"length of protein listed in column 5\"",
",",
"\"-11) contains data about gene location on the genome\"",
",",
"\"starting position of gene in 0-based coordinate\"",
",",
"\"end position of the gene in 0-based coordinate\"",
",",
"\"strand\"",
",",
"\"nucleotide gi of genomic sequence where this gene is annotated\"",
"]",
"proteinsdf",
".",
"columns",
"=",
"proteinscols",
"proteinsdf",
"[",
"\"organism\"",
"]",
"=",
"proteinsdf",
"[",
"\"taxid\"",
"]",
".",
"apply",
"(",
"lambda",
"x",
":",
"taxdi",
".",
"get",
"(",
"x",
")",
")",
"protclusdf",
"=",
"getDf",
"(",
"proteinsclusterfile",
")",
"protclustercols",
"=",
"[",
"\"taxid\"",
",",
"\"entrez GeneID\"",
",",
"\"gene symbol\"",
",",
"\"gene description\"",
",",
"\"protein accession.ver\"",
",",
"\"mrna accession.ver\"",
",",
"\"length of protein listed in column 5\"",
",",
"\"-11) contains data about gene location on the genome\"",
",",
"\"starting position of gene in 0-based coordinate\"",
",",
"\"end position of the gene in 0-based coordinate\"",
",",
"\"strand\"",
",",
"\"nucleotide gi of genomic sequence where this gene is annotated\"",
"]",
"protclusdf",
".",
"columns",
"=",
"proteinscols",
"protclusdf",
"[",
"\"organism\"",
"]",
"=",
"protclusdf",
"[",
"\"taxid\"",
"]",
".",
"apply",
"(",
"lambda",
"x",
":",
"taxdi",
".",
"get",
"(",
"x",
")",
")",
"return",
"genedf",
",",
"protclusdf",
",",
"proteinsdf"
] | Returns NCBI's Homolog Gene tables.
:param taxfile: path to local file or to baseURL/taxfile
:param genefile: path to local file or to baseURL/genefile
:param proteinsfile: path to local file or to baseURL/proteinsfile
:param proteinsclusterfile: path to local file or to baseURL/proteinsclusterfile
:param baseURL: baseURL for downloading files
:returns genedf: Homolog gene Pandas dataframe
    :returns protclusdf: Pandas dataframe. Lists one protein per gene that was used for homologene clustering.
If a gene has multiple protein accessions derived from alternative splicing,
    only one protein isoform that gives the most protein alignment to proteins in other species
was selected for clustering and it is listed in this file.
:returns proteinsdf: Pandas dataframe. Lists all proteins and their gene information.
    If a gene has multiple protein accessions derived from an alternative splicing event,
    each protein accession is listed in a separate line. | [
"Returns",
"NBCI",
"s",
"Homolog",
"Gene",
"tables",
"."
] | 887808a7a2c1504f39ce8d8cb36c15c1721cd29f | https://github.com/mpg-age-bioinformatics/AGEpy/blob/887808a7a2c1504f39ce8d8cb36c15c1721cd29f/AGEpy/homology.py#L6-L66 | train |
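
Usage sketch for the getHomoloGene record above. The import path is assumed from the record's path (AGEpy/homology.py), the call uses the documented defaults (which pull from the NCBI HomoloGene FTP and, in the remote branch, rely on urllib2), and only column names taken from the code are referenced.

# Hedged sketch, not part of the AGEpy sources.
from AGEpy.homology import getHomoloGene

# With the defaults, the three tables described in the docstring are returned.
genedf, protclusdf, proteinsdf = getHomoloGene()
print(genedf[["HID", "Taxonomy ID", "Gene Symbol", "organism"]].head())
print(proteinsdf[["entrez GeneID", "gene symbol", "protein accession.ver"]].head())
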
mpg-age-bioinformatics/AGEpy | AGEpy/fasta.py | getFasta | def getFasta(opened_file, sequence_name):
"""
Retrieves a sequence from an opened multifasta file
:param opened_file: an opened multifasta file eg. opened_file=open("/path/to/file.fa",'r+')
:param sequence_name: the name of the sequence to be retrieved eg. for '>2 dna:chromosome chromosome:GRCm38:2:1:182113224:1 REF' use: sequence_name=str(2)
returns: a string with the sequence of interest
"""
lines = opened_file.readlines()
seq=str("")
for i in range(0, len(lines)):
line = lines[i]
if line[0] == ">":
fChr=line.split(" ")[0].split("\n")[0]
fChr=fChr[1:]
if fChr == sequence_name:
s=i
code=['N','A','C','T','G']
firstbase=lines[s+1][0]
while firstbase in code:
s=s + 1
seq=seq+lines[s]
firstbase=lines[s+1][0]
if len(seq)==0:
seq=None
else:
seq=seq.split("\n")
seq="".join(seq)
return seq | python | def getFasta(opened_file, sequence_name):
"""
Retrieves a sequence from an opened multifasta file
:param opened_file: an opened multifasta file eg. opened_file=open("/path/to/file.fa",'r+')
:param sequence_name: the name of the sequence to be retrieved eg. for '>2 dna:chromosome chromosome:GRCm38:2:1:182113224:1 REF' use: sequence_name=str(2)
returns: a string with the sequence of interest
"""
lines = opened_file.readlines()
seq=str("")
for i in range(0, len(lines)):
line = lines[i]
if line[0] == ">":
fChr=line.split(" ")[0].split("\n")[0]
fChr=fChr[1:]
if fChr == sequence_name:
s=i
code=['N','A','C','T','G']
firstbase=lines[s+1][0]
while firstbase in code:
s=s + 1
seq=seq+lines[s]
firstbase=lines[s+1][0]
if len(seq)==0:
seq=None
else:
seq=seq.split("\n")
seq="".join(seq)
return seq | [
"def",
"getFasta",
"(",
"opened_file",
",",
"sequence_name",
")",
":",
"lines",
"=",
"opened_file",
".",
"readlines",
"(",
")",
"seq",
"=",
"str",
"(",
"\"\"",
")",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"len",
"(",
"lines",
")",
")",
":",
"line",
"=",
"lines",
"[",
"i",
"]",
"if",
"line",
"[",
"0",
"]",
"==",
"\">\"",
":",
"fChr",
"=",
"line",
".",
"split",
"(",
"\" \"",
")",
"[",
"0",
"]",
".",
"split",
"(",
"\"\\n\"",
")",
"[",
"0",
"]",
"fChr",
"=",
"fChr",
"[",
"1",
":",
"]",
"if",
"fChr",
"==",
"sequence_name",
":",
"s",
"=",
"i",
"code",
"=",
"[",
"'N'",
",",
"'A'",
",",
"'C'",
",",
"'T'",
",",
"'G'",
"]",
"firstbase",
"=",
"lines",
"[",
"s",
"+",
"1",
"]",
"[",
"0",
"]",
"while",
"firstbase",
"in",
"code",
":",
"s",
"=",
"s",
"+",
"1",
"seq",
"=",
"seq",
"+",
"lines",
"[",
"s",
"]",
"firstbase",
"=",
"lines",
"[",
"s",
"+",
"1",
"]",
"[",
"0",
"]",
"if",
"len",
"(",
"seq",
")",
"==",
"0",
":",
"seq",
"=",
"None",
"else",
":",
"seq",
"=",
"seq",
".",
"split",
"(",
"\"\\n\"",
")",
"seq",
"=",
"\"\"",
".",
"join",
"(",
"seq",
")",
"return",
"seq"
] | Retrieves a sequence from an opened multifasta file
:param opened_file: an opened multifasta file eg. opened_file=open("/path/to/file.fa",'r+')
:param sequence_name: the name of the sequence to be retrieved eg. for '>2 dna:chromosome chromosome:GRCm38:2:1:182113224:1 REF' use: sequence_name=str(2)
returns: a string with the sequence of interest | [
"Retrieves",
"a",
"sequence",
"from",
"an",
"opened",
"multifasta",
"file"
] | 887808a7a2c1504f39ce8d8cb36c15c1721cd29f | https://github.com/mpg-age-bioinformatics/AGEpy/blob/887808a7a2c1504f39ce8d8cb36c15c1721cd29f/AGEpy/fasta.py#L2-L34 | train |
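
Runnable sketch for getFasta on a tiny hand-made multifasta file; the file name and sequence names are placeholders. Note that the loop peeks at the line following each accepted sequence line, so the record being looked up should not be the very last one in the file.

# Hedged sketch, not part of the AGEpy sources.
from AGEpy.fasta import getFasta

with open("toy.fa", "w") as out:                      # build a placeholder multifasta file
    out.write(">seq1 toy record\nACGTACGT\n>seq2 toy record\nTTTT\n")
with open("toy.fa", "r+") as handle:                  # the docstring opens the file with 'r+'
    print(getFasta(handle, "seq1"))                   # -> ACGTACGT
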
mpg-age-bioinformatics/AGEpy | AGEpy/fasta.py | writeFasta | def writeFasta(sequence, sequence_name, output_file):
"""
Writes a fasta sequence into a file.
:param sequence: a string with the sequence to be written
    :param sequence_name: name of the fasta sequence
:param output_file: /path/to/file.fa to be written
:returns: nothing
"""
i=0
f=open(output_file,'w')
f.write(">"+str(sequence_name)+"\n")
while i <= len(sequence):
f.write(sequence[i:i+60]+"\n")
i=i+60
f.close() | python | def writeFasta(sequence, sequence_name, output_file):
"""
Writes a fasta sequence into a file.
:param sequence: a string with the sequence to be written
    :param sequence_name: name of the fasta sequence
:param output_file: /path/to/file.fa to be written
:returns: nothing
"""
i=0
f=open(output_file,'w')
f.write(">"+str(sequence_name)+"\n")
while i <= len(sequence):
f.write(sequence[i:i+60]+"\n")
i=i+60
f.close() | [
"def",
"writeFasta",
"(",
"sequence",
",",
"sequence_name",
",",
"output_file",
")",
":",
"i",
"=",
"0",
"f",
"=",
"open",
"(",
"output_file",
",",
"'w'",
")",
"f",
".",
"write",
"(",
"\">\"",
"+",
"str",
"(",
"sequence_name",
")",
"+",
"\"\\n\"",
")",
"while",
"i",
"<=",
"len",
"(",
"sequence",
")",
":",
"f",
".",
"write",
"(",
"sequence",
"[",
"i",
":",
"i",
"+",
"60",
"]",
"+",
"\"\\n\"",
")",
"i",
"=",
"i",
"+",
"60",
"f",
".",
"close",
"(",
")"
] | Writes a fasta sequence into a file.
:param sequence: a string with the sequence to be written
:param sequence_name: name of the the fasta sequence
:param output_file: /path/to/file.fa to be written
:returns: nothing | [
"Writes",
"a",
"fasta",
"sequence",
"into",
"a",
"file",
"."
] | 887808a7a2c1504f39ce8d8cb36c15c1721cd29f | https://github.com/mpg-age-bioinformatics/AGEpy/blob/887808a7a2c1504f39ce8d8cb36c15c1721cd29f/AGEpy/fasta.py#L36-L52 | train |
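
Sketch for writeFasta; the output path and sequence name are placeholders, and the 60-character line wrapping follows from the code above.

# Hedged sketch, not part of the AGEpy sources.
from AGEpy.fasta import writeFasta

writeFasta("ACGTT" * 30, "toy_seq", "toy_out.fa")     # 150 bases -> lines of 60, 60 and 30
with open("toy_out.fa") as fh:
    print(fh.read())
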
mpg-age-bioinformatics/AGEpy | AGEpy/fasta.py | rewriteFasta | def rewriteFasta(sequence, sequence_name, fasta_in, fasta_out):
"""
Rewrites a specific sequence in a multifasta file while keeping the sequence header.
:param sequence: a string with the sequence to be written
:param sequence_name: the name of the sequence to be retrieved eg. for '>2 dna:chromosome chromosome:GRCm38:2:1:182113224:1 REF' use: sequence_name=str(2)
:param fasta_in: /path/to/original.fa
:param fasta_out: /path/to/destination.fa
:returns: nothing
"""
f=open(fasta_in, 'r+')
f2=open(fasta_out,'w')
lines = f.readlines()
i=0
while i < len(lines):
line = lines[i]
if line[0] == ">":
f2.write(line)
fChr=line.split(" ")[0]
fChr=fChr[1:]
if fChr == sequence_name:
code=['N','A','C','T','G']
firstbase=lines[i+1][0]
while firstbase in code:
i=i+1
firstbase=lines[i][0]
s=0
while s <= len(sequence):
f2.write(sequence[s:s+60]+"\n")
s=s+60
else:
i=i+1
else:
f2.write(line)
i=i+1
f2.close
f.close | python | def rewriteFasta(sequence, sequence_name, fasta_in, fasta_out):
"""
Rewrites a specific sequence in a multifasta file while keeping the sequence header.
:param sequence: a string with the sequence to be written
:param sequence_name: the name of the sequence to be retrieved eg. for '>2 dna:chromosome chromosome:GRCm38:2:1:182113224:1 REF' use: sequence_name=str(2)
:param fasta_in: /path/to/original.fa
:param fasta_out: /path/to/destination.fa
:returns: nothing
"""
f=open(fasta_in, 'r+')
f2=open(fasta_out,'w')
lines = f.readlines()
i=0
while i < len(lines):
line = lines[i]
if line[0] == ">":
f2.write(line)
fChr=line.split(" ")[0]
fChr=fChr[1:]
if fChr == sequence_name:
code=['N','A','C','T','G']
firstbase=lines[i+1][0]
while firstbase in code:
i=i+1
firstbase=lines[i][0]
s=0
while s <= len(sequence):
f2.write(sequence[s:s+60]+"\n")
s=s+60
else:
i=i+1
else:
f2.write(line)
i=i+1
f2.close
f.close | [
"def",
"rewriteFasta",
"(",
"sequence",
",",
"sequence_name",
",",
"fasta_in",
",",
"fasta_out",
")",
":",
"f",
"=",
"open",
"(",
"fasta_in",
",",
"'r+'",
")",
"f2",
"=",
"open",
"(",
"fasta_out",
",",
"'w'",
")",
"lines",
"=",
"f",
".",
"readlines",
"(",
")",
"i",
"=",
"0",
"while",
"i",
"<",
"len",
"(",
"lines",
")",
":",
"line",
"=",
"lines",
"[",
"i",
"]",
"if",
"line",
"[",
"0",
"]",
"==",
"\">\"",
":",
"f2",
".",
"write",
"(",
"line",
")",
"fChr",
"=",
"line",
".",
"split",
"(",
"\" \"",
")",
"[",
"0",
"]",
"fChr",
"=",
"fChr",
"[",
"1",
":",
"]",
"if",
"fChr",
"==",
"sequence_name",
":",
"code",
"=",
"[",
"'N'",
",",
"'A'",
",",
"'C'",
",",
"'T'",
",",
"'G'",
"]",
"firstbase",
"=",
"lines",
"[",
"i",
"+",
"1",
"]",
"[",
"0",
"]",
"while",
"firstbase",
"in",
"code",
":",
"i",
"=",
"i",
"+",
"1",
"firstbase",
"=",
"lines",
"[",
"i",
"]",
"[",
"0",
"]",
"s",
"=",
"0",
"while",
"s",
"<=",
"len",
"(",
"sequence",
")",
":",
"f2",
".",
"write",
"(",
"sequence",
"[",
"s",
":",
"s",
"+",
"60",
"]",
"+",
"\"\\n\"",
")",
"s",
"=",
"s",
"+",
"60",
"else",
":",
"i",
"=",
"i",
"+",
"1",
"else",
":",
"f2",
".",
"write",
"(",
"line",
")",
"i",
"=",
"i",
"+",
"1",
"f2",
".",
"close",
"f",
".",
"close"
] | Rewrites a specific sequence in a multifasta file while keeping the sequence header.
:param sequence: a string with the sequence to be written
:param sequence_name: the name of the sequence to be retrieved eg. for '>2 dna:chromosome chromosome:GRCm38:2:1:182113224:1 REF' use: sequence_name=str(2)
:param fasta_in: /path/to/original.fa
:param fasta_out: /path/to/destination.fa
:returns: nothing | [
"Rewrites",
"a",
"specific",
"sequence",
"in",
"a",
"multifasta",
"file",
"while",
"keeping",
"the",
"sequence",
"header",
"."
] | 887808a7a2c1504f39ce8d8cb36c15c1721cd29f | https://github.com/mpg-age-bioinformatics/AGEpy/blob/887808a7a2c1504f39ce8d8cb36c15c1721cd29f/AGEpy/fasta.py#L54-L92 | train |
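
Sketch for rewriteFasta; file names and sequences are placeholders. Two behaviours of the code above are worth keeping in mind: the header match uses line.split(" ")[0], so it only works for headers that contain a space after the sequence name, and the final f.close / f2.close calls lack parentheses, so flushing of the output file relies on the file objects being garbage-collected.

# Hedged sketch, not part of the AGEpy sources.
from AGEpy.fasta import rewriteFasta

with open("toy.fa", "w") as out:                      # placeholder input with descriptive headers
    out.write(">seq1 toy record\nACGTACGT\n>seq2 toy record\nTTTT\n")
rewriteFasta("GGGGCCCC", "seq1", "toy.fa", "toy_edited.fa")
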
acutesoftware/AIKIF | aikif/toolbox/Toolbox.py | Toolbox._get_tool_str | def _get_tool_str(self, tool):
"""
get a string representation of the tool
"""
res = tool['file']
try:
res += '.' + tool['function']
except Exception as ex:
print('Warning - no function defined for tool ' + str(tool))
res += '\n'
return res | python | def _get_tool_str(self, tool):
"""
get a string representation of the tool
"""
res = tool['file']
try:
res += '.' + tool['function']
except Exception as ex:
print('Warning - no function defined for tool ' + str(tool))
res += '\n'
return res | [
"def",
"_get_tool_str",
"(",
"self",
",",
"tool",
")",
":",
"res",
"=",
"tool",
"[",
"'file'",
"]",
"try",
":",
"res",
"+=",
"'.'",
"+",
"tool",
"[",
"'function'",
"]",
"except",
"Exception",
"as",
"ex",
":",
"print",
"(",
"'Warning - no function defined for tool '",
"+",
"str",
"(",
"tool",
")",
")",
"res",
"+=",
"'\\n'",
"return",
"res"
] | get a string representation of the tool | [
"get",
"a",
"string",
"representation",
"of",
"the",
"tool"
] | fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03 | https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/toolbox/Toolbox.py#L42-L52 | train |
acutesoftware/AIKIF | aikif/toolbox/Toolbox.py | Toolbox.get_tool_by_name | def get_tool_by_name(self, nme):
"""
get the tool object by name or file
"""
for t in self.lstTools:
if 'name' in t:
if t['name'] == nme:
return t
if 'file' in t:
if t['file'] == nme:
return t
return None | python | def get_tool_by_name(self, nme):
"""
get the tool object by name or file
"""
for t in self.lstTools:
if 'name' in t:
if t['name'] == nme:
return t
if 'file' in t:
if t['file'] == nme:
return t
return None | [
"def",
"get_tool_by_name",
"(",
"self",
",",
"nme",
")",
":",
"for",
"t",
"in",
"self",
".",
"lstTools",
":",
"if",
"'name'",
"in",
"t",
":",
"if",
"t",
"[",
"'name'",
"]",
"==",
"nme",
":",
"return",
"t",
"if",
"'file'",
"in",
"t",
":",
"if",
"t",
"[",
"'file'",
"]",
"==",
"nme",
":",
"return",
"t",
"return",
"None"
] | get the tool object by name or file | [
"get",
"the",
"tool",
"object",
"by",
"name",
"or",
"file"
] | fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03 | https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/toolbox/Toolbox.py#L54-L65 | train |
acutesoftware/AIKIF | aikif/toolbox/Toolbox.py | Toolbox.save | def save(self, fname=''):
"""
Save the list of tools to AIKIF core and optionally to local file fname
"""
if fname != '':
with open(fname, 'w') as f:
for t in self.lstTools:
self.verify(t)
f.write(self.tool_as_string(t)) | python | def save(self, fname=''):
"""
Save the list of tools to AIKIF core and optionally to local file fname
"""
if fname != '':
with open(fname, 'w') as f:
for t in self.lstTools:
self.verify(t)
f.write(self.tool_as_string(t)) | [
"def",
"save",
"(",
"self",
",",
"fname",
"=",
"''",
")",
":",
"if",
"fname",
"!=",
"''",
":",
"with",
"open",
"(",
"fname",
",",
"'w'",
")",
"as",
"f",
":",
"for",
"t",
"in",
"self",
".",
"lstTools",
":",
"self",
".",
"verify",
"(",
"t",
")",
"f",
".",
"write",
"(",
"self",
".",
"tool_as_string",
"(",
"t",
")",
")"
] | Save the list of tools to AIKIF core and optionally to local file fname | [
"Save",
"the",
"list",
"of",
"tools",
"to",
"AIKIF",
"core",
"and",
"optionally",
"to",
"local",
"file",
"fname"
] | fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03 | https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/toolbox/Toolbox.py#L88-L96 | train |
acutesoftware/AIKIF | aikif/toolbox/Toolbox.py | Toolbox.verify | def verify(self, tool):
"""
check that the tool exists
"""
if os.path.isfile(tool['file']):
print('Toolbox: program exists = TOK :: ' + tool['file'])
return True
else:
print('Toolbox: program exists = FAIL :: ' + tool['file'])
return False | python | def verify(self, tool):
"""
check that the tool exists
"""
if os.path.isfile(tool['file']):
print('Toolbox: program exists = TOK :: ' + tool['file'])
return True
else:
print('Toolbox: program exists = FAIL :: ' + tool['file'])
return False | [
"def",
"verify",
"(",
"self",
",",
"tool",
")",
":",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"tool",
"[",
"'file'",
"]",
")",
":",
"print",
"(",
"'Toolbox: program exists = TOK :: '",
"+",
"tool",
"[",
"'file'",
"]",
")",
"return",
"True",
"else",
":",
"print",
"(",
"'Toolbox: program exists = FAIL :: '",
"+",
"tool",
"[",
"'file'",
"]",
")",
"return",
"False"
] | check that the tool exists | [
"check",
"that",
"the",
"tool",
"exists"
] | fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03 | https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/toolbox/Toolbox.py#L98-L107 | train |
acutesoftware/AIKIF | aikif/toolbox/Toolbox.py | Toolbox.run | def run(self, tool, args, new_import_path=''):
"""
import the tool and call the function, passing the args.
"""
if new_import_path != '':
#print('APPENDING PATH = ', new_import_path)
sys.path.append(new_import_path)
#if silent == 'N':
print('main called ' + tool['file'] + '->' + tool['function'] + ' with ', args, ' = ', tool['return'])
mod = __import__( os.path.basename(tool['file']).split('.')[0]) # for absolute folder names
# mod = __import__( tool['file'][:-2]) # for aikif folders (doesnt work)
func = getattr(mod, tool['function'])
tool['return'] = func(args)
return tool['return'] | python | def run(self, tool, args, new_import_path=''):
"""
import the tool and call the function, passing the args.
"""
if new_import_path != '':
#print('APPENDING PATH = ', new_import_path)
sys.path.append(new_import_path)
#if silent == 'N':
print('main called ' + tool['file'] + '->' + tool['function'] + ' with ', args, ' = ', tool['return'])
mod = __import__( os.path.basename(tool['file']).split('.')[0]) # for absolute folder names
# mod = __import__( tool['file'][:-2]) # for aikif folders (doesnt work)
func = getattr(mod, tool['function'])
tool['return'] = func(args)
return tool['return'] | [
"def",
"run",
"(",
"self",
",",
"tool",
",",
"args",
",",
"new_import_path",
"=",
"''",
")",
":",
"if",
"new_import_path",
"!=",
"''",
":",
"sys",
".",
"path",
".",
"append",
"(",
"new_import_path",
")",
"print",
"(",
"'main called '",
"+",
"tool",
"[",
"'file'",
"]",
"+",
"'->'",
"+",
"tool",
"[",
"'function'",
"]",
"+",
"' with '",
",",
"args",
",",
"' = '",
",",
"tool",
"[",
"'return'",
"]",
")",
"mod",
"=",
"__import__",
"(",
"os",
".",
"path",
".",
"basename",
"(",
"tool",
"[",
"'file'",
"]",
")",
".",
"split",
"(",
"'.'",
")",
"[",
"0",
"]",
")",
"func",
"=",
"getattr",
"(",
"mod",
",",
"tool",
"[",
"'function'",
"]",
")",
"tool",
"[",
"'return'",
"]",
"=",
"func",
"(",
"args",
")",
"return",
"tool",
"[",
"'return'",
"]"
] | import the tool and call the function, passing the args. | [
"import",
"the",
"tool",
"and",
"call",
"the",
"function",
"passing",
"the",
"args",
"."
] | fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03 | https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/toolbox/Toolbox.py#L109-L123 | train |
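
Hypothetical sketch tying the Toolbox methods above together. The import path, the no-argument constructor and the direct append to lstTools are assumptions inferred from the methods shown, not confirmed by these records; the script path and function name are placeholders. The 'return' key is pre-seeded because run() prints tool['return'] before assigning it.

# Hedged sketch, not part of the AIKIF sources.
from aikif.toolbox.Toolbox import Toolbox   # assumed module path

tb = Toolbox()                              # assumed no-argument constructor
tb.lstTools.append({'name': 'word_count',
                    'file': '/abs/path/to/word_count.py',   # placeholder tool script
                    'function': 'count_words',              # placeholder function inside it
                    'return': None})
tool = tb.get_tool_by_name('word_count')
if tool is not None and tb.verify(tool):
    result = tb.run(tool, ['some text to count'], new_import_path='/abs/path/to')
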
Nachtfeuer/pipeline | spline/application.py | main | def main(**kwargs):
"""The Pipeline tool."""
options = ApplicationOptions(**kwargs)
Event.configure(is_logging_enabled=options.event_logging)
application = Application(options)
application.run(options.definition) | python | def main(**kwargs):
"""The Pipeline tool."""
options = ApplicationOptions(**kwargs)
Event.configure(is_logging_enabled=options.event_logging)
application = Application(options)
application.run(options.definition) | [
"def",
"main",
"(",
"**",
"kwargs",
")",
":",
"options",
"=",
"ApplicationOptions",
"(",
"**",
"kwargs",
")",
"Event",
".",
"configure",
"(",
"is_logging_enabled",
"=",
"options",
".",
"event_logging",
")",
"application",
"=",
"Application",
"(",
"options",
")",
"application",
".",
"run",
"(",
"options",
".",
"definition",
")"
] | The Pipeline tool. | [
"The",
"Pipeline",
"tool",
"."
] | 04ca18c4e95e4349532bb45b768206393e1f2c13 | https://github.com/Nachtfeuer/pipeline/blob/04ca18c4e95e4349532bb45b768206393e1f2c13/spline/application.py#L209-L214 | train |
Nachtfeuer/pipeline | spline/application.py | Application.setup_logging | def setup_logging(self):
"""Setup of application logging."""
is_custom_logging = len(self.options.logging_config) > 0
is_custom_logging = is_custom_logging and os.path.isfile(self.options.logging_config)
is_custom_logging = is_custom_logging and not self.options.dry_run
if is_custom_logging:
Logger.configure_by_file(self.options.logging_config)
else:
logging_format = "%(asctime)-15s - %(name)s - %(message)s"
if self.options.dry_run:
logging_format = "%(name)s - %(message)s"
Logger.configure_default(logging_format, self.logging_level) | python | def setup_logging(self):
"""Setup of application logging."""
is_custom_logging = len(self.options.logging_config) > 0
is_custom_logging = is_custom_logging and os.path.isfile(self.options.logging_config)
is_custom_logging = is_custom_logging and not self.options.dry_run
if is_custom_logging:
Logger.configure_by_file(self.options.logging_config)
else:
logging_format = "%(asctime)-15s - %(name)s - %(message)s"
if self.options.dry_run:
logging_format = "%(name)s - %(message)s"
Logger.configure_default(logging_format, self.logging_level) | [
"def",
"setup_logging",
"(",
"self",
")",
":",
"is_custom_logging",
"=",
"len",
"(",
"self",
".",
"options",
".",
"logging_config",
")",
">",
"0",
"is_custom_logging",
"=",
"is_custom_logging",
"and",
"os",
".",
"path",
".",
"isfile",
"(",
"self",
".",
"options",
".",
"logging_config",
")",
"is_custom_logging",
"=",
"is_custom_logging",
"and",
"not",
"self",
".",
"options",
".",
"dry_run",
"if",
"is_custom_logging",
":",
"Logger",
".",
"configure_by_file",
"(",
"self",
".",
"options",
".",
"logging_config",
")",
"else",
":",
"logging_format",
"=",
"\"%(asctime)-15s - %(name)s - %(message)s\"",
"if",
"self",
".",
"options",
".",
"dry_run",
":",
"logging_format",
"=",
"\"%(name)s - %(message)s\"",
"Logger",
".",
"configure_default",
"(",
"logging_format",
",",
"self",
".",
"logging_level",
")"
] | Setup of application logging. | [
"Setup",
"of",
"application",
"logging",
"."
] | 04ca18c4e95e4349532bb45b768206393e1f2c13 | https://github.com/Nachtfeuer/pipeline/blob/04ca18c4e95e4349532bb45b768206393e1f2c13/spline/application.py#L56-L68 | train |
Nachtfeuer/pipeline | spline/application.py | Application.validate_document | def validate_document(self, definition):
"""
Validate given pipeline document.
The method is trying to load, parse and validate the spline document.
The validator verifies the Python structure B{not} the file format.
Args:
definition (str): path and filename of a yaml file containing a valid spline definition.
Returns:
dict: loaded and validated spline document.
Note:
if validation fails the application does exit!
See Also:
spline.validation.Validator
"""
initial_document = {}
try:
initial_document = Loader.load(definition)
except RuntimeError as exception:
self.logger.error(str(exception))
sys.exit(1)
document = Validator().validate(initial_document)
if document is None:
self.logger.info("Schema validation for '%s' has failed", definition)
sys.exit(1)
self.logger.info("Schema validation for '%s' succeeded", definition)
return document | python | def validate_document(self, definition):
"""
Validate given pipeline document.
The method is trying to load, parse and validate the spline document.
The validator verifies the Python structure B{not} the file format.
Args:
definition (str): path and filename of a yaml file containing a valid spline definition.
Returns:
dict: loaded and validated spline document.
Note:
if validation fails the application does exit!
See Also:
spline.validation.Validator
"""
initial_document = {}
try:
initial_document = Loader.load(definition)
except RuntimeError as exception:
self.logger.error(str(exception))
sys.exit(1)
document = Validator().validate(initial_document)
if document is None:
self.logger.info("Schema validation for '%s' has failed", definition)
sys.exit(1)
self.logger.info("Schema validation for '%s' succeeded", definition)
return document | [
"def",
"validate_document",
"(",
"self",
",",
"definition",
")",
":",
"initial_document",
"=",
"{",
"}",
"try",
":",
"initial_document",
"=",
"Loader",
".",
"load",
"(",
"definition",
")",
"except",
"RuntimeError",
"as",
"exception",
":",
"self",
".",
"logger",
".",
"error",
"(",
"str",
"(",
"exception",
")",
")",
"sys",
".",
"exit",
"(",
"1",
")",
"document",
"=",
"Validator",
"(",
")",
".",
"validate",
"(",
"initial_document",
")",
"if",
"document",
"is",
"None",
":",
"self",
".",
"logger",
".",
"info",
"(",
"\"Schema validation for '%s' has failed\"",
",",
"definition",
")",
"sys",
".",
"exit",
"(",
"1",
")",
"self",
".",
"logger",
".",
"info",
"(",
"\"Schema validation for '%s' succeeded\"",
",",
"definition",
")",
"return",
"document"
] | Validate given pipeline document.
The method is trying to load, parse and validate the spline document.
The validator verifies the Python structure B{not} the file format.
Args:
definition (str): path and filename of a yaml file containing a valid spline definition.
Returns:
dict: loaded and validated spline document.
Note:
if validation fails the application does exit!
See Also:
spline.validation.Validator | [
"Validate",
"given",
"pipeline",
"document",
"."
] | 04ca18c4e95e4349532bb45b768206393e1f2c13 | https://github.com/Nachtfeuer/pipeline/blob/04ca18c4e95e4349532bb45b768206393e1f2c13/spline/application.py#L70-L101 | train |
Nachtfeuer/pipeline | spline/application.py | Application.run_matrix | def run_matrix(self, matrix_definition, document):
"""
Running pipeline via a matrix.
Args:
matrix_definition (dict): one concrete matrix item.
document (dict): spline document (complete) as loaded from yaml file.
"""
matrix = Matrix(matrix_definition, 'matrix(parallel)' in document)
process_data = MatrixProcessData()
process_data.options = self.options
process_data.pipeline = document['pipeline']
process_data.model = {} if 'model' not in document else document['model']
process_data.hooks = Hooks(document)
return matrix.process(process_data) | python | def run_matrix(self, matrix_definition, document):
"""
Running pipeline via a matrix.
Args:
matrix_definition (dict): one concrete matrix item.
document (dict): spline document (complete) as loaded from yaml file.
"""
matrix = Matrix(matrix_definition, 'matrix(parallel)' in document)
process_data = MatrixProcessData()
process_data.options = self.options
process_data.pipeline = document['pipeline']
process_data.model = {} if 'model' not in document else document['model']
process_data.hooks = Hooks(document)
return matrix.process(process_data) | [
"def",
"run_matrix",
"(",
"self",
",",
"matrix_definition",
",",
"document",
")",
":",
"matrix",
"=",
"Matrix",
"(",
"matrix_definition",
",",
"'matrix(parallel)'",
"in",
"document",
")",
"process_data",
"=",
"MatrixProcessData",
"(",
")",
"process_data",
".",
"options",
"=",
"self",
".",
"options",
"process_data",
".",
"pipeline",
"=",
"document",
"[",
"'pipeline'",
"]",
"process_data",
".",
"model",
"=",
"{",
"}",
"if",
"'model'",
"not",
"in",
"document",
"else",
"document",
"[",
"'model'",
"]",
"process_data",
".",
"hooks",
"=",
"Hooks",
"(",
"document",
")",
"return",
"matrix",
".",
"process",
"(",
"process_data",
")"
] | Running pipeline via a matrix.
Args:
matrix_definition (dict): one concrete matrix item.
document (dict): spline document (complete) as loaded from yaml file. | [
"Running",
"pipeline",
"via",
"a",
"matrix",
"."
] | 04ca18c4e95e4349532bb45b768206393e1f2c13 | https://github.com/Nachtfeuer/pipeline/blob/04ca18c4e95e4349532bb45b768206393e1f2c13/spline/application.py#L103-L119 | train |
Nachtfeuer/pipeline | spline/application.py | Application.shutdown | def shutdown(self, collector, success):
"""Shutdown of the application."""
self.event.delegate(success)
if collector is not None:
collector.queue.put(None)
collector.join()
if not success:
sys.exit(1) | python | def shutdown(self, collector, success):
"""Shutdown of the application."""
self.event.delegate(success)
if collector is not None:
collector.queue.put(None)
collector.join()
if not success:
sys.exit(1) | [
"def",
"shutdown",
"(",
"self",
",",
"collector",
",",
"success",
")",
":",
"self",
".",
"event",
".",
"delegate",
"(",
"success",
")",
"if",
"collector",
"is",
"not",
"None",
":",
"collector",
".",
"queue",
".",
"put",
"(",
"None",
")",
"collector",
".",
"join",
"(",
")",
"if",
"not",
"success",
":",
"sys",
".",
"exit",
"(",
"1",
")"
] | Shutdown of the application. | [
"Shutdown",
"of",
"the",
"application",
"."
] | 04ca18c4e95e4349532bb45b768206393e1f2c13 | https://github.com/Nachtfeuer/pipeline/blob/04ca18c4e95e4349532bb45b768206393e1f2c13/spline/application.py#L121-L128 | train |
Nachtfeuer/pipeline | spline/application.py | Application.provide_temporary_scripts_path | def provide_temporary_scripts_path(self):
"""When configured trying to ensure that path does exist."""
if len(self.options.temporary_scripts_path) > 0:
if os.path.isfile(self.options.temporary_scripts_path):
self.logger.error("Error: configured script path seems to be a file!")
# it's ok to leave because called before the collector runs
sys.exit(1)
if not os.path.isdir(self.options.temporary_scripts_path):
os.makedirs(self.options.temporary_scripts_path) | python | def provide_temporary_scripts_path(self):
"""When configured trying to ensure that path does exist."""
if len(self.options.temporary_scripts_path) > 0:
if os.path.isfile(self.options.temporary_scripts_path):
self.logger.error("Error: configured script path seems to be a file!")
# it's ok to leave because called before the collector runs
sys.exit(1)
if not os.path.isdir(self.options.temporary_scripts_path):
os.makedirs(self.options.temporary_scripts_path) | [
"def",
"provide_temporary_scripts_path",
"(",
"self",
")",
":",
"if",
"len",
"(",
"self",
".",
"options",
".",
"temporary_scripts_path",
")",
">",
"0",
":",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"self",
".",
"options",
".",
"temporary_scripts_path",
")",
":",
"self",
".",
"logger",
".",
"error",
"(",
"\"Error: configured script path seems to be a file!\"",
")",
"sys",
".",
"exit",
"(",
"1",
")",
"if",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"self",
".",
"options",
".",
"temporary_scripts_path",
")",
":",
"os",
".",
"makedirs",
"(",
"self",
".",
"options",
".",
"temporary_scripts_path",
")"
] | When configured trying to ensure that path does exist. | [
"When",
"configured",
"trying",
"to",
"ensure",
"that",
"path",
"does",
"exist",
"."
] | 04ca18c4e95e4349532bb45b768206393e1f2c13 | https://github.com/Nachtfeuer/pipeline/blob/04ca18c4e95e4349532bb45b768206393e1f2c13/spline/application.py#L162-L171 | train |
Nachtfeuer/pipeline | spline/application.py | Application.create_and_run_collector | def create_and_run_collector(document, options):
"""Create and run collector process for report data."""
collector = None
if not options.report == 'off':
collector = Collector()
collector.store.configure(document)
Event.configure(collector_queue=collector.queue)
collector.start()
return collector | python | def create_and_run_collector(document, options):
"""Create and run collector process for report data."""
collector = None
if not options.report == 'off':
collector = Collector()
collector.store.configure(document)
Event.configure(collector_queue=collector.queue)
collector.start()
return collector | [
"def",
"create_and_run_collector",
"(",
"document",
",",
"options",
")",
":",
"collector",
"=",
"None",
"if",
"not",
"options",
".",
"report",
"==",
"'off'",
":",
"collector",
"=",
"Collector",
"(",
")",
"collector",
".",
"store",
".",
"configure",
"(",
"document",
")",
"Event",
".",
"configure",
"(",
"collector_queue",
"=",
"collector",
".",
"queue",
")",
"collector",
".",
"start",
"(",
")",
"return",
"collector"
] | Create and run collector process for report data. | [
"Create",
"and",
"run",
"collector",
"process",
"for",
"report",
"data",
"."
] | 04ca18c4e95e4349532bb45b768206393e1f2c13 | https://github.com/Nachtfeuer/pipeline/blob/04ca18c4e95e4349532bb45b768206393e1f2c13/spline/application.py#L174-L182 | train |
Nachtfeuer/pipeline | spline/tools/filters.py | docker_environment | def docker_environment(env):
"""
Transform dictionary of environment variables into Docker -e parameters.
>>> result = docker_environment({'param1': 'val1', 'param2': 'val2'})
>>> result in ['-e "param1=val1" -e "param2=val2"', '-e "param2=val2" -e "param1=val1"']
True
"""
return ' '.join(
["-e \"%s=%s\"" % (key, value.replace("$", "\\$").replace("\"", "\\\"").replace("`", "\\`"))
for key, value in env.items()]) | python | def docker_environment(env):
"""
Transform dictionary of environment variables into Docker -e parameters.
>>> result = docker_environment({'param1': 'val1', 'param2': 'val2'})
>>> result in ['-e "param1=val1" -e "param2=val2"', '-e "param2=val2" -e "param1=val1"']
True
"""
return ' '.join(
["-e \"%s=%s\"" % (key, value.replace("$", "\\$").replace("\"", "\\\"").replace("`", "\\`"))
for key, value in env.items()]) | [
"def",
"docker_environment",
"(",
"env",
")",
":",
"return",
"' '",
".",
"join",
"(",
"[",
"\"-e \\\"%s=%s\\\"\"",
"%",
"(",
"key",
",",
"value",
".",
"replace",
"(",
"\"$\"",
",",
"\"\\\\$\"",
")",
".",
"replace",
"(",
"\"\\\"\"",
",",
"\"\\\\\\\"\"",
")",
".",
"replace",
"(",
"\"`\"",
",",
"\"\\\\`\"",
")",
")",
"for",
"key",
",",
"value",
"in",
"env",
".",
"items",
"(",
")",
"]",
")"
] | Transform dictionary of environment variables into Docker -e parameters.
>>> result = docker_environment({'param1': 'val1', 'param2': 'val2'})
>>> result in ['-e "param1=val1" -e "param2=val2"', '-e "param2=val2" -e "param1=val1"']
True | [
"Transform",
"dictionary",
"of",
"environment",
"variables",
"into",
"Docker",
"-",
"e",
"parameters",
"."
] | 04ca18c4e95e4349532bb45b768206393e1f2c13 | https://github.com/Nachtfeuer/pipeline/blob/04ca18c4e95e4349532bb45b768206393e1f2c13/spline/tools/filters.py#L60-L70 | train |
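
One more example of the escaping behaviour, complementing the doctest above: dollar signs, double quotes and backticks in values are backslash-escaped before being wrapped in -e "key=value" pairs.

# Hedged sketch, not part of the spline sources.
from spline.tools.filters import docker_environment

print(docker_environment({'message': 'cost is $5 and a "quote"'}))
# -e "message=cost is \$5 and a \"quote\""
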
OpenHydrology/floodestimation | floodestimation/fehdata.py | _retrieve_download_url | def _retrieve_download_url():
"""
Retrieves download location for FEH data zip file from hosted json configuration file.
:return: URL for FEH data file
:rtype: str
"""
try:
# Try to obtain the url from the Open Hydrology json config file.
with urlopen(config['nrfa']['oh_json_url'], timeout=10) as f:
remote_config = json.loads(f.read().decode('utf-8'))
# This is just for testing, assuming a relative local file path starting with ./
if remote_config['nrfa_url'].startswith('.'):
remote_config['nrfa_url'] = 'file:' + pathname2url(os.path.abspath(remote_config['nrfa_url']))
# Save retrieved config data
_update_nrfa_metadata(remote_config)
return remote_config['nrfa_url']
except URLError:
# If that fails (for whatever reason) use the fallback constant.
return config['nrfa']['url'] | python | def _retrieve_download_url():
"""
Retrieves download location for FEH data zip file from hosted json configuration file.
:return: URL for FEH data file
:rtype: str
"""
try:
# Try to obtain the url from the Open Hydrology json config file.
with urlopen(config['nrfa']['oh_json_url'], timeout=10) as f:
remote_config = json.loads(f.read().decode('utf-8'))
# This is just for testing, assuming a relative local file path starting with ./
if remote_config['nrfa_url'].startswith('.'):
remote_config['nrfa_url'] = 'file:' + pathname2url(os.path.abspath(remote_config['nrfa_url']))
# Save retrieved config data
_update_nrfa_metadata(remote_config)
return remote_config['nrfa_url']
except URLError:
# If that fails (for whatever reason) use the fallback constant.
return config['nrfa']['url'] | [
"def",
"_retrieve_download_url",
"(",
")",
":",
"try",
":",
"with",
"urlopen",
"(",
"config",
"[",
"'nrfa'",
"]",
"[",
"'oh_json_url'",
"]",
",",
"timeout",
"=",
"10",
")",
"as",
"f",
":",
"remote_config",
"=",
"json",
".",
"loads",
"(",
"f",
".",
"read",
"(",
")",
".",
"decode",
"(",
"'utf-8'",
")",
")",
"if",
"remote_config",
"[",
"'nrfa_url'",
"]",
".",
"startswith",
"(",
"'.'",
")",
":",
"remote_config",
"[",
"'nrfa_url'",
"]",
"=",
"'file:'",
"+",
"pathname2url",
"(",
"os",
".",
"path",
".",
"abspath",
"(",
"remote_config",
"[",
"'nrfa_url'",
"]",
")",
")",
"_update_nrfa_metadata",
"(",
"remote_config",
")",
"return",
"remote_config",
"[",
"'nrfa_url'",
"]",
"except",
"URLError",
":",
"return",
"config",
"[",
"'nrfa'",
"]",
"[",
"'url'",
"]"
] | Retrieves download location for FEH data zip file from hosted json configuration file.
:return: URL for FEH data file
:rtype: str | [
"Retrieves",
"download",
"location",
"for",
"FEH",
"data",
"zip",
"file",
"from",
"hosted",
"json",
"configuration",
"file",
"."
] | 782da7c5abd1348923129efe89fb70003ebb088c | https://github.com/OpenHydrology/floodestimation/blob/782da7c5abd1348923129efe89fb70003ebb088c/floodestimation/fehdata.py#L58-L79 | train |
OpenHydrology/floodestimation | floodestimation/fehdata.py | update_available | def update_available(after_days=1):
"""
Check whether updated NRFA data is available.
:param after_days: Only check if not checked previously since a certain number of days ago
:type after_days: float
:return: `True` if update available, `False` if not, `None` if remote location cannot be reached.
:rtype: bool or None
"""
never_downloaded = not bool(config.get('nrfa', 'downloaded_on', fallback=None) or None)
if never_downloaded:
config.set_datetime('nrfa', 'update_checked_on', datetime.utcnow())
config.save()
return True
last_checked_on = config.get_datetime('nrfa', 'update_checked_on', fallback=None) or datetime.fromtimestamp(0)
if datetime.utcnow() < last_checked_on + timedelta(days=after_days):
return False
current_version = LooseVersion(config.get('nrfa', 'version', fallback='0') or '0')
try:
with urlopen(config['nrfa']['oh_json_url'], timeout=10) as f:
remote_version = LooseVersion(json.loads(f.read().decode('utf-8'))['nrfa_version'])
config.set_datetime('nrfa', 'update_checked_on', datetime.utcnow())
config.save()
return remote_version > current_version
except URLError:
return None | python | def update_available(after_days=1):
"""
Check whether updated NRFA data is available.
:param after_days: Only check if not checked previously since a certain number of days ago
:type after_days: float
:return: `True` if update available, `False` if not, `None` if remote location cannot be reached.
:rtype: bool or None
"""
never_downloaded = not bool(config.get('nrfa', 'downloaded_on', fallback=None) or None)
if never_downloaded:
config.set_datetime('nrfa', 'update_checked_on', datetime.utcnow())
config.save()
return True
last_checked_on = config.get_datetime('nrfa', 'update_checked_on', fallback=None) or datetime.fromtimestamp(0)
if datetime.utcnow() < last_checked_on + timedelta(days=after_days):
return False
current_version = LooseVersion(config.get('nrfa', 'version', fallback='0') or '0')
try:
with urlopen(config['nrfa']['oh_json_url'], timeout=10) as f:
remote_version = LooseVersion(json.loads(f.read().decode('utf-8'))['nrfa_version'])
config.set_datetime('nrfa', 'update_checked_on', datetime.utcnow())
config.save()
return remote_version > current_version
except URLError:
return None | [
"def",
"update_available",
"(",
"after_days",
"=",
"1",
")",
":",
"never_downloaded",
"=",
"not",
"bool",
"(",
"config",
".",
"get",
"(",
"'nrfa'",
",",
"'downloaded_on'",
",",
"fallback",
"=",
"None",
")",
"or",
"None",
")",
"if",
"never_downloaded",
":",
"config",
".",
"set_datetime",
"(",
"'nrfa'",
",",
"'update_checked_on'",
",",
"datetime",
".",
"utcnow",
"(",
")",
")",
"config",
".",
"save",
"(",
")",
"return",
"True",
"last_checked_on",
"=",
"config",
".",
"get_datetime",
"(",
"'nrfa'",
",",
"'update_checked_on'",
",",
"fallback",
"=",
"None",
")",
"or",
"datetime",
".",
"fromtimestamp",
"(",
"0",
")",
"if",
"datetime",
".",
"utcnow",
"(",
")",
"<",
"last_checked_on",
"+",
"timedelta",
"(",
"days",
"=",
"after_days",
")",
":",
"return",
"False",
"current_version",
"=",
"LooseVersion",
"(",
"config",
".",
"get",
"(",
"'nrfa'",
",",
"'version'",
",",
"fallback",
"=",
"'0'",
")",
"or",
"'0'",
")",
"try",
":",
"with",
"urlopen",
"(",
"config",
"[",
"'nrfa'",
"]",
"[",
"'oh_json_url'",
"]",
",",
"timeout",
"=",
"10",
")",
"as",
"f",
":",
"remote_version",
"=",
"LooseVersion",
"(",
"json",
".",
"loads",
"(",
"f",
".",
"read",
"(",
")",
".",
"decode",
"(",
"'utf-8'",
")",
")",
"[",
"'nrfa_version'",
"]",
")",
"config",
".",
"set_datetime",
"(",
"'nrfa'",
",",
"'update_checked_on'",
",",
"datetime",
".",
"utcnow",
"(",
")",
")",
"config",
".",
"save",
"(",
")",
"return",
"remote_version",
">",
"current_version",
"except",
"URLError",
":",
"return",
"None"
] | Check whether updated NRFA data is available.
:param after_days: Only check if not checked previously since a certain number of days ago
:type after_days: float
:return: `True` if update available, `False` if not, `None` if remote location cannot be reached.
:rtype: bool or None | [
"Check",
"whether",
"updated",
"NRFA",
"data",
"is",
"available",
"."
] | 782da7c5abd1348923129efe89fb70003ebb088c | https://github.com/OpenHydrology/floodestimation/blob/782da7c5abd1348923129efe89fb70003ebb088c/floodestimation/fehdata.py#L82-L109 | train |
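
A guarded refresh sketch combining update_available with download_data and unzip_data (both appear in later records of this file). The import name is assumed from the record's path; the number of days is a placeholder.

# Hedged sketch, not part of the floodestimation sources.
from floodestimation import fehdata

status = fehdata.update_available(after_days=7)
if status:                      # True: newer NRFA data has been published
    fehdata.download_data()
    fehdata.unzip_data()
elif status is None:            # None: remote configuration could not be reached
    print("Could not check for NRFA updates; keeping cached data.")
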
OpenHydrology/floodestimation | floodestimation/fehdata.py | download_data | def download_data():
"""
Downloads complete station dataset including catchment descriptors and amax records. And saves it into a cache
folder.
"""
with urlopen(_retrieve_download_url()) as f:
with open(os.path.join(CACHE_FOLDER, CACHE_ZIP), "wb") as local_file:
local_file.write(f.read()) | python | def download_data():
"""
Downloads complete station dataset including catchment descriptors and amax records. And saves it into a cache
folder.
"""
with urlopen(_retrieve_download_url()) as f:
with open(os.path.join(CACHE_FOLDER, CACHE_ZIP), "wb") as local_file:
local_file.write(f.read()) | [
"def",
"download_data",
"(",
")",
":",
"with",
"urlopen",
"(",
"_retrieve_download_url",
"(",
")",
")",
"as",
"f",
":",
"with",
"open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"CACHE_FOLDER",
",",
"CACHE_ZIP",
")",
",",
"\"wb\"",
")",
"as",
"local_file",
":",
"local_file",
".",
"write",
"(",
"f",
".",
"read",
"(",
")",
")"
] | Downloads complete station dataset including catchment descriptors and amax records. And saves it into a cache
folder. | [
"Downloads",
"complete",
"station",
"dataset",
"including",
"catchment",
"descriptors",
"and",
"amax",
"records",
".",
"And",
"saves",
"it",
"into",
"a",
"cache",
"folder",
"."
] | 782da7c5abd1348923129efe89fb70003ebb088c | https://github.com/OpenHydrology/floodestimation/blob/782da7c5abd1348923129efe89fb70003ebb088c/floodestimation/fehdata.py#L112-L119 | train |
OpenHydrology/floodestimation | floodestimation/fehdata.py | _update_nrfa_metadata | def _update_nrfa_metadata(remote_config):
"""
Save NRFA metadata to local config file using retrieved config data
:param remote_config: Downloaded JSON data, not a ConfigParser object!
"""
config['nrfa']['oh_json_url'] = remote_config['nrfa_oh_json_url']
config['nrfa']['version'] = remote_config['nrfa_version']
config['nrfa']['url'] = remote_config['nrfa_url']
config.set_datetime('nrfa', 'published_on', datetime.utcfromtimestamp(remote_config['nrfa_published_on']))
config.set_datetime('nrfa', 'downloaded_on', datetime.utcnow())
config.set_datetime('nrfa', 'update_checked_on', datetime.utcnow())
config.save() | python | def _update_nrfa_metadata(remote_config):
"""
Save NRFA metadata to local config file using retrieved config data
:param remote_config: Downloaded JSON data, not a ConfigParser object!
"""
config['nrfa']['oh_json_url'] = remote_config['nrfa_oh_json_url']
config['nrfa']['version'] = remote_config['nrfa_version']
config['nrfa']['url'] = remote_config['nrfa_url']
config.set_datetime('nrfa', 'published_on', datetime.utcfromtimestamp(remote_config['nrfa_published_on']))
config.set_datetime('nrfa', 'downloaded_on', datetime.utcnow())
config.set_datetime('nrfa', 'update_checked_on', datetime.utcnow())
config.save() | [
"def",
"_update_nrfa_metadata",
"(",
"remote_config",
")",
":",
"config",
"[",
"'nrfa'",
"]",
"[",
"'oh_json_url'",
"]",
"=",
"remote_config",
"[",
"'nrfa_oh_json_url'",
"]",
"config",
"[",
"'nrfa'",
"]",
"[",
"'version'",
"]",
"=",
"remote_config",
"[",
"'nrfa_version'",
"]",
"config",
"[",
"'nrfa'",
"]",
"[",
"'url'",
"]",
"=",
"remote_config",
"[",
"'nrfa_url'",
"]",
"config",
".",
"set_datetime",
"(",
"'nrfa'",
",",
"'published_on'",
",",
"datetime",
".",
"utcfromtimestamp",
"(",
"remote_config",
"[",
"'nrfa_published_on'",
"]",
")",
")",
"config",
".",
"set_datetime",
"(",
"'nrfa'",
",",
"'downloaded_on'",
",",
"datetime",
".",
"utcnow",
"(",
")",
")",
"config",
".",
"set_datetime",
"(",
"'nrfa'",
",",
"'update_checked_on'",
",",
"datetime",
".",
"utcnow",
"(",
")",
")",
"config",
".",
"save",
"(",
")"
] | Save NRFA metadata to local config file using retrieved config data
:param remote_config: Downloaded JSON data, not a ConfigParser object! | [
"Save",
"NRFA",
"metadata",
"to",
"local",
"config",
"file",
"using",
"retrieved",
"config",
"data"
] | 782da7c5abd1348923129efe89fb70003ebb088c | https://github.com/OpenHydrology/floodestimation/blob/782da7c5abd1348923129efe89fb70003ebb088c/floodestimation/fehdata.py#L122-L134 | train |
OpenHydrology/floodestimation | floodestimation/fehdata.py | nrfa_metadata | def nrfa_metadata():
"""
Return metadata on the NRFA data.
Returned metadata is a dict with the following elements:
- `url`: string with NRFA data download URL
- `version`: string with NRFA version number, e.g. '3.3.4'
- `published_on`: datetime of data release/publication (only month and year are accurate, rest should be ignored)
- `downloaded_on`: datetime of last download
:return: metadata
:rtype: dict
"""
result = {
'url': config.get('nrfa', 'url', fallback=None) or None, # Empty strings '' become None
'version': config.get('nrfa', 'version', fallback=None) or None,
'published_on': config.get_datetime('nrfa', 'published_on', fallback=None) or None,
'downloaded_on': config.get_datetime('nrfa', 'downloaded_on', fallback=None) or None
}
return result | python | def nrfa_metadata():
"""
Return metadata on the NRFA data.
Returned metadata is a dict with the following elements:
- `url`: string with NRFA data download URL
- `version`: string with NRFA version number, e.g. '3.3.4'
- `published_on`: datetime of data release/publication (only month and year are accurate, rest should be ignored)
- `downloaded_on`: datetime of last download
:return: metadata
:rtype: dict
"""
result = {
'url': config.get('nrfa', 'url', fallback=None) or None, # Empty strings '' become None
'version': config.get('nrfa', 'version', fallback=None) or None,
'published_on': config.get_datetime('nrfa', 'published_on', fallback=None) or None,
'downloaded_on': config.get_datetime('nrfa', 'downloaded_on', fallback=None) or None
}
return result | [
"def",
"nrfa_metadata",
"(",
")",
":",
"result",
"=",
"{",
"'url'",
":",
"config",
".",
"get",
"(",
"'nrfa'",
",",
"'url'",
",",
"fallback",
"=",
"None",
")",
"or",
"None",
",",
"'version'",
":",
"config",
".",
"get",
"(",
"'nrfa'",
",",
"'version'",
",",
"fallback",
"=",
"None",
")",
"or",
"None",
",",
"'published_on'",
":",
"config",
".",
"get_datetime",
"(",
"'nrfa'",
",",
"'published_on'",
",",
"fallback",
"=",
"None",
")",
"or",
"None",
",",
"'downloaded_on'",
":",
"config",
".",
"get_datetime",
"(",
"'nrfa'",
",",
"'downloaded_on'",
",",
"fallback",
"=",
"None",
")",
"or",
"None",
"}",
"return",
"result"
] | Return metadata on the NRFA data.
Returned metadata is a dict with the following elements:
- `url`: string with NRFA data download URL
- `version`: string with NRFA version number, e.g. '3.3.4'
- `published_on`: datetime of data release/publication (only month and year are accurate, rest should be ignored)
- `downloaded_on`: datetime of last download
:return: metadata
:rtype: dict | [
"Return",
"metadata",
"on",
"the",
"NRFA",
"data",
"."
] | 782da7c5abd1348923129efe89fb70003ebb088c | https://github.com/OpenHydrology/floodestimation/blob/782da7c5abd1348923129efe89fb70003ebb088c/floodestimation/fehdata.py#L137-L157 | train |
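
Sketch for reading back the cached NRFA metadata; the keys follow the docstring above and the import name is assumed from the record's path.

# Hedged sketch, not part of the floodestimation sources.
from floodestimation import fehdata

meta = fehdata.nrfa_metadata()
print(meta['version'], meta['published_on'], meta['downloaded_on'], meta['url'])
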
OpenHydrology/floodestimation | floodestimation/fehdata.py | unzip_data | def unzip_data():
"""
Extract all files from downloaded FEH data zip file.
"""
with ZipFile(os.path.join(CACHE_FOLDER, CACHE_ZIP), 'r') as zf:
zf.extractall(path=CACHE_FOLDER) | python | def unzip_data():
"""
Extract all files from downloaded FEH data zip file.
"""
with ZipFile(os.path.join(CACHE_FOLDER, CACHE_ZIP), 'r') as zf:
zf.extractall(path=CACHE_FOLDER) | [
"def",
"unzip_data",
"(",
")",
":",
"with",
"ZipFile",
"(",
"os",
".",
"path",
".",
"join",
"(",
"CACHE_FOLDER",
",",
"CACHE_ZIP",
")",
",",
"'r'",
")",
"as",
"zf",
":",
"zf",
".",
"extractall",
"(",
"path",
"=",
"CACHE_FOLDER",
")"
] | Extract all files from downloaded FEH data zip file. | [
"Extract",
"all",
"files",
"from",
"downloaded",
"FEH",
"data",
"zip",
"file",
"."
] | 782da7c5abd1348923129efe89fb70003ebb088c | https://github.com/OpenHydrology/floodestimation/blob/782da7c5abd1348923129efe89fb70003ebb088c/floodestimation/fehdata.py#L160-L165 | train |
acutesoftware/AIKIF | aikif/toolbox/xml_tools.py | get_xml_stats | def get_xml_stats(fname):
"""
return a dictionary of statistics about an
XML file including size in bytes, num lines,
number of elements, count by elements
"""
f = mod_file.TextFile(fname)
res = {}
res['shortname'] = f.name
res['folder'] = f.path
res['filesize'] = str(f.size) + ' bytes'
res['num_lines'] = str(f.lines) + ' lines'
res['date_modified'] = f.GetDateAsString(f.date_modified)
return res | python | def get_xml_stats(fname):
"""
return a dictionary of statistics about an
XML file including size in bytes, num lines,
number of elements, count by elements
"""
f = mod_file.TextFile(fname)
res = {}
res['shortname'] = f.name
res['folder'] = f.path
res['filesize'] = str(f.size) + ' bytes'
res['num_lines'] = str(f.lines) + ' lines'
res['date_modified'] = f.GetDateAsString(f.date_modified)
return res | [
"def",
"get_xml_stats",
"(",
"fname",
")",
":",
"f",
"=",
"mod_file",
".",
"TextFile",
"(",
"fname",
")",
"res",
"=",
"{",
"}",
"res",
"[",
"'shortname'",
"]",
"=",
"f",
".",
"name",
"res",
"[",
"'folder'",
"]",
"=",
"f",
".",
"path",
"res",
"[",
"'filesize'",
"]",
"=",
"str",
"(",
"f",
".",
"size",
")",
"+",
"' bytes'",
"res",
"[",
"'num_lines'",
"]",
"=",
"str",
"(",
"f",
".",
"lines",
")",
"+",
"' lines'",
"res",
"[",
"'date_modified'",
"]",
"=",
"f",
".",
"GetDateAsString",
"(",
"f",
".",
"date_modified",
")",
"return",
"res"
] | return a dictionary of statistics about an
XML file including size in bytes, num lines,
number of elements, count by elements | [
"return",
"a",
"dictionary",
"of",
"statistics",
"about",
"an",
"XML",
"file",
"including",
"size",
"in",
"bytes",
"num",
"lines",
"number",
"of",
"elements",
"count",
"by",
"elements"
] | fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03 | https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/toolbox/xml_tools.py#L18-L32 | train |
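
Sketch for get_xml_stats on a placeholder file; the import name is assumed from the record's path. As the code shows, the returned dictionary carries file-level statistics (name, folder, size, line count, modification date) even though the docstring also mentions element counts.

# Hedged sketch, not part of the AIKIF sources.
from aikif.toolbox import xml_tools   # assumed module path

stats = xml_tools.get_xml_stats("sample.xml")        # "sample.xml" is a placeholder path
for key, value in stats.items():
    print(key, '=', value)
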
acutesoftware/AIKIF | aikif/toolbox/xml_tools.py | make_random_xml_file | def make_random_xml_file(fname, num_elements=200, depth=3):
"""
makes a random xml file mainly for testing the xml_split
"""
with open(fname, 'w') as f:
f.write('<?xml version="1.0" ?>\n<random>\n')
for dep_num, _ in enumerate(range(1,depth)):
f.write(' <depth>\n <content>\n')
#f.write('<depth' + str(dep_num) + '>\n')
for num, _ in enumerate(range(1, num_elements)):
f.write(' <stuff>data line ' + str(num) + '</stuff>\n')
#f.write('</depth' + str(dep_num) + '>\n')
f.write(' </content>\n </depth>\n')
f.write('</random>\n') | python | def make_random_xml_file(fname, num_elements=200, depth=3):
"""
makes a random xml file mainly for testing the xml_split
"""
with open(fname, 'w') as f:
f.write('<?xml version="1.0" ?>\n<random>\n')
for dep_num, _ in enumerate(range(1,depth)):
f.write(' <depth>\n <content>\n')
#f.write('<depth' + str(dep_num) + '>\n')
for num, _ in enumerate(range(1, num_elements)):
f.write(' <stuff>data line ' + str(num) + '</stuff>\n')
#f.write('</depth' + str(dep_num) + '>\n')
f.write(' </content>\n </depth>\n')
f.write('</random>\n') | [
"def",
"make_random_xml_file",
"(",
"fname",
",",
"num_elements",
"=",
"200",
",",
"depth",
"=",
"3",
")",
":",
"with",
"open",
"(",
"fname",
",",
"'w'",
")",
"as",
"f",
":",
"f",
".",
"write",
"(",
"'<?xml version=\"1.0\" ?>\\n<random>\\n'",
")",
"for",
"dep_num",
",",
"_",
"in",
"enumerate",
"(",
"range",
"(",
"1",
",",
"depth",
")",
")",
":",
"f",
".",
"write",
"(",
"' <depth>\\n <content>\\n'",
")",
"for",
"num",
",",
"_",
"in",
"enumerate",
"(",
"range",
"(",
"1",
",",
"num_elements",
")",
")",
":",
"f",
".",
"write",
"(",
"' <stuff>data line '",
"+",
"str",
"(",
"num",
")",
"+",
"'</stuff>\\n'",
")",
"f",
".",
"write",
"(",
"' </content>\\n </depth>\\n'",
")",
"f",
".",
"write",
"(",
"'</random>\\n'",
")"
] | makes a random xml file mainly for testing the xml_split | [
"makes",
"a",
"random",
"xml",
"file",
"mainly",
"for",
"testing",
"the",
"xml_split"
] | fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03 | https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/toolbox/xml_tools.py#L34-L48 | train |
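
Sketch pairing make_random_xml_file with get_xml_stats from the previous record; the output file name is a placeholder.

# Hedged sketch, not part of the AIKIF sources.
from aikif.toolbox import xml_tools   # assumed module path

xml_tools.make_random_xml_file("random_test.xml", num_elements=50, depth=2)
stats = xml_tools.get_xml_stats("random_test.xml")
print(stats['num_lines'], stats['filesize'])
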
mpg-age-bioinformatics/AGEpy | AGEpy/kegg.py | organismsKEGG | def organismsKEGG():
"""
Lists all organisms present in the KEGG database.
:returns: a dataframe containing one organism per row.
"""
organisms=urlopen("http://rest.kegg.jp/list/organism").read()
organisms=organisms.split("\n")
#for o in organisms:
# print o
# sys.stdout.flush()
organisms=[ s.split("\t") for s in organisms ]
organisms=pd.DataFrame(organisms)
return organisms | python | def organismsKEGG():
"""
Lists all organisms present in the KEGG database.
:returns: a dataframe containing one organism per row.
"""
organisms=urlopen("http://rest.kegg.jp/list/organism").read()
organisms=organisms.split("\n")
#for o in organisms:
# print o
# sys.stdout.flush()
organisms=[ s.split("\t") for s in organisms ]
organisms=pd.DataFrame(organisms)
return organisms | [
"def",
"organismsKEGG",
"(",
")",
":",
"organisms",
"=",
"urlopen",
"(",
"\"http://rest.kegg.jp/list/organism\"",
")",
".",
"read",
"(",
")",
"organisms",
"=",
"organisms",
".",
"split",
"(",
"\"\\n\"",
")",
"organisms",
"=",
"[",
"s",
".",
"split",
"(",
"\"\\t\"",
")",
"for",
"s",
"in",
"organisms",
"]",
"organisms",
"=",
"pd",
".",
"DataFrame",
"(",
"organisms",
")",
"return",
"organisms"
] | Lists all organisms present in the KEGG database.
:returns: a dataframe containing one organism per row. | [
"Lists",
"all",
"organisms",
"present",
"in",
"the",
"KEGG",
"database",
"."
] | 887808a7a2c1504f39ce8d8cb36c15c1721cd29f | https://github.com/mpg-age-bioinformatics/AGEpy/blob/887808a7a2c1504f39ce8d8cb36c15c1721cd29f/AGEpy/kegg.py#L19-L33 | train |
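
Sketch for organismsKEGG; the import path is assumed from the record's path. Note the function splits the raw urlopen payload with "\n", so as written it expects a Python 2 style str response (under Python 3 the bytes would need decoding first).

# Hedged sketch, not part of the AGEpy sources.
from AGEpy.kegg import organismsKEGG

orgs = organismsKEGG()          # one KEGG organism per row
print(orgs.head())
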
mpg-age-bioinformatics/AGEpy | AGEpy/kegg.py | databasesKEGG | def databasesKEGG(organism,ens_ids):
"""
Finds KEGG database identifiers for a respective organism given example ensembl ids.
:param organism: an organism as listed in organismsKEGG()
:param ens_ids: a list of ensenbl ids of the respective organism
:returns: nothing if no database was found, or a string if a database was found
"""
all_genes=urlopen("http://rest.kegg.jp/list/"+organism).read()
all_genes=all_genes.split("\n")
dbs=[]
while len(dbs) == 0:
for g in all_genes:
if len(dbs) == 0:
kid = g.split("\t")[0]
gene=urlopen("http://rest.kegg.jp/get/"+kid).read()
DBLINKS=gene.split("\n")
DBLINKS=[ s for s in DBLINKS if ":" in s ]
for d in DBLINKS:
test=d.split(" ")
test=test[len(test)-1]
if test in ens_ids:
DBLINK=[ s for s in DBLINKS if test in s ]
DBLINK=DBLINK[0].split(":")
DBLINK=DBLINK[len(DBLINK)-2]
dbs.append(DBLINK)
else:
break
ens_db=dbs[0].split(" ")
ens_db=ens_db[len(ens_db)-1]
test_db=urlopen("http://rest.genome.jp/link/"+ens_db+"/"+organism).read()
test_db=test_db.split("\n")
if len(test_db) == 1:
print("For "+organism+" the following db was found: "+ens_db)
print("This database does not seem to be valid KEGG-linked database identifier")
print("For \n'hsa' use 'ensembl-hsa'\n'mmu' use 'ensembl-mmu'\n'cel' use 'EnsemblGenomes-Gn'\n'dme' use 'FlyBase'")
sys.stdout.flush()
ens_db = None
else:
print("For "+organism+" the following db was found: "+ens_db)
sys.stdout.flush()
return ens_db | python | def databasesKEGG(organism,ens_ids):
"""
Finds KEGG database identifiers for a respective organism given example ensembl ids.
:param organism: an organism as listed in organismsKEGG()
:param ens_ids: a list of ensembl ids of the respective organism
:returns: nothing if no database was found, or a string if a database was found
"""
all_genes=urlopen("http://rest.kegg.jp/list/"+organism).read()
all_genes=all_genes.split("\n")
dbs=[]
while len(dbs) == 0:
for g in all_genes:
if len(dbs) == 0:
kid = g.split("\t")[0]
gene=urlopen("http://rest.kegg.jp/get/"+kid).read()
DBLINKS=gene.split("\n")
DBLINKS=[ s for s in DBLINKS if ":" in s ]
for d in DBLINKS:
test=d.split(" ")
test=test[len(test)-1]
if test in ens_ids:
DBLINK=[ s for s in DBLINKS if test in s ]
DBLINK=DBLINK[0].split(":")
DBLINK=DBLINK[len(DBLINK)-2]
dbs.append(DBLINK)
else:
break
ens_db=dbs[0].split(" ")
ens_db=ens_db[len(ens_db)-1]
test_db=urlopen("http://rest.genome.jp/link/"+ens_db+"/"+organism).read()
test_db=test_db.split("\n")
if len(test_db) == 1:
print("For "+organism+" the following db was found: "+ens_db)
print("This database does not seem to be valid KEGG-linked database identifier")
print("For \n'hsa' use 'ensembl-hsa'\n'mmu' use 'ensembl-mmu'\n'cel' use 'EnsemblGenomes-Gn'\n'dme' use 'FlyBase'")
sys.stdout.flush()
ens_db = None
else:
print("For "+organism+" the following db was found: "+ens_db)
sys.stdout.flush()
return ens_db | [
"def",
"databasesKEGG",
"(",
"organism",
",",
"ens_ids",
")",
":",
"all_genes",
"=",
"urlopen",
"(",
"\"http://rest.kegg.jp/list/\"",
"+",
"organism",
")",
".",
"read",
"(",
")",
"all_genes",
"=",
"all_genes",
".",
"split",
"(",
"\"\\n\"",
")",
"dbs",
"=",
"[",
"]",
"while",
"len",
"(",
"dbs",
")",
"==",
"0",
":",
"for",
"g",
"in",
"all_genes",
":",
"if",
"len",
"(",
"dbs",
")",
"==",
"0",
":",
"kid",
"=",
"g",
".",
"split",
"(",
"\"\\t\"",
")",
"[",
"0",
"]",
"gene",
"=",
"urlopen",
"(",
"\"http://rest.kegg.jp/get/\"",
"+",
"kid",
")",
".",
"read",
"(",
")",
"DBLINKS",
"=",
"gene",
".",
"split",
"(",
"\"\\n\"",
")",
"DBLINKS",
"=",
"[",
"s",
"for",
"s",
"in",
"DBLINKS",
"if",
"\":\"",
"in",
"s",
"]",
"for",
"d",
"in",
"DBLINKS",
":",
"test",
"=",
"d",
".",
"split",
"(",
"\" \"",
")",
"test",
"=",
"test",
"[",
"len",
"(",
"test",
")",
"-",
"1",
"]",
"if",
"test",
"in",
"ens_ids",
":",
"DBLINK",
"=",
"[",
"s",
"for",
"s",
"in",
"DBLINKS",
"if",
"test",
"in",
"s",
"]",
"DBLINK",
"=",
"DBLINK",
"[",
"0",
"]",
".",
"split",
"(",
"\":\"",
")",
"DBLINK",
"=",
"DBLINK",
"[",
"len",
"(",
"DBLINK",
")",
"-",
"2",
"]",
"dbs",
".",
"append",
"(",
"DBLINK",
")",
"else",
":",
"break",
"ens_db",
"=",
"dbs",
"[",
"0",
"]",
".",
"split",
"(",
"\" \"",
")",
"ens_db",
"=",
"ens_db",
"[",
"len",
"(",
"ens_db",
")",
"-",
"1",
"]",
"test_db",
"=",
"urlopen",
"(",
"\"http://rest.genome.jp/link/\"",
"+",
"ens_db",
"+",
"\"/\"",
"+",
"organism",
")",
".",
"read",
"(",
")",
"test_db",
"=",
"test_db",
".",
"split",
"(",
"\"\\n\"",
")",
"if",
"len",
"(",
"test_db",
")",
"==",
"1",
":",
"print",
"(",
"\"For \"",
"+",
"organism",
"+",
"\" the following db was found: \"",
"+",
"ens_db",
")",
"print",
"(",
"\"This database does not seem to be valid KEGG-linked database identifier\"",
")",
"print",
"(",
"\"For \\n'hsa' use 'ensembl-hsa'\\n'mmu' use 'ensembl-mmu'\\n'cel' use 'EnsemblGenomes-Gn'\\n'dme' use 'FlyBase'\"",
")",
"sys",
".",
"stdout",
".",
"flush",
"(",
")",
"ens_db",
"=",
"None",
"else",
":",
"print",
"(",
"\"For \"",
"+",
"organism",
"+",
"\" the following db was found: \"",
"+",
"ens_db",
")",
"sys",
".",
"stdout",
".",
"flush",
"(",
")",
"return",
"ens_db"
] | Finds KEGG database identifiers for a respective organism given example ensembl ids.
:param organism: an organism as listed in organismsKEGG()
:param ens_ids: a list of ensembl ids of the respective organism
:returns: nothing if no database was found, or a string if a database was found | [
"Finds",
"KEGG",
"database",
"identifiers",
"for",
"a",
"respective",
"organism",
"given",
"example",
"ensembl",
"ids",
"."
] | 887808a7a2c1504f39ce8d8cb36c15c1721cd29f | https://github.com/mpg-age-bioinformatics/AGEpy/blob/887808a7a2c1504f39ce8d8cb36c15c1721cd29f/AGEpy/kegg.py#L36-L80 | train |
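An illustrative call of databasesKEGG(); the organism code 'mmu' and the Ensembl gene ids are placeholders chosen for the example.

import AGEpy.kegg as kegg
example_ids = ['ENSMUSG00000000001', 'ENSMUSG00000000028']   # placeholder Ensembl ids
ens_db = kegg.databasesKEGG('mmu', example_ids)   # prints and returns the KEGG-linked db, e.g. 'ensembl-mmu'
if ens_db is None:
    print('no valid KEGG-linked database identifier found')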
mpg-age-bioinformatics/AGEpy | AGEpy/kegg.py | ensembl_to_kegg | def ensembl_to_kegg(organism,kegg_db):
"""
Looks up KEGG mappings of KEGG ids to ensembl ids
:param organism: an organism as listed in organismsKEGG()
:param kegg_db: a matching KEGG db as reported in databasesKEGG
:returns: a Pandas dataframe with 'KEGGid' and 'ENSid'.
"""
print("KEGG API: http://rest.genome.jp/link/"+kegg_db+"/"+organism)
sys.stdout.flush()
kegg_ens=urlopen("http://rest.genome.jp/link/"+kegg_db+"/"+organism).read()
kegg_ens=kegg_ens.split("\n")
final=[]
for i in kegg_ens:
final.append(i.split("\t"))
df=pd.DataFrame(final[0:len(final)-1])[[0,1]]
ens_id=pd.DataFrame(df[1].str.split(":").tolist())[1]
df=pd.concat([df,ens_id],axis=1)
df.columns=['KEGGid','ensDB','ENSid']
df=df[['KEGGid','ENSid']]
return df | python | def ensembl_to_kegg(organism,kegg_db):
"""
Looks up KEGG mappings of KEGG ids to ensembl ids
:param organism: an organism as listed in organismsKEGG()
:param kegg_db: a matching KEGG db as reported in databasesKEGG
:returns: a Pandas dataframe with 'KEGGid' and 'ENSid'.
"""
print("KEGG API: http://rest.genome.jp/link/"+kegg_db+"/"+organism)
sys.stdout.flush()
kegg_ens=urlopen("http://rest.genome.jp/link/"+kegg_db+"/"+organism).read()
kegg_ens=kegg_ens.split("\n")
final=[]
for i in kegg_ens:
final.append(i.split("\t"))
df=pd.DataFrame(final[0:len(final)-1])[[0,1]]
ens_id=pd.DataFrame(df[1].str.split(":").tolist())[1]
df=pd.concat([df,ens_id],axis=1)
df.columns=['KEGGid','ensDB','ENSid']
df=df[['KEGGid','ENSid']]
return df | [
"def",
"ensembl_to_kegg",
"(",
"organism",
",",
"kegg_db",
")",
":",
"print",
"(",
"\"KEGG API: http://rest.genome.jp/link/\"",
"+",
"kegg_db",
"+",
"\"/\"",
"+",
"organism",
")",
"sys",
".",
"stdout",
".",
"flush",
"(",
")",
"kegg_ens",
"=",
"urlopen",
"(",
"\"http://rest.genome.jp/link/\"",
"+",
"kegg_db",
"+",
"\"/\"",
"+",
"organism",
")",
".",
"read",
"(",
")",
"kegg_ens",
"=",
"kegg_ens",
".",
"split",
"(",
"\"\\n\"",
")",
"final",
"=",
"[",
"]",
"for",
"i",
"in",
"kegg_ens",
":",
"final",
".",
"append",
"(",
"i",
".",
"split",
"(",
"\"\\t\"",
")",
")",
"df",
"=",
"pd",
".",
"DataFrame",
"(",
"final",
"[",
"0",
":",
"len",
"(",
"final",
")",
"-",
"1",
"]",
")",
"[",
"[",
"0",
",",
"1",
"]",
"]",
"ens_id",
"=",
"pd",
".",
"DataFrame",
"(",
"df",
"[",
"1",
"]",
".",
"str",
".",
"split",
"(",
"\":\"",
")",
".",
"tolist",
"(",
")",
")",
"[",
"1",
"]",
"df",
"=",
"pd",
".",
"concat",
"(",
"[",
"df",
",",
"ens_id",
"]",
",",
"axis",
"=",
"1",
")",
"df",
".",
"columns",
"=",
"[",
"'KEGGid'",
",",
"'ensDB'",
",",
"'ENSid'",
"]",
"df",
"=",
"df",
"[",
"[",
"'KEGGid'",
",",
"'ENSid'",
"]",
"]",
"return",
"df"
] | Looks up KEGG mappings of KEGG ids to ensembl ids
:param organism: an organism as listed in organismsKEGG()
:param kegg_db: a matching KEGG db as reported in databasesKEGG
:returns: a Pandas dataframe with 'KEGGid' and 'ENSid'. | [
"Looks",
"up",
"KEGG",
"mappings",
"of",
"KEGG",
"ids",
"to",
"ensembl",
"ids"
] | 887808a7a2c1504f39ce8d8cb36c15c1721cd29f | https://github.com/mpg-age-bioinformatics/AGEpy/blob/887808a7a2c1504f39ce8d8cb36c15c1721cd29f/AGEpy/kegg.py#L83-L105 | train |
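A short sketch following on from the databasesKEGG() example; the organism and database names are assumptions taken from the print hints in that function.

import AGEpy.kegg as kegg
mapping = kegg.ensembl_to_kegg('mmu', 'ensembl-mmu')   # dataframe with 'KEGGid' and 'ENSid'
print(mapping.head())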
mpg-age-bioinformatics/AGEpy | AGEpy/kegg.py | ecs_idsKEGG | def ecs_idsKEGG(organism):
"""
Uses KEGG to retrieve all ids and respective ecs for a given KEGG organism
:param organism: an organism as listed in organismsKEGG()
:returns: a Pandas dataframe with 'ec' and 'KEGGid'.
"""
kegg_ec=urlopen("http://rest.kegg.jp/link/"+organism+"/enzyme").read()
kegg_ec=kegg_ec.split("\n")
final=[]
for k in kegg_ec:
final.append(k.split("\t"))
df=pd.DataFrame(final[0:len(final)-1])[[0,1]]
df.columns=['ec','KEGGid']
return df | python | def ecs_idsKEGG(organism):
"""
Uses KEGG to retrieve all ids and respective ecs for a given KEGG organism
:param organism: an organism as listed in organismsKEGG()
:returns: a Pandas dataframe with 'ec' and 'KEGGid'.
"""
kegg_ec=urlopen("http://rest.kegg.jp/link/"+organism+"/enzyme").read()
kegg_ec=kegg_ec.split("\n")
final=[]
for k in kegg_ec:
final.append(k.split("\t"))
df=pd.DataFrame(final[0:len(final)-1])[[0,1]]
df.columns=['ec','KEGGid']
return df | [
"def",
"ecs_idsKEGG",
"(",
"organism",
")",
":",
"kegg_ec",
"=",
"urlopen",
"(",
"\"http://rest.kegg.jp/link/\"",
"+",
"organism",
"+",
"\"/enzyme\"",
")",
".",
"read",
"(",
")",
"kegg_ec",
"=",
"kegg_ec",
".",
"split",
"(",
"\"\\n\"",
")",
"final",
"=",
"[",
"]",
"for",
"k",
"in",
"kegg_ec",
":",
"final",
".",
"append",
"(",
"k",
".",
"split",
"(",
"\"\\t\"",
")",
")",
"df",
"=",
"pd",
".",
"DataFrame",
"(",
"final",
"[",
"0",
":",
"len",
"(",
"final",
")",
"-",
"1",
"]",
")",
"[",
"[",
"0",
",",
"1",
"]",
"]",
"df",
".",
"columns",
"=",
"[",
"'ec'",
",",
"'KEGGid'",
"]",
"return",
"df"
] | Uses KEGG to retrieve all ids and respective ecs for a given KEGG organism
:param organism: an organism as listed in organismsKEGG()
:returns: a Pandas dataframe with 'ec' and 'KEGGid'. | [
"Uses",
"KEGG",
"to",
"retrieve",
"all",
"ids",
"and",
"respective",
"ecs",
"for",
"a",
"given",
"KEGG",
"organism"
] | 887808a7a2c1504f39ce8d8cb36c15c1721cd29f | https://github.com/mpg-age-bioinformatics/AGEpy/blob/887808a7a2c1504f39ce8d8cb36c15c1721cd29f/AGEpy/kegg.py#L107-L123 | train |
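Usage sketch for ecs_idsKEGG(); the organism code is a placeholder and the import path is assumed as before.

import AGEpy.kegg as kegg
ec_map = kegg.ecs_idsKEGG('mmu')   # dataframe with 'ec' and 'KEGGid'
print(ec_map.head())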
mpg-age-bioinformatics/AGEpy | AGEpy/kegg.py | idsKEGG | def idsKEGG(organism):
"""
Uses KEGG to retrieve all ids for a given KEGG organism
:param organism: an organism as listed in organismsKEGG()
:returns: a Pandas dataframe with 'gene_name' and 'KEGGid'.
"""
ORG=urlopen("http://rest.kegg.jp/list/"+organism).read()
ORG=ORG.split("\n")
final=[]
for k in ORG:
final.append(k.split("\t"))
df=pd.DataFrame(final[0:len(final)-1])[[0,1]]
df.columns=['KEGGid','description']
field = pd.DataFrame(df['description'].str.split(';',1).tolist())[0]
field = pd.DataFrame(field)
df = pd.concat([df[['KEGGid']],field],axis=1)
df.columns=['KEGGid','gene_name']
df=df[['gene_name','KEGGid']]
return df | python | def idsKEGG(organism):
"""
Uses KEGG to retrieve all ids for a given KEGG organism
:param organism: an organism as listed in organismsKEGG()
:returns: a Pandas dataframe with 'gene_name' and 'KEGGid'.
"""
ORG=urlopen("http://rest.kegg.jp/list/"+organism).read()
ORG=ORG.split("\n")
final=[]
for k in ORG:
final.append(k.split("\t"))
df=pd.DataFrame(final[0:len(final)-1])[[0,1]]
df.columns=['KEGGid','description']
field = pd.DataFrame(df['description'].str.split(';',1).tolist())[0]
field = pd.DataFrame(field)
df = pd.concat([df[['KEGGid']],field],axis=1)
df.columns=['KEGGid','gene_name']
df=df[['gene_name','KEGGid']]
return df | [
"def",
"idsKEGG",
"(",
"organism",
")",
":",
"ORG",
"=",
"urlopen",
"(",
"\"http://rest.kegg.jp/list/\"",
"+",
"organism",
")",
".",
"read",
"(",
")",
"ORG",
"=",
"ORG",
".",
"split",
"(",
"\"\\n\"",
")",
"final",
"=",
"[",
"]",
"for",
"k",
"in",
"ORG",
":",
"final",
".",
"append",
"(",
"k",
".",
"split",
"(",
"\"\\t\"",
")",
")",
"df",
"=",
"pd",
".",
"DataFrame",
"(",
"final",
"[",
"0",
":",
"len",
"(",
"final",
")",
"-",
"1",
"]",
")",
"[",
"[",
"0",
",",
"1",
"]",
"]",
"df",
".",
"columns",
"=",
"[",
"'KEGGid'",
",",
"'description'",
"]",
"field",
"=",
"pd",
".",
"DataFrame",
"(",
"df",
"[",
"'description'",
"]",
".",
"str",
".",
"split",
"(",
"';'",
",",
"1",
")",
".",
"tolist",
"(",
")",
")",
"[",
"0",
"]",
"field",
"=",
"pd",
".",
"DataFrame",
"(",
"field",
")",
"df",
"=",
"pd",
".",
"concat",
"(",
"[",
"df",
"[",
"[",
"'KEGGid'",
"]",
"]",
",",
"field",
"]",
",",
"axis",
"=",
"1",
")",
"df",
".",
"columns",
"=",
"[",
"'KEGGid'",
",",
"'gene_name'",
"]",
"df",
"=",
"df",
"[",
"[",
"'gene_name'",
",",
"'KEGGid'",
"]",
"]",
"return",
"df"
] | Uses KEGG to retrieve all ids for a given KEGG organism
:param organism: an organism as listed in organismsKEGG()
:returns: a Pandas dataframe with 'gene_name' and 'KEGGid'. | [
"Uses",
"KEGG",
"to",
"retrieve",
"all",
"ids",
"for",
"a",
"given",
"KEGG",
"organism"
] | 887808a7a2c1504f39ce8d8cb36c15c1721cd29f | https://github.com/mpg-age-bioinformatics/AGEpy/blob/887808a7a2c1504f39ce8d8cb36c15c1721cd29f/AGEpy/kegg.py#L126-L147 | train |
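Usage sketch for idsKEGG(); the organism code is a placeholder, and the result can be fed (whole or filtered) into expKEGG() further down.

import AGEpy.kegg as kegg
ids = kegg.idsKEGG('mmu')   # dataframe with 'gene_name' and 'KEGGid'
print(ids.head())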
mpg-age-bioinformatics/AGEpy | AGEpy/kegg.py | biomaRtTOkegg | def biomaRtTOkegg(df):
"""
Transforms a pandas dataframe with the columns 'ensembl_gene_id','kegg_enzyme'
to dataframe ready for use in ...
:param df: a pandas dataframe with the following columns: 'ensembl_gene_id','kegg_enzyme'
:returns: a pandas dataframe with the following columns: 'ensembl_gene_id','kegg_enzyme'
"""
df=df.dropna()
ECcols=df.columns.tolist()
df.reset_index(inplace=True,drop=True)
# field = ECsb[['kegg_enzyme']]
field = pd.DataFrame(df['kegg_enzyme'].str.split('+',1).tolist())[1]
field = pd.DataFrame(field)
df=pd.concat([df[['ensembl_gene_id']],field],axis=1)
df.columns=ECcols
df.drop_duplicates(inplace=True)
df.reset_index(inplace=True,drop=True)
plus=df['kegg_enzyme'].tolist()
plus=[ s for s in plus if "+" in s ]
noPlus=df[~df['kegg_enzyme'].isin(plus)]
plus=df[df['kegg_enzyme'].isin(plus)]
noPlus.reset_index(inplace=True, drop=True)
plus.reset_index(inplace=True, drop=True)
for p in range(0,len(plus)):
enz=plus.ix[p]['kegg_enzyme']
enz=enz.split("+")
enz=pd.DataFrame(enz)
enz.columns=['kegg_enzyme']
enz['ensembl_gene_id']=plus.ix[p]['kegg_enzyme']
noPlus=pd.concat([noPlus,enz])
noPlus=noPlus.drop_duplicates()
noPlus=noPlus[['ensembl_gene_id','kegg_enzyme']]
noPlus['fake']='ec:'
noPlus['kegg_enzyme']=noPlus['fake']+noPlus['kegg_enzyme']
noPlus=noPlus[['ensembl_gene_id','kegg_enzyme']]
return noPlus | python | def biomaRtTOkegg(df):
"""
Transforms a pandas dataframe with the columns 'ensembl_gene_id','kegg_enzyme'
to dataframe ready for use in ...
:param df: a pandas dataframe with the following columns: 'ensembl_gene_id','kegg_enzyme'
:returns: a pandas dataframe with the following columns: 'ensembl_gene_id','kegg_enzyme'
"""
df=df.dropna()
ECcols=df.columns.tolist()
df.reset_index(inplace=True,drop=True)
# field = ECsb[['kegg_enzyme']]
field = pd.DataFrame(df['kegg_enzyme'].str.split('+',1).tolist())[1]
field = pd.DataFrame(field)
df=pd.concat([df[['ensembl_gene_id']],field],axis=1)
df.columns=ECcols
df.drop_duplicates(inplace=True)
df.reset_index(inplace=True,drop=True)
plus=df['kegg_enzyme'].tolist()
plus=[ s for s in plus if "+" in s ]
noPlus=df[~df['kegg_enzyme'].isin(plus)]
plus=df[df['kegg_enzyme'].isin(plus)]
noPlus.reset_index(inplace=True, drop=True)
plus.reset_index(inplace=True, drop=True)
for p in range(0,len(plus)):
enz=plus.ix[p]['kegg_enzyme']
enz=enz.split("+")
enz=pd.DataFrame(enz)
enz.columns=['kegg_enzyme']
enz['ensembl_gene_id']=plus.ix[p]['kegg_enzyme']
noPlus=pd.concat([noPlus,enz])
noPlus=noPlus.drop_duplicates()
noPlus=noPlus[['ensembl_gene_id','kegg_enzyme']]
noPlus['fake']='ec:'
noPlus['kegg_enzyme']=noPlus['fake']+noPlus['kegg_enzyme']
noPlus=noPlus[['ensembl_gene_id','kegg_enzyme']]
return noPlus | [
"def",
"biomaRtTOkegg",
"(",
"df",
")",
":",
"df",
"=",
"df",
".",
"dropna",
"(",
")",
"ECcols",
"=",
"df",
".",
"columns",
".",
"tolist",
"(",
")",
"df",
".",
"reset_index",
"(",
"inplace",
"=",
"True",
",",
"drop",
"=",
"True",
")",
"field",
"=",
"pd",
".",
"DataFrame",
"(",
"df",
"[",
"'kegg_enzyme'",
"]",
".",
"str",
".",
"split",
"(",
"'+'",
",",
"1",
")",
".",
"tolist",
"(",
")",
")",
"[",
"1",
"]",
"field",
"=",
"pd",
".",
"DataFrame",
"(",
"field",
")",
"df",
"=",
"pd",
".",
"concat",
"(",
"[",
"df",
"[",
"[",
"'ensembl_gene_id'",
"]",
"]",
",",
"field",
"]",
",",
"axis",
"=",
"1",
")",
"df",
".",
"columns",
"=",
"ECcols",
"df",
".",
"drop_duplicates",
"(",
"inplace",
"=",
"True",
")",
"df",
".",
"reset_index",
"(",
"inplace",
"=",
"True",
",",
"drop",
"=",
"True",
")",
"plus",
"=",
"df",
"[",
"'kegg_enzyme'",
"]",
".",
"tolist",
"(",
")",
"plus",
"=",
"[",
"s",
"for",
"s",
"in",
"plus",
"if",
"\"+\"",
"in",
"s",
"]",
"noPlus",
"=",
"df",
"[",
"~",
"df",
"[",
"'kegg_enzyme'",
"]",
".",
"isin",
"(",
"plus",
")",
"]",
"plus",
"=",
"df",
"[",
"df",
"[",
"'kegg_enzyme'",
"]",
".",
"isin",
"(",
"plus",
")",
"]",
"noPlus",
".",
"reset_index",
"(",
"inplace",
"=",
"True",
",",
"drop",
"=",
"True",
")",
"plus",
".",
"reset_index",
"(",
"inplace",
"=",
"True",
",",
"drop",
"=",
"True",
")",
"for",
"p",
"in",
"range",
"(",
"0",
",",
"len",
"(",
"plus",
")",
")",
":",
"enz",
"=",
"plus",
".",
"ix",
"[",
"p",
"]",
"[",
"'kegg_enzyme'",
"]",
"enz",
"=",
"enz",
".",
"split",
"(",
"\"+\"",
")",
"enz",
"=",
"pd",
".",
"DataFrame",
"(",
"enz",
")",
"enz",
".",
"colums",
"=",
"[",
"'kegg_enzyme'",
"]",
"enz",
"[",
"'ensembl_gene_id'",
"]",
"=",
"plus",
".",
"ix",
"[",
"p",
"]",
"[",
"'kegg_enzyme'",
"]",
"noPlus",
"=",
"pd",
".",
"concat",
"(",
"[",
"noPlus",
",",
"enz",
"]",
")",
"noPlus",
"=",
"noPlus",
".",
"drop_duplicates",
"(",
")",
"noPlus",
"=",
"noPlus",
"[",
"[",
"'ensembl_gene_id'",
",",
"'kegg_enzyme'",
"]",
"]",
"noPlus",
"[",
"'fake'",
"]",
"=",
"'ec:'",
"noPlus",
"[",
"'kegg_enzyme'",
"]",
"=",
"noPlus",
"[",
"'fake'",
"]",
"+",
"noPlus",
"[",
"'kegg_enzyme'",
"]",
"noPlus",
"=",
"noPlus",
"[",
"[",
"'ensembl_gene_id'",
",",
"'kegg_enzyme'",
"]",
"]",
"return",
"noPlus"
] | Transforms a pandas dataframe with the columns 'ensembl_gene_id','kegg_enzyme'
to dataframe ready for use in ...
:param df: a pandas dataframe with the following columns: 'ensembl_gene_id','kegg_enzyme'
:returns: a pandas dataframe with the following columns: 'ensembl_gene_id','kegg_enzyme' | [
"Transforms",
"a",
"pandas",
"dataframe",
"with",
"the",
"columns",
"ensembl_gene_id",
"kegg_enzyme",
"to",
"dataframe",
"ready",
"for",
"use",
"in",
"..."
] | 887808a7a2c1504f39ce8d8cb36c15c1721cd29f | https://github.com/mpg-age-bioinformatics/AGEpy/blob/887808a7a2c1504f39ce8d8cb36c15c1721cd29f/AGEpy/kegg.py#L194-L232 | train |
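A sketch of biomaRtTOkegg() with a hand-built input frame; the column names follow the docstring, while the values and the 'pathway+EC' layout of kegg_enzyme are assumptions about the biomaRt export format.

import pandas as pd
import AGEpy.kegg as kegg
df = pd.DataFrame({'ensembl_gene_id': ['ENSG00000000001'],
                   'kegg_enzyme': ['00010+1.1.1.1']})   # made-up placeholder values
print(kegg.biomaRtTOkegg(df))   # ensembl_gene_id alongside 'ec:'-prefixed enzyme codes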
mpg-age-bioinformatics/AGEpy | AGEpy/kegg.py | expKEGG | def expKEGG(organism,names_KEGGids):
"""
Gets all KEGG pathways for an organism
:param organism: an organism as listed in organismsKEGG()
:param names_KEGGids: a Pandas dataframe with the columns 'gene_name' and 'KEGGid' as reported from idsKEGG(organism) (or a subset of it).
:returns df: a Pandas dataframe with 'KEGGid','pathID(1):pathNAME(1)', 'pathID(n):pathNAME(n)'
:returns paths: a list of retrieved KEGG pathways
"""
#print "KEGG API: http://rest.kegg.jp/list/pathway/"+organism
#sys.stdout.flush()
kegg_paths=urlopen("http://rest.kegg.jp/list/pathway/"+organism).read()
kegg_paths=kegg_paths.split("\n")
final=[]
for k in kegg_paths:
final.append(k.split("\t"))
df=pd.DataFrame(final[0:len(final)-1])[[0,1]]
df.columns=['pathID','pathName']
print("Collecting genes for pathways")
sys.stdout.flush()
df_pg=pd.DataFrame()
for i in df['pathID'].tolist():
print(i)
sys.stdout.flush()
path_genes=urlopen("http://rest.kegg.jp/link/genes/"+i).read()
path_genes=path_genes.split("\n")
final=[]
for k in path_genes:
final.append(k.split("\t"))
if len(final[0]) > 1:
df_tmp=pd.DataFrame(final[0:len(final)-1])[[0,1]]
df_tmp.columns=['pathID','KEGGid']
df_pg=pd.concat([df_pg,df_tmp])
df=pd.merge(df,df_pg,on=["pathID"], how="outer")
df=df[df['KEGGid'].isin(names_KEGGids['KEGGid'].tolist())]
df=pd.merge(df,names_KEGGids,how='left',on=['KEGGid'])
df_fA=pd.DataFrame(columns=['KEGGid'])
paths=[]
for k in df[['pathID']].drop_duplicates()['pathID'].tolist():
df_tmp=df[df['pathID']==k]
pathName=df_tmp['pathName'].tolist()[0]
pathName=" : ".join([k,pathName])
keggIDs_in_path=df_tmp[['KEGGid']].drop_duplicates()['KEGGid'].tolist()
a={pathName:keggIDs_in_path}
a=pd.DataFrame(a,index=range(len(keggIDs_in_path)))
a['KEGGid']=a[pathName].copy()
df_fA=pd.merge(df_fA,a,how='outer',on=['KEGGid'])
paths.append(pathName)
return df_fA, paths | python | def expKEGG(organism,names_KEGGids):
"""
Gets all KEGG pathways for an organism
:param organism: an organism as listed in organismsKEGG()
:param names_KEGGids: a Pandas dataframe with the columns 'gene_name' and 'KEGGid' as reported from idsKEGG(organism) (or a subset of it).
:returns df: a Pandas dataframe with 'KEGGid','pathID(1):pathNAME(1)', 'pathID(n):pathNAME(n)'
:returns paths: a list of retrieved KEGG pathways
"""
#print "KEGG API: http://rest.kegg.jp/list/pathway/"+organism
#sys.stdout.flush()
kegg_paths=urlopen("http://rest.kegg.jp/list/pathway/"+organism).read()
kegg_paths=kegg_paths.split("\n")
final=[]
for k in kegg_paths:
final.append(k.split("\t"))
df=pd.DataFrame(final[0:len(final)-1])[[0,1]]
df.columns=['pathID','pathName']
print("Collecting genes for pathways")
sys.stdout.flush()
df_pg=pd.DataFrame()
for i in df['pathID'].tolist():
print(i)
sys.stdout.flush()
path_genes=urlopen("http://rest.kegg.jp/link/genes/"+i).read()
path_genes=path_genes.split("\n")
final=[]
for k in path_genes:
final.append(k.split("\t"))
if len(final[0]) > 1:
df_tmp=pd.DataFrame(final[0:len(final)-1])[[0,1]]
df_tmp.columns=['pathID','KEGGid']
df_pg=pd.concat([df_pg,df_tmp])
df=pd.merge(df,df_pg,on=["pathID"], how="outer")
df=df[df['KEGGid'].isin(names_KEGGids['KEGGid'].tolist())]
df=pd.merge(df,names_KEGGids,how='left',on=['KEGGid'])
df_fA=pd.DataFrame(columns=['KEGGid'])
paths=[]
for k in df[['pathID']].drop_duplicates()['pathID'].tolist():
df_tmp=df[df['pathID']==k]
pathName=df_tmp['pathName'].tolist()[0]
pathName=" : ".join([k,pathName])
keggIDs_in_path=df_tmp[['KEGGid']].drop_duplicates()['KEGGid'].tolist()
a={pathName:keggIDs_in_path}
a=pd.DataFrame(a,index=range(len(keggIDs_in_path)))
a['KEGGid']=a[pathName].copy()
df_fA=pd.merge(df_fA,a,how='outer',on=['KEGGid'])
paths.append(pathName)
return df_fA, paths | [
"def",
"expKEGG",
"(",
"organism",
",",
"names_KEGGids",
")",
":",
"kegg_paths",
"=",
"urlopen",
"(",
"\"http://rest.kegg.jp/list/pathway/\"",
"+",
"organism",
")",
".",
"read",
"(",
")",
"kegg_paths",
"=",
"kegg_paths",
".",
"split",
"(",
"\"\\n\"",
")",
"final",
"=",
"[",
"]",
"for",
"k",
"in",
"kegg_paths",
":",
"final",
".",
"append",
"(",
"k",
".",
"split",
"(",
"\"\\t\"",
")",
")",
"df",
"=",
"pd",
".",
"DataFrame",
"(",
"final",
"[",
"0",
":",
"len",
"(",
"final",
")",
"-",
"1",
"]",
")",
"[",
"[",
"0",
",",
"1",
"]",
"]",
"df",
".",
"columns",
"=",
"[",
"'pathID'",
",",
"'pathName'",
"]",
"print",
"(",
"\"Collecting genes for pathways\"",
")",
"sys",
".",
"stdout",
".",
"flush",
"(",
")",
"df_pg",
"=",
"pd",
".",
"DataFrame",
"(",
")",
"for",
"i",
"in",
"df",
"[",
"'pathID'",
"]",
".",
"tolist",
"(",
")",
":",
"print",
"(",
"i",
")",
"sys",
".",
"stdout",
".",
"flush",
"(",
")",
"path_genes",
"=",
"urlopen",
"(",
"\"http://rest.kegg.jp/link/genes/\"",
"+",
"i",
")",
".",
"read",
"(",
")",
"path_genes",
"=",
"path_genes",
".",
"split",
"(",
"\"\\n\"",
")",
"final",
"=",
"[",
"]",
"for",
"k",
"in",
"path_genes",
":",
"final",
".",
"append",
"(",
"k",
".",
"split",
"(",
"\"\\t\"",
")",
")",
"if",
"len",
"(",
"final",
"[",
"0",
"]",
")",
">",
"1",
":",
"df_tmp",
"=",
"pd",
".",
"DataFrame",
"(",
"final",
"[",
"0",
":",
"len",
"(",
"final",
")",
"-",
"1",
"]",
")",
"[",
"[",
"0",
",",
"1",
"]",
"]",
"df_tmp",
".",
"columns",
"=",
"[",
"'pathID'",
",",
"'KEGGid'",
"]",
"df_pg",
"=",
"pd",
".",
"concat",
"(",
"[",
"df_pg",
",",
"df_tmp",
"]",
")",
"df",
"=",
"pd",
".",
"merge",
"(",
"df",
",",
"df_pg",
",",
"on",
"=",
"[",
"\"pathID\"",
"]",
",",
"how",
"=",
"\"outer\"",
")",
"df",
"=",
"df",
"[",
"df",
"[",
"'KEGGid'",
"]",
".",
"isin",
"(",
"names_KEGGids",
"[",
"'KEGGid'",
"]",
".",
"tolist",
"(",
")",
")",
"]",
"df",
"=",
"pd",
".",
"merge",
"(",
"df",
",",
"names_KEGGids",
",",
"how",
"=",
"'left'",
",",
"on",
"=",
"[",
"'KEGGid'",
"]",
")",
"df_fA",
"=",
"pd",
".",
"DataFrame",
"(",
"columns",
"=",
"[",
"'KEGGid'",
"]",
")",
"paths",
"=",
"[",
"]",
"for",
"k",
"in",
"df",
"[",
"[",
"'pathID'",
"]",
"]",
".",
"drop_duplicates",
"(",
")",
"[",
"'pathID'",
"]",
".",
"tolist",
"(",
")",
":",
"df_tmp",
"=",
"df",
"[",
"df",
"[",
"'pathID'",
"]",
"==",
"k",
"]",
"pathName",
"=",
"df_tmp",
"[",
"'pathName'",
"]",
".",
"tolist",
"(",
")",
"[",
"0",
"]",
"pathName",
"=",
"\" : \"",
".",
"join",
"(",
"[",
"k",
",",
"pathName",
"]",
")",
"keggIDs_in_path",
"=",
"df_tmp",
"[",
"[",
"'KEGGid'",
"]",
"]",
".",
"drop_duplicates",
"(",
")",
"[",
"'KEGGid'",
"]",
".",
"tolist",
"(",
")",
"a",
"=",
"{",
"pathName",
":",
"keggIDs_in_path",
"}",
"a",
"=",
"pd",
".",
"DataFrame",
"(",
"a",
",",
"index",
"=",
"range",
"(",
"len",
"(",
"keggIDs_in_path",
")",
")",
")",
"a",
"[",
"'KEGGid'",
"]",
"=",
"a",
"[",
"pathName",
"]",
".",
"copy",
"(",
")",
"df_fA",
"=",
"pd",
".",
"merge",
"(",
"df_fA",
",",
"a",
",",
"how",
"=",
"'outer'",
",",
"on",
"=",
"[",
"'KEGGid'",
"]",
")",
"paths",
".",
"append",
"(",
"pathName",
")",
"return",
"df_fA",
",",
"paths"
] | Gets all KEGG pathways for an organism
:param organism: an organism as listed in organismsKEGG()
:param names_KEGGids: a Pandas dataframe with the columns 'gene_name' and 'KEGGid' as reported from idsKEGG(organism) (or a subset of it).
:returns df: a Pandas dataframe with 'KEGGid','pathID(1):pathNAME(1)', 'pathID(n):pathNAME(n)'
:returns paths: a list of retrieved KEGG pathways | [
"Gets",
"all",
"KEGG",
"pathways",
"for",
"an",
"organism"
] | 887808a7a2c1504f39ce8d8cb36c15c1721cd29f | https://github.com/mpg-age-bioinformatics/AGEpy/blob/887808a7a2c1504f39ce8d8cb36c15c1721cd29f/AGEpy/kegg.py#L236-L287 | train |
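A sketch combining idsKEGG() and expKEGG(); the organism code and the subset size are arbitrary choices for the example.

import AGEpy.kegg as kegg
ids = kegg.idsKEGG('mmu')                           # 'gene_name' / 'KEGGid' table
df_fA, paths = kegg.expKEGG('mmu', ids.head(100))   # pathway membership for a small gene subset
print(len(paths), 'pathways retrieved')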
mpg-age-bioinformatics/AGEpy | AGEpy/rbiom.py | RdatabasesBM | def RdatabasesBM(host=rbiomart_host):
"""
Lists BioMart databases through a RPY2 connection.
:param host: address of the host server, default='www.ensembl.org'
:returns: nothing
"""
biomaRt = importr("biomaRt")
print(biomaRt.listMarts(host=host)) | python | def RdatabasesBM(host=rbiomart_host):
"""
Lists BioMart databases through a RPY2 connection.
:param host: address of the host server, default='www.ensembl.org'
:returns: nothing
"""
biomaRt = importr("biomaRt")
print(biomaRt.listMarts(host=host)) | [
"def",
"RdatabasesBM",
"(",
"host",
"=",
"rbiomart_host",
")",
":",
"biomaRt",
"=",
"importr",
"(",
"\"biomaRt\"",
")",
"print",
"(",
"biomaRt",
".",
"listMarts",
"(",
"host",
"=",
"host",
")",
")"
] | Lists BioMart databases through a RPY2 connection.
:param host: address of the host server, default='www.ensembl.org'
:returns: nothing | [
"Lists",
"BioMart",
"databases",
"through",
"a",
"RPY2",
"connection",
"."
] | 887808a7a2c1504f39ce8d8cb36c15c1721cd29f | https://github.com/mpg-age-bioinformatics/AGEpy/blob/887808a7a2c1504f39ce8d8cb36c15c1721cd29f/AGEpy/rbiom.py#L16-L26 | train |
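Usage sketch for RdatabasesBM(); it needs rpy2 and the R biomaRt package installed, and the import path (AGEpy.rbiom) is assumed from the file location.

import AGEpy.rbiom as rbiom
rbiom.RdatabasesBM()   # prints the marts available on the default host (www.ensembl.org)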
mpg-age-bioinformatics/AGEpy | AGEpy/rbiom.py | RdatasetsBM | def RdatasetsBM(database,host=rbiomart_host):
"""
Lists BioMart datasets through a RPY2 connection.
:param database: a database listed in RdatabasesBM()
:param host: address of the host server, default='www.ensembl.org'
:returns: nothing
"""
biomaRt = importr("biomaRt")
ensemblMart=biomaRt.useMart(database, host=host)
print(biomaRt.listDatasets(ensemblMart)) | python | def RdatasetsBM(database,host=rbiomart_host):
"""
Lists BioMart datasets through a RPY2 connection.
:param database: a database listed in RdatabasesBM()
:param host: address of the host server, default='www.ensembl.org'
:returns: nothing
"""
biomaRt = importr("biomaRt")
ensemblMart=biomaRt.useMart(database, host=host)
print(biomaRt.listDatasets(ensemblMart)) | [
"def",
"RdatasetsBM",
"(",
"database",
",",
"host",
"=",
"rbiomart_host",
")",
":",
"biomaRt",
"=",
"importr",
"(",
"\"biomaRt\"",
")",
"ensemblMart",
"=",
"biomaRt",
".",
"useMart",
"(",
"database",
",",
"host",
"=",
"host",
")",
"print",
"(",
"biomaRt",
".",
"listDatasets",
"(",
"ensemblMart",
")",
")"
] | Lists BioMart datasets through a RPY2 connection.
:param database: a database listed in RdatabasesBM()
:param host: address of the host server, default='www.ensembl.org'
:returns: nothing | [
"Lists",
"BioMart",
"datasets",
"through",
"a",
"RPY2",
"connection",
"."
] | 887808a7a2c1504f39ce8d8cb36c15c1721cd29f | https://github.com/mpg-age-bioinformatics/AGEpy/blob/887808a7a2c1504f39ce8d8cb36c15c1721cd29f/AGEpy/rbiom.py#L28-L40 | train |
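Sketch continuing the rbiom examples; the mart name is an example value of the kind listed by RdatabasesBM().

import AGEpy.rbiom as rbiom
rbiom.RdatasetsBM('ENSEMBL_MART_ENSEMBL')   # prints the datasets of that mart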
mpg-age-bioinformatics/AGEpy | AGEpy/rbiom.py | RfiltersBM | def RfiltersBM(dataset,database,host=rbiomart_host):
"""
Lists BioMart filters through a RPY2 connection.
:param dataset: a dataset listed in RdatasetsBM()
:param database: a database listed in RdatabasesBM()
:param host: address of the host server, default='www.ensembl.org'
:returns: nothing
"""
biomaRt = importr("biomaRt")
ensemblMart=biomaRt.useMart(database, host=host)
ensembl=biomaRt.useDataset(dataset, mart=ensemblMart)
print(biomaRt.listFilters(ensembl)) | python | def RfiltersBM(dataset,database,host=rbiomart_host):
"""
Lists BioMart filters through a RPY2 connection.
:param dataset: a dataset listed in RdatasetsBM()
:param database: a database listed in RdatabasesBM()
:param host: address of the host server, default='www.ensembl.org'
:returns: nothing
"""
biomaRt = importr("biomaRt")
ensemblMart=biomaRt.useMart(database, host=host)
ensembl=biomaRt.useDataset(dataset, mart=ensemblMart)
print(biomaRt.listFilters(ensembl)) | [
"def",
"RfiltersBM",
"(",
"dataset",
",",
"database",
",",
"host",
"=",
"rbiomart_host",
")",
":",
"biomaRt",
"=",
"importr",
"(",
"\"biomaRt\"",
")",
"ensemblMart",
"=",
"biomaRt",
".",
"useMart",
"(",
"database",
",",
"host",
"=",
"host",
")",
"ensembl",
"=",
"biomaRt",
".",
"useDataset",
"(",
"dataset",
",",
"mart",
"=",
"ensemblMart",
")",
"print",
"(",
"biomaRt",
".",
"listFilters",
"(",
"ensembl",
")",
")"
] | Lists BioMart filters through a RPY2 connection.
:param dataset: a dataset listed in RdatasetsBM()
:param database: a database listed in RdatabasesBM()
:param host: address of the host server, default='www.ensembl.org'
:returns: nothing | [
"Lists",
"BioMart",
"filters",
"through",
"a",
"RPY2",
"connection",
"."
] | 887808a7a2c1504f39ce8d8cb36c15c1721cd29f | https://github.com/mpg-age-bioinformatics/AGEpy/blob/887808a7a2c1504f39ce8d8cb36c15c1721cd29f/AGEpy/rbiom.py#L42-L56 | train |
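Sketch for RfiltersBM(); the dataset and database names are example values.

import AGEpy.rbiom as rbiom
rbiom.RfiltersBM('hsapiens_gene_ensembl', 'ENSEMBL_MART_ENSEMBL')   # prints the filters of the dataset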
mpg-age-bioinformatics/AGEpy | AGEpy/rbiom.py | RattributesBM | def RattributesBM(dataset,database,host=rbiomart_host):
"""
Lists BioMart attributes through a RPY2 connection.
:param dataset: a dataset listed in RdatasetsBM()
:param database: a database listed in RdatabasesBM()
:param host: address of the host server, default='www.ensembl.org'
:returns: nothing
"""
biomaRt = importr("biomaRt")
ensemblMart=biomaRt.useMart(database, host=host)
ensembl=biomaRt.useDataset(dataset, mart=ensemblMart)
print(biomaRt.listAttributes(ensembl)) | python | def RattributesBM(dataset,database,host=rbiomart_host):
"""
Lists BioMart attributes through a RPY2 connection.
:param dataset: a dataset listed in RdatasetsBM()
:param database: a database listed in RdatabasesBM()
:param host: address of the host server, default='www.ensembl.org'
:returns: nothing
"""
biomaRt = importr("biomaRt")
ensemblMart=biomaRt.useMart(database, host=rbiomart_host)
ensembl=biomaRt.useDataset(dataset, mart=ensemblMart)
print(biomaRt.listAttributes(ensembl)) | [
"def",
"RattributesBM",
"(",
"dataset",
",",
"database",
",",
"host",
"=",
"rbiomart_host",
")",
":",
"biomaRt",
"=",
"importr",
"(",
"\"biomaRt\"",
")",
"ensemblMart",
"=",
"biomaRt",
".",
"useMart",
"(",
"database",
",",
"host",
"=",
"rbiomart_host",
")",
"ensembl",
"=",
"biomaRt",
".",
"useDataset",
"(",
"dataset",
",",
"mart",
"=",
"ensemblMart",
")",
"print",
"(",
"biomaRt",
".",
"listAttributes",
"(",
"ensembl",
")",
")"
] | Lists BioMart attributes through a RPY2 connection.
:param dataset: a dataset listed in RdatasetsBM()
:param database: a database listed in RdatabasesBM()
:param host: address of the host server, default='www.ensembl.org'
:returns: nothing | [
"Lists",
"BioMart",
"attributes",
"through",
"a",
"RPY2",
"connection",
"."
] | 887808a7a2c1504f39ce8d8cb36c15c1721cd29f | https://github.com/mpg-age-bioinformatics/AGEpy/blob/887808a7a2c1504f39ce8d8cb36c15c1721cd29f/AGEpy/rbiom.py#L58-L72 | train |
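Sketch for RattributesBM(); the dataset and database names are example values.

import AGEpy.rbiom as rbiom
rbiom.RattributesBM('hsapiens_gene_ensembl', 'ENSEMBL_MART_ENSEMBL')   # prints the attributes of the dataset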
acutesoftware/AIKIF | scripts/examples/document_AIKIF.py | get_list_of_applications | def get_list_of_applications():
"""
Get list of applications
"""
apps = mod_prg.Programs('Applications', 'C:\\apps')
fl = mod_fl.FileList(['C:\\apps'], ['*.exe'], ["\\bk\\"])
for f in fl.get_list():
apps.add(f, 'autogenerated list')
apps.list()
apps.save() | python | def get_list_of_applications():
"""
Get list of applications
"""
apps = mod_prg.Programs('Applications', 'C:\\apps')
fl = mod_fl.FileList(['C:\\apps'], ['*.exe'], ["\\bk\\"])
for f in fl.get_list():
apps.add(f, 'autogenerated list')
apps.list()
apps.save() | [
"def",
"get_list_of_applications",
"(",
")",
":",
"apps",
"=",
"mod_prg",
".",
"Programs",
"(",
"'Applications'",
",",
"'C:\\\\apps'",
")",
"fl",
"=",
"mod_fl",
".",
"FileList",
"(",
"[",
"'C:\\\\apps'",
"]",
",",
"[",
"'*.exe'",
"]",
",",
"[",
"\"\\\\bk\\\\\"",
"]",
")",
"for",
"f",
"in",
"fl",
".",
"get_list",
"(",
")",
":",
"apps",
".",
"add",
"(",
"f",
",",
"'autogenerated list'",
")",
"apps",
".",
"list",
"(",
")",
"apps",
".",
"save",
"(",
")"
] | Get list of applications | [
"Get",
"list",
"of",
"applications"
] | fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03 | https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/scripts/examples/document_AIKIF.py#L147-L156 | train |
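Illustrative only: the script expects a Windows machine with a C:\apps folder, and the module name is assumed from the file document_AIKIF.py.

import document_AIKIF   # assumes the examples folder is on sys.path
document_AIKIF.get_list_of_applications()   # scans C:\apps for *.exe, lists and saves the result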
Timusan/wtforms-dynamic-fields | wtforms_dynamic_fields/wtforms_dynamic_fields.py | WTFormsDynamicFields.add_field | def add_field(self, name, label, field_type, *args, **kwargs):
""" Add the field to the internal configuration dictionary. """
if name in self._dyn_fields:
raise AttributeError('Field already added to the form.')
else:
self._dyn_fields[name] = {'label': label, 'type': field_type,
'args': args, 'kwargs': kwargs} | python | def add_field(self, name, label, field_type, *args, **kwargs):
""" Add the field to the internal configuration dictionary. """
if name in self._dyn_fields:
raise AttributeError('Field already added to the form.')
else:
self._dyn_fields[name] = {'label': label, 'type': field_type,
'args': args, 'kwargs': kwargs} | [
"def",
"add_field",
"(",
"self",
",",
"name",
",",
"label",
",",
"field_type",
",",
"*",
"args",
",",
"**",
"kwargs",
")",
":",
"if",
"name",
"in",
"self",
".",
"_dyn_fields",
":",
"raise",
"AttributeError",
"(",
"'Field already added to the form.'",
")",
"else",
":",
"self",
".",
"_dyn_fields",
"[",
"name",
"]",
"=",
"{",
"'label'",
":",
"label",
",",
"'type'",
":",
"field_type",
",",
"'args'",
":",
"args",
",",
"'kwargs'",
":",
"kwargs",
"}"
] | Add the field to the internal configuration dictionary. | [
"Add",
"the",
"field",
"to",
"the",
"internal",
"configuration",
"dictionary",
"."
] | d984a646075219a6f8a0e931c96035ca3e44be56 | https://github.com/Timusan/wtforms-dynamic-fields/blob/d984a646075219a6f8a0e931c96035ca3e44be56/wtforms_dynamic_fields/wtforms_dynamic_fields.py#L44-L50 | train |
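A minimal sketch of registering a dynamic field with add_field(); StringField is a standard WTForms field, and the package import path is an assumption based on the file location.

from wtforms import StringField
from wtforms_dynamic_fields.wtforms_dynamic_fields import WTFormsDynamicFields

dynamic = WTFormsDynamicFields()
dynamic.add_field('email', 'Email address', StringField)   # machine name, label, WTForms field type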
Timusan/wtforms-dynamic-fields | wtforms_dynamic_fields/wtforms_dynamic_fields.py | WTFormsDynamicFields.add_validator | def add_validator(self, name, validator, *args, **kwargs):
""" Add the validator to the internal configuration dictionary.
:param name:
The field machine name to apply the validator on
:param validator:
The WTForms validator object
The rest are optional arguments and keyword arguments that
belong to the validator. We let them simply pass through
to be checked and bound later.
"""
if name in self._dyn_fields:
if 'validators' in self._dyn_fields[name]:
self._dyn_fields[name]['validators'].append(validator)
self._dyn_fields[name][validator.__name__] = {}
if args:
self._dyn_fields[name][validator.__name__]['args'] = args
if kwargs:
self._dyn_fields[name][validator.__name__]['kwargs'] = kwargs
else:
self._dyn_fields[name]['validators'] = []
self.add_validator(name, validator, *args, **kwargs)
else:
raise AttributeError('Field "{0}" does not exist. '
'Did you forget to add it?'.format(name)) | python | def add_validator(self, name, validator, *args, **kwargs):
""" Add the validator to the internal configuration dictionary.
:param name:
The field machine name to apply the validator on
:param validator:
The WTForms validator object
The rest are optional arguments and keyword arguments that
belong to the validator. We let them simply pass through
to be checked and bound later.
"""
if name in self._dyn_fields:
if 'validators' in self._dyn_fields[name]:
self._dyn_fields[name]['validators'].append(validator)
self._dyn_fields[name][validator.__name__] = {}
if args:
self._dyn_fields[name][validator.__name__]['args'] = args
if kwargs:
self._dyn_fields[name][validator.__name__]['kwargs'] = kwargs
else:
self._dyn_fields[name]['validators'] = []
self.add_validator(name, validator, *args, **kwargs)
else:
raise AttributeError('Field "{0}" does not exist. '
'Did you forget to add it?'.format(name)) | [
"def",
"add_validator",
"(",
"self",
",",
"name",
",",
"validator",
",",
"*",
"args",
",",
"**",
"kwargs",
")",
":",
"if",
"name",
"in",
"self",
".",
"_dyn_fields",
":",
"if",
"'validators'",
"in",
"self",
".",
"_dyn_fields",
"[",
"name",
"]",
":",
"self",
".",
"_dyn_fields",
"[",
"name",
"]",
"[",
"'validators'",
"]",
".",
"append",
"(",
"validator",
")",
"self",
".",
"_dyn_fields",
"[",
"name",
"]",
"[",
"validator",
".",
"__name__",
"]",
"=",
"{",
"}",
"if",
"args",
":",
"self",
".",
"_dyn_fields",
"[",
"name",
"]",
"[",
"validator",
".",
"__name__",
"]",
"[",
"'args'",
"]",
"=",
"args",
"if",
"kwargs",
":",
"self",
".",
"_dyn_fields",
"[",
"name",
"]",
"[",
"validator",
".",
"__name__",
"]",
"[",
"'kwargs'",
"]",
"=",
"kwargs",
"else",
":",
"self",
".",
"_dyn_fields",
"[",
"name",
"]",
"[",
"'validators'",
"]",
"=",
"[",
"]",
"self",
".",
"add_validator",
"(",
"name",
",",
"validator",
",",
"*",
"args",
",",
"**",
"kwargs",
")",
"else",
":",
"raise",
"AttributeError",
"(",
"'Field \"{0}\" does not exist. '",
"'Did you forget to add it?'",
".",
"format",
"(",
"name",
")",
")"
] | Add the validator to the internal configuration dictionary.
:param name:
The field machine name to apply the validator on
:param validator:
The WTForms validator object
The rest are optional arguments and keyword arguments that
belong to the validator. We let them simply pass through
to be checked and bound later. | [
"Add",
"the",
"validator",
"to",
"the",
"internal",
"configuration",
"dictionary",
"."
] | d984a646075219a6f8a0e931c96035ca3e44be56 | https://github.com/Timusan/wtforms-dynamic-fields/blob/d984a646075219a6f8a0e931c96035ca3e44be56/wtforms_dynamic_fields/wtforms_dynamic_fields.py#L52-L76 | train |
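Continuing the add_field sketch above; InputRequired and EqualTo are standard WTForms validators, and '%email%' shows the placeholder convention that process() later rewrites per field set.

from wtforms import StringField
from wtforms.validators import InputRequired, EqualTo
dynamic.add_field('email_confirm', 'Confirm email', StringField)
dynamic.add_validator('email', InputRequired, message='Please fill in this field.')
dynamic.add_validator('email_confirm', EqualTo, '%email%', message='Both emails must match.')

Note that the validator classes are passed unbound, together with their arguments, exactly as add_validator stores them for later binding.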
Timusan/wtforms-dynamic-fields | wtforms_dynamic_fields/wtforms_dynamic_fields.py | WTFormsDynamicFields.process | def process(self, form, post):
""" Process the given WTForm Form object.
Iterate over the POST values and check each field
against the configuration that was made.
For each field that is valid, check all the validator
parameters for possible %field% replacement, then bind
these parameters to their validator.
Finally, add the field together with their validators
to the form.
:param form:
A valid WTForm Form object
:param post:
A MultiDict with the POST variables
"""
if not isinstance(form, FormMeta):
raise TypeError('Given form is not a valid WTForm.')
re_field_name = re.compile(r'\%([a-zA-Z0-9_]*)\%')
class F(form):
pass
for field, data in post.iteritems():
if field in F():
# Skip it if the POST field is one of the standard form fields.
continue
else:
if field in self._dyn_fields:
# If we can find the field name directly, it means the field
# is not a set so just set the canonical name and go on.
field_cname = field
# Since we are not in a set, (re)set the current set.
current_set_number = None
elif (field.split('_')[-1].isdigit()
and field[:-(len(field.split('_')[-1]))-1] in self._dyn_fields.keys()):
# If the field can be split on underscore characters,
# the last part contains only digits and the
# everything *but* the last part is found in the
# field configuration, we are good to go.
# (Cowardly refusing to use regex here).
field_cname = field[:-(len(field.split('_')[-1]))-1]
# Since we apparently are in a set, remember the
# the set number we are at.
current_set_number = str(field.split('_')[-1])
else:
# The field did not match to a canonical name
# from the fields dictionary or the name
# was malformed, throw it out.
continue
# Since the field seems to be a valid one, let us
# prepare the validator arguments and, if we are in a set
# replace the %field_name% convention where we find it.
validators = []
if 'validators' in self._dyn_fields[field_cname]:
for validator in self._dyn_fields[field_cname]['validators']:
args = []
kwargs = {}
if 'args' in self._dyn_fields[field_cname]\
[validator.__name__]:
if not current_set_number:
args = self._dyn_fields[field_cname]\
[validator.__name__]['args']
else:
# If we are currently in a set, append the set number
# to all the words that are decorated with %'s within
# the arguments.
for arg in self._dyn_fields[field_cname]\
[validator.__name__]['args']:
try:
arg = re_field_name.sub(r'\1'+'_'+current_set_number,
arg)
except:
# The argument does not seem to be regex-able
# Probably not a string, thus we can skip it.
pass
args.append(arg)
if 'kwargs' in self._dyn_fields[field_cname]\
[validator.__name__]:
if not current_set_number:
kwargs = self._dyn_fields[field_cname]\
[validator.__name__]['kwargs']
else:
# If we are currently in a set, append the set number
# to all the words that are decorated with %'s within
# the arguments.
for key, arg in self.iteritems(self._dyn_fields[field_cname]\
[validator.__name__]['kwargs']):
try:
arg = re_field_name.sub(r'\1'+'_'+current_set_number,
arg)
except:
# The argument does not seem to be regex-able
# Probably not a string, thus we can skip it.
pass
kwargs[key] = arg
# Finally, bind arguments to the validator
# and add it to the list
validators.append(validator(*args, **kwargs))
# The field is setup, it is time to add it to the form.
field_type = self._dyn_fields[field_cname]['type']
field_label = self._dyn_fields[field_cname]['label']
field_args = self._dyn_fields[field_cname]['args']
field_kwargs = self._dyn_fields[field_cname]['kwargs']
setattr(F, field, field_type(field_label,
validators=validators,
*field_args,
**field_kwargs))
# Create an instance of the form with the newly
# created fields and give it back to the caller.
if self.flask_wtf:
# Flask WTF overrides the form initialization
# and already injects the POST variables.
form = F()
else:
form = F(post)
return form | python | def process(self, form, post):
""" Process the given WTForm Form object.
Iterate over the POST values and check each field
against the configuration that was made.
For each field that is valid, check all the validator
parameters for possible %field% replacement, then bind
these parameters to their validator.
Finally, add the field together with their validators
to the form.
:param form:
A valid WTForm Form object
:param post:
A MultiDict with the POST variables
"""
if not isinstance(form, FormMeta):
raise TypeError('Given form is not a valid WTForm.')
re_field_name = re.compile(r'\%([a-zA-Z0-9_]*)\%')
class F(form):
pass
for field, data in post.iteritems():
if field in F():
# Skip it if the POST field is one of the standard form fields.
continue
else:
if field in self._dyn_fields:
# If we can find the field name directly, it means the field
# is not a set so just set the canonical name and go on.
field_cname = field
# Since we are not in a set, (re)set the current set.
current_set_number = None
elif (field.split('_')[-1].isdigit()
and field[:-(len(field.split('_')[-1]))-1] in self._dyn_fields.keys()):
# If the field can be split on underscore characters,
# the last part contains only digits and the
# everything *but* the last part is found in the
# field configuration, we are good to go.
# (Cowardly refusing to use regex here).
field_cname = field[:-(len(field.split('_')[-1]))-1]
# Since we apparently are in a set, remember the
# the set number we are at.
current_set_number = str(field.split('_')[-1])
else:
# The field did not match to a canonical name
# from the fields dictionary or the name
# was malformed, throw it out.
continue
# Since the field seems to be a valid one, let us
# prepare the validator arguments and, if we are in a set
# replace the %field_name% convention where we find it.
validators = []
if 'validators' in self._dyn_fields[field_cname]:
for validator in self._dyn_fields[field_cname]['validators']:
args = []
kwargs = {}
if 'args' in self._dyn_fields[field_cname]\
[validator.__name__]:
if not current_set_number:
args = self._dyn_fields[field_cname]\
[validator.__name__]['args']
else:
# If we are currently in a set, append the set number
# to all the words that are decorated with %'s within
# the arguments.
for arg in self._dyn_fields[field_cname]\
[validator.__name__]['args']:
try:
arg = re_field_name.sub(r'\1'+'_'+current_set_number,
arg)
except:
# The argument does not seem to be regex-able
# Probably not a string, thus we can skip it.
pass
args.append(arg)
if 'kwargs' in self._dyn_fields[field_cname]\
[validator.__name__]:
if not current_set_number:
kwargs = self._dyn_fields[field_cname]\
[validator.__name__]['kwargs']
else:
# If we are currently in a set, append the set number
# to all the words that are decorated with %'s within
# the arguments.
for key, arg in self.iteritems(self._dyn_fields[field_cname]\
[validator.__name__]['kwargs']):
try:
arg = re_field_name.sub(r'\1'+'_'+current_set_number,
arg)
except:
# The argument does not seem to be regex-able
# Probably not a string, thus we can skip it.
pass
kwargs[key] = arg
# Finally, bind arguments to the validator
# and add it to the list
validators.append(validator(*args, **kwargs))
# The field is setup, it is time to add it to the form.
field_type = self._dyn_fields[field_cname]['type']
field_label = self._dyn_fields[field_cname]['label']
field_args = self._dyn_fields[field_cname]['args']
field_kwargs = self._dyn_fields[field_cname]['kwargs']
setattr(F, field, field_type(field_label,
validators=validators,
*field_args,
**field_kwargs))
# Create an instance of the form with the newly
# created fields and give it back to the caller.
if self.flask_wtf:
# Flask WTF overrides the form initialization
# and already injects the POST variables.
form = F()
else:
form = F(post)
return form | [
"def",
"process",
"(",
"self",
",",
"form",
",",
"post",
")",
":",
"if",
"not",
"isinstance",
"(",
"form",
",",
"FormMeta",
")",
":",
"raise",
"TypeError",
"(",
"'Given form is not a valid WTForm.'",
")",
"re_field_name",
"=",
"re",
".",
"compile",
"(",
"r'\\%([a-zA-Z0-9_]*)\\%'",
")",
"class",
"F",
"(",
"form",
")",
":",
"pass",
"for",
"field",
",",
"data",
"in",
"post",
".",
"iteritems",
"(",
")",
":",
"if",
"field",
"in",
"F",
"(",
")",
":",
"continue",
"else",
":",
"if",
"field",
"in",
"self",
".",
"_dyn_fields",
":",
"field_cname",
"=",
"field",
"current_set_number",
"=",
"None",
"elif",
"(",
"field",
".",
"split",
"(",
"'_'",
")",
"[",
"-",
"1",
"]",
".",
"isdigit",
"(",
")",
"and",
"field",
"[",
":",
"-",
"(",
"len",
"(",
"field",
".",
"split",
"(",
"'_'",
")",
"[",
"-",
"1",
"]",
")",
")",
"-",
"1",
"]",
"in",
"self",
".",
"_dyn_fields",
".",
"keys",
"(",
")",
")",
":",
"field_cname",
"=",
"field",
"[",
":",
"-",
"(",
"len",
"(",
"field",
".",
"split",
"(",
"'_'",
")",
"[",
"-",
"1",
"]",
")",
")",
"-",
"1",
"]",
"current_set_number",
"=",
"str",
"(",
"field",
".",
"split",
"(",
"'_'",
")",
"[",
"-",
"1",
"]",
")",
"else",
":",
"continue",
"validators",
"=",
"[",
"]",
"if",
"'validators'",
"in",
"self",
".",
"_dyn_fields",
"[",
"field_cname",
"]",
":",
"for",
"validator",
"in",
"self",
".",
"_dyn_fields",
"[",
"field_cname",
"]",
"[",
"'validators'",
"]",
":",
"args",
"=",
"[",
"]",
"kwargs",
"=",
"{",
"}",
"if",
"'args'",
"in",
"self",
".",
"_dyn_fields",
"[",
"field_cname",
"]",
"[",
"validator",
".",
"__name__",
"]",
":",
"if",
"not",
"current_set_number",
":",
"args",
"=",
"self",
".",
"_dyn_fields",
"[",
"field_cname",
"]",
"[",
"validator",
".",
"__name__",
"]",
"[",
"'args'",
"]",
"else",
":",
"for",
"arg",
"in",
"self",
".",
"_dyn_fields",
"[",
"field_cname",
"]",
"[",
"validator",
".",
"__name__",
"]",
"[",
"'args'",
"]",
":",
"try",
":",
"arg",
"=",
"re_field_name",
".",
"sub",
"(",
"r'\\1'",
"+",
"'_'",
"+",
"current_set_number",
",",
"arg",
")",
"except",
":",
"pass",
"args",
".",
"append",
"(",
"arg",
")",
"if",
"'kwargs'",
"in",
"self",
".",
"_dyn_fields",
"[",
"field_cname",
"]",
"[",
"validator",
".",
"__name__",
"]",
":",
"if",
"not",
"current_set_number",
":",
"kwargs",
"=",
"self",
".",
"_dyn_fields",
"[",
"field_cname",
"]",
"[",
"validator",
".",
"__name__",
"]",
"[",
"'kwargs'",
"]",
"else",
":",
"for",
"key",
",",
"arg",
"in",
"self",
".",
"iteritems",
"(",
"self",
".",
"_dyn_fields",
"[",
"field_cname",
"]",
"[",
"validator",
".",
"__name__",
"]",
"[",
"'kwargs'",
"]",
")",
":",
"try",
":",
"arg",
"=",
"re_field_name",
".",
"sub",
"(",
"r'\\1'",
"+",
"'_'",
"+",
"current_set_number",
",",
"arg",
")",
"except",
":",
"pass",
"kwargs",
"[",
"key",
"]",
"=",
"arg",
"validators",
".",
"append",
"(",
"validator",
"(",
"*",
"args",
",",
"**",
"kwargs",
")",
")",
"field_type",
"=",
"self",
".",
"_dyn_fields",
"[",
"field_cname",
"]",
"[",
"'type'",
"]",
"field_label",
"=",
"self",
".",
"_dyn_fields",
"[",
"field_cname",
"]",
"[",
"'label'",
"]",
"field_args",
"=",
"self",
".",
"_dyn_fields",
"[",
"field_cname",
"]",
"[",
"'args'",
"]",
"field_kwargs",
"=",
"self",
".",
"_dyn_fields",
"[",
"field_cname",
"]",
"[",
"'kwargs'",
"]",
"setattr",
"(",
"F",
",",
"field",
",",
"field_type",
"(",
"field_label",
",",
"validators",
"=",
"validators",
",",
"*",
"field_args",
",",
"**",
"field_kwargs",
")",
")",
"if",
"self",
".",
"flask_wtf",
":",
"form",
"=",
"F",
"(",
")",
"else",
":",
"form",
"=",
"F",
"(",
"post",
")",
"return",
"form"
] | Process the given WTForm Form object.
Iterate over the POST values and check each field
against the configuration that was made.
For each field that is valid, check all the validator
parameters for possible %field% replacement, then bind
these parameters to their validator.
Finally, add the field together with their validators
to the form.
:param form:
A valid WTForm Form object
:param post:
A MultiDict with the POST variables | [
"Process",
"the",
"given",
"WTForm",
"Form",
"object",
"."
] | d984a646075219a6f8a0e931c96035ca3e44be56 | https://github.com/Timusan/wtforms-dynamic-fields/blob/d984a646075219a6f8a0e931c96035ca3e44be56/wtforms_dynamic_fields/wtforms_dynamic_fields.py#L90-L214 | train |
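A hedged end-to-end sketch continuing the dynamic-fields examples above; PersonForm and post_data are placeholders, with post_data standing for the framework-supplied POST MultiDict.

from wtforms import Form

class PersonForm(Form):
    pass   # any static fields would be declared here

form = dynamic.process(PersonForm, post_data)   # builds email_1, email_2, ... from the POST data
if form.validate():
    print(form.data)

Note that process() checks the class against FormMeta, so the Form class itself (not an instance) is passed in.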
mpg-age-bioinformatics/AGEpy | AGEpy/bed.py | GetBEDnarrowPeakgz | def GetBEDnarrowPeakgz(URL_or_PATH_TO_file):
"""
Reads a gz compressed BED narrow peak file from a web address or local file
:param URL_or_PATH_TO_file: web address or path to local file
:returns: a Pandas dataframe
"""
if os.path.isfile(URL_or_PATH_TO_file):
response=open(URL_or_PATH_TO_file, "r")
compressedFile = StringIO.StringIO(response.read())
else:
response = urllib2.urlopen(URL_or_PATH_TO_file)
compressedFile = StringIO.StringIO(response.read())
decompressedFile = gzip.GzipFile(fileobj=compressedFile)
out=decompressedFile.read().split("\n")
out=[ s.split("\t") for s in out]
out=pd.DataFrame(out)
out.columns=["chrom","chromStart","chromEnd","name","score","strand","signalValue","-log10(pValue)","-log10(qvalue)","peak"]
out["name"]=out.index.tolist()
out["name"]="Peak_"+out["name"].astype(str)
out=out[:-1]
return out | python | def GetBEDnarrowPeakgz(URL_or_PATH_TO_file):
"""
Reads a gz compressed BED narrow peak file from a web address or local file
:param URL_or_PATH_TO_file: web address or path to local file
:returns: a Pandas dataframe
"""
if os.path.isfile(URL_or_PATH_TO_file):
response=open(URL_or_PATH_TO_file, "r")
compressedFile = StringIO.StringIO(response.read())
else:
response = urllib2.urlopen(URL_or_PATH_TO_file)
compressedFile = StringIO.StringIO(response.read())
decompressedFile = gzip.GzipFile(fileobj=compressedFile)
out=decompressedFile.read().split("\n")
out=[ s.split("\t") for s in out]
out=pd.DataFrame(out)
out.columns=["chrom","chromStart","chromEnd","name","score","strand","signalValue","-log10(pValue)","-log10(qvalue)","peak"]
out["name"]=out.index.tolist()
out["name"]="Peak_"+out["name"].astype(str)
out=out[:-1]
return out | [
"def",
"GetBEDnarrowPeakgz",
"(",
"URL_or_PATH_TO_file",
")",
":",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"URL_or_PATH_TO_file",
")",
":",
"response",
"=",
"open",
"(",
"URL_or_PATH_TO_file",
",",
"\"r\"",
")",
"compressedFile",
"=",
"StringIO",
".",
"StringIO",
"(",
"response",
".",
"read",
"(",
")",
")",
"else",
":",
"response",
"=",
"urllib2",
".",
"urlopen",
"(",
"URL_or_PATH_TO_file",
")",
"compressedFile",
"=",
"StringIO",
".",
"StringIO",
"(",
"response",
".",
"read",
"(",
")",
")",
"decompressedFile",
"=",
"gzip",
".",
"GzipFile",
"(",
"fileobj",
"=",
"compressedFile",
")",
"out",
"=",
"decompressedFile",
".",
"read",
"(",
")",
".",
"split",
"(",
"\"\\n\"",
")",
"out",
"=",
"[",
"s",
".",
"split",
"(",
"\"\\t\"",
")",
"for",
"s",
"in",
"out",
"]",
"out",
"=",
"pd",
".",
"DataFrame",
"(",
"out",
")",
"out",
".",
"columns",
"=",
"[",
"\"chrom\"",
",",
"\"chromStart\"",
",",
"\"chromEnd\"",
",",
"\"name\"",
",",
"\"score\"",
",",
"\"strand\"",
",",
"\"signalValue\"",
",",
"\"-log10(pValue)\"",
",",
"\"-log10(qvalue)\"",
",",
"\"peak\"",
"]",
"out",
"[",
"\"name\"",
"]",
"=",
"out",
".",
"index",
".",
"tolist",
"(",
")",
"out",
"[",
"\"name\"",
"]",
"=",
"\"Peak_\"",
"+",
"out",
"[",
"\"name\"",
"]",
".",
"astype",
"(",
"str",
")",
"out",
"=",
"out",
"[",
":",
"-",
"1",
"]",
"return",
"out"
] | Reads a gz compressed BED narrow peak file from a web address or local file
:param URL_or_PATH_TO_file: web address of path to local file
:returns: a Pandas dataframe | [
"Reads",
"a",
"gz",
"compressed",
"BED",
"narrow",
"peak",
"file",
"from",
"a",
"web",
"address",
"or",
"local",
"file"
] | 887808a7a2c1504f39ce8d8cb36c15c1721cd29f | https://github.com/mpg-age-bioinformatics/AGEpy/blob/887808a7a2c1504f39ce8d8cb36c15c1721cd29f/AGEpy/bed.py#L30-L53 | train |
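Usage sketch for GetBEDnarrowPeakgz(); the file path is a placeholder and the import path (AGEpy.bed) is assumed from the file location.

import AGEpy.bed as bed
peaks = bed.GetBEDnarrowPeakgz('/path/to/peaks.narrowPeak.gz')   # local file or URL of a gzipped narrowPeak file
print(peaks[['chrom', 'chromStart', 'chromEnd', 'name']].head())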
mpg-age-bioinformatics/AGEpy | AGEpy/bed.py | dfTObedtool | def dfTObedtool(df):
"""
Transforms a pandas dataframe into a bedtool
:param df: Pandas dataframe
:returns: a bedtool
"""
df=df.astype(str)
df=df.drop_duplicates()
df=df.values.tolist()
df=["\t".join(s) for s in df ]
df="\n".join(df)
df=BedTool(df, from_string=True)
return df | python | def dfTObedtool(df):
"""
Transforms a pandas dataframe into a bedtool
:param df: Pandas dataframe
:returns: a bedtool
"""
df=df.astype(str)
df=df.drop_duplicates()
df=df.values.tolist()
df=["\t".join(s) for s in df ]
df="\n".join(df)
df=BedTool(df, from_string=True)
return df | [
"def",
"dfTObedtool",
"(",
"df",
")",
":",
"df",
"=",
"df",
".",
"astype",
"(",
"str",
")",
"df",
"=",
"df",
".",
"drop_duplicates",
"(",
")",
"df",
"=",
"df",
".",
"values",
".",
"tolist",
"(",
")",
"df",
"=",
"[",
"\"\\t\"",
".",
"join",
"(",
"s",
")",
"for",
"s",
"in",
"df",
"]",
"df",
"=",
"\"\\n\"",
".",
"join",
"(",
"df",
")",
"df",
"=",
"BedTool",
"(",
"df",
",",
"from_string",
"=",
"True",
")",
"return",
"df"
] | Transforms a pandas dataframe into a bedtool
:param df: Pandas dataframe
:returns: a bedtool | [
"Transforms",
"a",
"pandas",
"dataframe",
"into",
"a",
"bedtool"
] | 887808a7a2c1504f39ce8d8cb36c15c1721cd29f | https://github.com/mpg-age-bioinformatics/AGEpy/blob/887808a7a2c1504f39ce8d8cb36c15c1721cd29f/AGEpy/bed.py#L55-L70 | train |
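Sketch chaining the two bed helpers above; 'peaks' is the dataframe from the GetBEDnarrowPeakgz example.

import AGEpy.bed as bed
bt = bed.dfTObedtool(peaks)   # a pybedtools BedTool built from the dataframe
# the usual BedTool operations now apply, e.g. bt.sort() or intersections with other BedTools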
Nachtfeuer/pipeline | spline/tools/event.py | Event.configure | def configure(**kwargs):
"""Global configuration for event handling."""
for key in kwargs:
if key == 'is_logging_enabled':
Event.is_logging_enabled = kwargs[key]
elif key == 'collector_queue':
Event.collector_queue = kwargs[key]
else:
Logger.get_logger(__name__).error("Unknown key %s in configure or bad type %s",
key, type(kwargs[key])) | python | def configure(**kwargs):
"""Global configuration for event handling."""
for key in kwargs:
if key == 'is_logging_enabled':
Event.is_logging_enabled = kwargs[key]
elif key == 'collector_queue':
Event.collector_queue = kwargs[key]
else:
Logger.get_logger(__name__).error("Unknown key %s in configure or bad type %s",
key, type(kwargs[key])) | [
"def",
"configure",
"(",
"**",
"kwargs",
")",
":",
"for",
"key",
"in",
"kwargs",
":",
"if",
"key",
"==",
"'is_logging_enabled'",
":",
"Event",
".",
"is_logging_enabled",
"=",
"kwargs",
"[",
"key",
"]",
"elif",
"key",
"==",
"'collector_queue'",
":",
"Event",
".",
"collector_queue",
"=",
"kwargs",
"[",
"key",
"]",
"else",
":",
"Logger",
".",
"get_logger",
"(",
"__name__",
")",
".",
"error",
"(",
"\"Unknown key %s in configure or bad type %s\"",
",",
"key",
",",
"type",
"(",
"kwargs",
"[",
"key",
"]",
")",
")"
] | Global configuration for event handling. | [
"Global",
"configuration",
"for",
"event",
"handling",
"."
] | 04ca18c4e95e4349532bb45b768206393e1f2c13 | https://github.com/Nachtfeuer/pipeline/blob/04ca18c4e95e4349532bb45b768206393e1f2c13/spline/tools/event.py#L46-L55 | train |
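A short, hedged sketch of driving Event.configure; the multiprocessing queue is only one possible collector queue, and the import path is inferred from the entry's file path.

from multiprocessing import Queue

from spline.tools.event import Event

collector_queue = Queue()

# These are the two keys configure() recognises.
Event.configure(is_logging_enabled=False, collector_queue=collector_queue)

# Unknown keys are only reported through the logger and otherwise ignored.
Event.configure(no_such_option=True)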
Nachtfeuer/pipeline | spline/tools/event.py | Event.failed | def failed(self, **kwargs):
"""Finish event as failed with optional additional information."""
self.finished = datetime.now()
self.status = 'failed'
self.information.update(kwargs)
self.logger.info("Failed - took %f seconds.", self.duration())
self.update_report_collector(int(time.mktime(self.finished.timetuple()))) | python | def failed(self, **kwargs):
"""Finish event as failed with optional additional information."""
self.finished = datetime.now()
self.status = 'failed'
self.information.update(kwargs)
self.logger.info("Failed - took %f seconds.", self.duration())
self.update_report_collector(int(time.mktime(self.finished.timetuple()))) | [
"def",
"failed",
"(",
"self",
",",
"**",
"kwargs",
")",
":",
"self",
".",
"finished",
"=",
"datetime",
".",
"now",
"(",
")",
"self",
".",
"status",
"=",
"'failed'",
"self",
".",
"information",
".",
"update",
"(",
"kwargs",
")",
"self",
".",
"logger",
".",
"info",
"(",
"\"Failed - took %f seconds.\"",
",",
"self",
".",
"duration",
"(",
")",
")",
"self",
".",
"update_report_collector",
"(",
"int",
"(",
"time",
".",
"mktime",
"(",
"self",
".",
"finished",
".",
"timetuple",
"(",
")",
")",
")",
")"
] | Finish event as failed with optional additional information. | [
"Finish",
"event",
"as",
"failed",
"with",
"optional",
"additional",
"information",
"."
] | 04ca18c4e95e4349532bb45b768206393e1f2c13 | https://github.com/Nachtfeuer/pipeline/blob/04ca18c4e95e4349532bb45b768206393e1f2c13/spline/tools/event.py#L69-L75 | train |
Nachtfeuer/pipeline | spline/tools/event.py | Event.update_report_collector | def update_report_collector(self, timestamp):
"""Updating report collector for pipeline details."""
report_enabled = 'report' in self.information and self.information['report'] == 'html'
report_enabled = report_enabled and 'stage' in self.information
report_enabled = report_enabled and Event.collector_queue is not None
if report_enabled:
Event.collector_queue.put(CollectorUpdate(
matrix=self.information['matrix'] if 'matrix' in self.information else 'default',
stage=self.information['stage'],
status=self.status,
timestamp=timestamp,
information=self.information
)) | python | def update_report_collector(self, timestamp):
"""Updating report collector for pipeline details."""
report_enabled = 'report' in self.information and self.information['report'] == 'html'
report_enabled = report_enabled and 'stage' in self.information
report_enabled = report_enabled and Event.collector_queue is not None
if report_enabled:
Event.collector_queue.put(CollectorUpdate(
matrix=self.information['matrix'] if 'matrix' in self.information else 'default',
stage=self.information['stage'],
status=self.status,
timestamp=timestamp,
information=self.information
)) | [
"def",
"update_report_collector",
"(",
"self",
",",
"timestamp",
")",
":",
"report_enabled",
"=",
"'report'",
"in",
"self",
".",
"information",
"and",
"self",
".",
"information",
"[",
"'report'",
"]",
"==",
"'html'",
"report_enabled",
"=",
"report_enabled",
"and",
"'stage'",
"in",
"self",
".",
"information",
"report_enabled",
"=",
"report_enabled",
"and",
"Event",
".",
"collector_queue",
"is",
"not",
"None",
"if",
"report_enabled",
":",
"Event",
".",
"collector_queue",
".",
"put",
"(",
"CollectorUpdate",
"(",
"matrix",
"=",
"self",
".",
"information",
"[",
"'matrix'",
"]",
"if",
"'matrix'",
"in",
"self",
".",
"information",
"else",
"'default'",
",",
"stage",
"=",
"self",
".",
"information",
"[",
"'stage'",
"]",
",",
"status",
"=",
"self",
".",
"status",
",",
"timestamp",
"=",
"timestamp",
",",
"information",
"=",
"self",
".",
"information",
")",
")"
] | Updating report collector for pipeline details. | [
"Updating",
"report",
"collector",
"for",
"pipeline",
"details",
"."
] | 04ca18c4e95e4349532bb45b768206393e1f2c13 | https://github.com/Nachtfeuer/pipeline/blob/04ca18c4e95e4349532bb45b768206393e1f2c13/spline/tools/event.py#L89-L102 | train |
acutesoftware/AIKIF | aikif/toolbox/sql_tools.py | count_lines_in_file | def count_lines_in_file(src_file ):
"""
test function.
"""
tot = 0
res = ''
try:
with open(src_file, 'r') as f:
for line in f:
tot += 1
res = str(tot) + ' recs read'
except:
res = 'ERROR -couldnt open file'
return res | python | def count_lines_in_file(src_file ):
"""
test function.
"""
tot = 0
res = ''
try:
with open(src_file, 'r') as f:
for line in f:
tot += 1
res = str(tot) + ' recs read'
except:
res = 'ERROR -couldnt open file'
return res | [
"def",
"count_lines_in_file",
"(",
"src_file",
")",
":",
"tot",
"=",
"0",
"res",
"=",
"''",
"try",
":",
"with",
"open",
"(",
"src_file",
",",
"'r'",
")",
"as",
"f",
":",
"for",
"line",
"in",
"f",
":",
"tot",
"+=",
"1",
"res",
"=",
"str",
"(",
"tot",
")",
"+",
"' recs read'",
"except",
":",
"res",
"=",
"'ERROR -couldnt open file'",
"return",
"res"
] | test function. | [
"test",
"function",
"."
] | fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03 | https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/toolbox/sql_tools.py#L10-L23 | train |
acutesoftware/AIKIF | aikif/toolbox/sql_tools.py | load_txt_to_sql | def load_txt_to_sql(tbl_name, src_file_and_path, src_file, op_folder):
"""
creates a SQL loader script to load a text file into a database
and then executes it.
Note that src_file is
"""
if op_folder == '':
pth = ''
else:
pth = op_folder + os.sep
fname_create_script = pth + 'CREATE_' + tbl_name + '.SQL'
fname_backout_file = pth + 'BACKOUT_' + tbl_name + '.SQL'
fname_control_file = pth + tbl_name + '.CTL'
cols = read_csv_cols_to_table_cols(src_file)
create_script_staging_table(fname_create_script, tbl_name, cols)
create_file(fname_backout_file, 'DROP TABLE ' + tbl_name + ' CASCADE CONSTRAINTS;\n')
create_CTL(fname_control_file, tbl_name, cols, 'TRUNCATE') | python | def load_txt_to_sql(tbl_name, src_file_and_path, src_file, op_folder):
"""
creates a SQL loader script to load a text file into a database
and then executes it.
Note that src_file is
"""
if op_folder == '':
pth = ''
else:
pth = op_folder + os.sep
fname_create_script = pth + 'CREATE_' + tbl_name + '.SQL'
fname_backout_file = pth + 'BACKOUT_' + tbl_name + '.SQL'
fname_control_file = pth + tbl_name + '.CTL'
cols = read_csv_cols_to_table_cols(src_file)
create_script_staging_table(fname_create_script, tbl_name, cols)
create_file(fname_backout_file, 'DROP TABLE ' + tbl_name + ' CASCADE CONSTRAINTS;\n')
create_CTL(fname_control_file, tbl_name, cols, 'TRUNCATE') | [
"def",
"load_txt_to_sql",
"(",
"tbl_name",
",",
"src_file_and_path",
",",
"src_file",
",",
"op_folder",
")",
":",
"if",
"op_folder",
"==",
"''",
":",
"pth",
"=",
"''",
"else",
":",
"pth",
"=",
"op_folder",
"+",
"os",
".",
"sep",
"fname_create_script",
"=",
"pth",
"+",
"'CREATE_'",
"+",
"tbl_name",
"+",
"'.SQL'",
"fname_backout_file",
"=",
"pth",
"+",
"'BACKOUT_'",
"+",
"tbl_name",
"+",
"'.SQL'",
"fname_control_file",
"=",
"pth",
"+",
"tbl_name",
"+",
"'.CTL'",
"cols",
"=",
"read_csv_cols_to_table_cols",
"(",
"src_file",
")",
"create_script_staging_table",
"(",
"fname_create_script",
",",
"tbl_name",
",",
"cols",
")",
"create_file",
"(",
"fname_backout_file",
",",
"'DROP TABLE '",
"+",
"tbl_name",
"+",
"' CASCADE CONSTRAINTS;\\n'",
")",
"create_CTL",
"(",
"fname_control_file",
",",
"tbl_name",
",",
"cols",
",",
"'TRUNCATE'",
")"
] | creates a SQL loader script to load a text file into a database
and then executes it.
Note that src_file is | [
"creates",
"a",
"SQL",
"loader",
"script",
"to",
"load",
"a",
"text",
"file",
"into",
"a",
"database",
"and",
"then",
"executes",
"it",
".",
"Note",
"that",
"src_file",
"is"
] | fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03 | https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/toolbox/sql_tools.py#L26-L44 | train |
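A hedged call sketch for load_txt_to_sql; the table name, paths and expected output file names below are hypothetical, and the helpers it relies on (read_csv_cols_to_table_cols, create_script_staging_table, create_file, create_CTL) are assumed to come from the same module.

from aikif.toolbox.sql_tools import load_txt_to_sql  # import path inferred from the entry

# For a CSV of customer records this should write, into /data/sql_out:
#   CREATE_STG_CUSTOMER.SQL   - staging table DDL built from the CSV header
#   BACKOUT_STG_CUSTOMER.SQL  - DROP TABLE STG_CUSTOMER CASCADE CONSTRAINTS;
#   STG_CUSTOMER.CTL          - SQL*Loader control file in TRUNCATE mode
load_txt_to_sql("STG_CUSTOMER", "/data/customer.csv", "/data/customer.csv", "/data/sql_out")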
asyncdef/aitertools | aitertools/__init__.py | anext | async def anext(*args):
"""Return the next item from an async iterator.
Args:
iterable: An async iterable.
default: An optional default value to return if the iterable is empty.
Return:
The next value of the iterable.
Raises:
TypeError: The iterable given is not async.
This function will return the next value form an async iterable. If the
iterable is empty the StopAsyncIteration will be propogated. However, if
a default value is given as a second argument the exception is silenced and
the default value is returned instead.
"""
if not args:
raise TypeError('anext() expected at least 1 arguments, got 0')
if len(args) > 2:
raise TypeError(
'anext() expected at most 2 arguments, got {}'.format(len(args))
)
iterable, default, has_default = args[0], None, False
if len(args) == 2:
iterable, default = args
has_default = True
try:
return await iterable.__anext__()
except StopAsyncIteration as exc:
if has_default:
return default
raise StopAsyncIteration() from exc | python | async def anext(*args):
"""Return the next item from an async iterator.
Args:
iterable: An async iterable.
default: An optional default value to return if the iterable is empty.
Return:
The next value of the iterable.
Raises:
TypeError: The iterable given is not async.
This function will return the next value form an async iterable. If the
iterable is empty the StopAsyncIteration will be propogated. However, if
a default value is given as a second argument the exception is silenced and
the default value is returned instead.
"""
if not args:
raise TypeError('anext() expected at least 1 arguments, got 0')
if len(args) > 2:
raise TypeError(
'anext() expected at most 2 arguments, got {}'.format(len(args))
)
iterable, default, has_default = args[0], None, False
if len(args) == 2:
iterable, default = args
has_default = True
try:
return await iterable.__anext__()
except StopAsyncIteration as exc:
if has_default:
return default
raise StopAsyncIteration() from exc | [
"async",
"def",
"anext",
"(",
"*",
"args",
")",
":",
"if",
"not",
"args",
":",
"raise",
"TypeError",
"(",
"'anext() expected at least 1 arguments, got 0'",
")",
"if",
"len",
"(",
"args",
")",
">",
"2",
":",
"raise",
"TypeError",
"(",
"'anext() expected at most 2 arguments, got {}'",
".",
"format",
"(",
"len",
"(",
"args",
")",
")",
")",
"iterable",
",",
"default",
",",
"has_default",
"=",
"args",
"[",
"0",
"]",
",",
"None",
",",
"False",
"if",
"len",
"(",
"args",
")",
"==",
"2",
":",
"iterable",
",",
"default",
"=",
"args",
"has_default",
"=",
"True",
"try",
":",
"return",
"await",
"iterable",
".",
"__anext__",
"(",
")",
"except",
"StopAsyncIteration",
"as",
"exc",
":",
"if",
"has_default",
":",
"return",
"default",
"raise",
"StopAsyncIteration",
"(",
")",
"from",
"exc"
] | Return the next item from an async iterator.
Args:
iterable: An async iterable.
default: An optional default value to return if the iterable is empty.
Return:
The next value of the iterable.
Raises:
TypeError: The iterable given is not async.
This function will return the next value form an async iterable. If the
iterable is empty the StopAsyncIteration will be propogated. However, if
a default value is given as a second argument the exception is silenced and
the default value is returned instead. | [
"Return",
"the",
"next",
"item",
"from",
"an",
"async",
"iterator",
"."
] | 26a6c7e71e87dd1ddc4acb755d70ca30894f7928 | https://github.com/asyncdef/aitertools/blob/26a6c7e71e87dd1ddc4acb755d70ca30894f7928/aitertools/__init__.py#L102-L146 | train |
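A small asyncio driver may make the default-value behaviour of anext clearer; the async generator below is invented for the demonstration.

import asyncio

from aitertools import anext

async def numbers():
    # Tiny async generator used only for this example.
    for n in (1, 2):
        yield n

async def main():
    it = numbers()
    print(await anext(it))        # 1
    print(await anext(it))        # 2
    print(await anext(it, None))  # exhausted: returns the default instead of raising

asyncio.run(main())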
asyncdef/aitertools | aitertools/__init__.py | repeat | def repeat(obj, times=None):
"""Make an iterator that returns object over and over again."""
if times is None:
return AsyncIterWrapper(sync_itertools.repeat(obj))
return AsyncIterWrapper(sync_itertools.repeat(obj, times)) | python | def repeat(obj, times=None):
"""Make an iterator that returns object over and over again."""
if times is None:
return AsyncIterWrapper(sync_itertools.repeat(obj))
return AsyncIterWrapper(sync_itertools.repeat(obj, times)) | [
"def",
"repeat",
"(",
"obj",
",",
"times",
"=",
"None",
")",
":",
"if",
"times",
"is",
"None",
":",
"return",
"AsyncIterWrapper",
"(",
"sync_itertools",
".",
"repeat",
"(",
"obj",
")",
")",
"return",
"AsyncIterWrapper",
"(",
"sync_itertools",
".",
"repeat",
"(",
"obj",
",",
"times",
")",
")"
] | Make an iterator that returns object over and over again. | [
"Make",
"an",
"iterator",
"that",
"returns",
"object",
"over",
"and",
"over",
"again",
"."
] | 26a6c7e71e87dd1ddc4acb755d70ca30894f7928 | https://github.com/asyncdef/aitertools/blob/26a6c7e71e87dd1ddc4acb755d70ca30894f7928/aitertools/__init__.py#L240-L246 | train |
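A consumption sketch for repeat, assuming the AsyncIterWrapper it returns supports async for (which is its purpose); the values are illustrative.

import asyncio

from aitertools import repeat

async def main():
    # Bounded form: the wrapped itertools.repeat yields "ping" exactly three times.
    async for value in repeat("ping", times=3):
        print(value)

asyncio.run(main())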
asyncdef/aitertools | aitertools/__init__.py | _async_callable | def _async_callable(func):
"""Ensure the callable is an async def."""
if isinstance(func, types.CoroutineType):
return func
@functools.wraps(func)
async def _async_def_wrapper(*args, **kwargs):
"""Wrap a a sync callable in an async def."""
return func(*args, **kwargs)
return _async_def_wrapper | python | def _async_callable(func):
"""Ensure the callable is an async def."""
if isinstance(func, types.CoroutineType):
return func
@functools.wraps(func)
async def _async_def_wrapper(*args, **kwargs):
"""Wrap a a sync callable in an async def."""
return func(*args, **kwargs)
return _async_def_wrapper | [
"def",
"_async_callable",
"(",
"func",
")",
":",
"if",
"isinstance",
"(",
"func",
",",
"types",
".",
"CoroutineType",
")",
":",
"return",
"func",
"@",
"functools",
".",
"wraps",
"(",
"func",
")",
"async",
"def",
"_async_def_wrapper",
"(",
"*",
"args",
",",
"**",
"kwargs",
")",
":",
"return",
"func",
"(",
"*",
"args",
",",
"**",
"kwargs",
")",
"return",
"_async_def_wrapper"
] | Ensure the callable is an async def. | [
"Ensure",
"the",
"callable",
"is",
"an",
"async",
"def",
"."
] | 26a6c7e71e87dd1ddc4acb755d70ca30894f7928 | https://github.com/asyncdef/aitertools/blob/26a6c7e71e87dd1ddc4acb755d70ca30894f7928/aitertools/__init__.py#L249-L260 | train |
asyncdef/aitertools | aitertools/__init__.py | tee | def tee(iterable, n=2):
"""Return n independent iterators from a single iterable.
Once tee() has made a split, the original iterable should not be used
anywhere else; otherwise, the iterable could get advanced without the tee
objects being informed.
This itertool may require significant auxiliary storage (depending on how
much temporary data needs to be stored). In general, if one iterator uses
most or all of the data before another iterator starts, it is faster to use
list() instead of tee().
"""
tees = tuple(AsyncTeeIterable(iterable) for _ in range(n))
for tee in tees:
tee._siblings = tees
return tees | python | def tee(iterable, n=2):
"""Return n independent iterators from a single iterable.
Once tee() has made a split, the original iterable should not be used
anywhere else; otherwise, the iterable could get advanced without the tee
objects being informed.
This itertool may require significant auxiliary storage (depending on how
much temporary data needs to be stored). In general, if one iterator uses
most or all of the data before another iterator starts, it is faster to use
list() instead of tee().
"""
tees = tuple(AsyncTeeIterable(iterable) for _ in range(n))
for tee in tees:
tee._siblings = tees
return tees | [
"def",
"tee",
"(",
"iterable",
",",
"n",
"=",
"2",
")",
":",
"tees",
"=",
"tuple",
"(",
"AsyncTeeIterable",
"(",
"iterable",
")",
"for",
"_",
"in",
"range",
"(",
"n",
")",
")",
"for",
"tee",
"in",
"tees",
":",
"tee",
".",
"_siblings",
"=",
"tees",
"return",
"tees"
] | Return n independent iterators from a single iterable.
Once tee() has made a split, the original iterable should not be used
anywhere else; otherwise, the iterable could get advanced without the tee
objects being informed.
This itertool may require significant auxiliary storage (depending on how
much temporary data needs to be stored). In general, if one iterator uses
most or all of the data before another iterator starts, it is faster to use
list() instead of tee(). | [
"Return",
"n",
"independent",
"iterators",
"from",
"a",
"single",
"iterable",
"."
] | 26a6c7e71e87dd1ddc4acb755d70ca30894f7928 | https://github.com/asyncdef/aitertools/blob/26a6c7e71e87dd1ddc4acb755d70ca30894f7928/aitertools/__init__.py#L890-L907 | train |
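A hedged sketch of consuming tee; the async generator is invented, and the exact buffering behaviour depends on AsyncTeeIterable, which is not shown in this entry.

import asyncio

from aitertools import tee

async def source():
    for n in (1, 2, 3):
        yield n

async def main():
    first, second = tee(source(), n=2)
    # Per the docstring, drain the branches rather than touching source() again.
    print([v async for v in first])   # [1, 2, 3]
    print([v async for v in second])  # [1, 2, 3]

asyncio.run(main())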
nocarryr/python-dispatch | pydispatch/properties.py | Property._on_change | def _on_change(self, obj, old, value, **kwargs):
"""Called internally to emit changes from the instance object
The keyword arguments here will be passed to callbacks through the
instance object's :meth:`~pydispatch.dispatch.Dispatcher.emit` method.
Keyword Args:
property: The :class:`Property` instance. This is useful if multiple
properties are bound to the same callback. The attribute name
keys (optional): If the :class:`Property` is a container type
(:class:`ListProperty` or :class:`DictProperty`), the changes
may be found here.
This is not implemented for nested containers and will only be
available for operations that do not alter the size of the
container.
"""
kwargs['property'] = self
obj.emit(self.name, obj, value, old=old, **kwargs) | python | def _on_change(self, obj, old, value, **kwargs):
"""Called internally to emit changes from the instance object
The keyword arguments here will be passed to callbacks through the
instance object's :meth:`~pydispatch.dispatch.Dispatcher.emit` method.
Keyword Args:
property: The :class:`Property` instance. This is useful if multiple
properties are bound to the same callback. The attribute name
keys (optional): If the :class:`Property` is a container type
(:class:`ListProperty` or :class:`DictProperty`), the changes
may be found here.
This is not implemented for nested containers and will only be
available for operations that do not alter the size of the
container.
"""
kwargs['property'] = self
obj.emit(self.name, obj, value, old=old, **kwargs) | [
"def",
"_on_change",
"(",
"self",
",",
"obj",
",",
"old",
",",
"value",
",",
"**",
"kwargs",
")",
":",
"kwargs",
"[",
"'property'",
"]",
"=",
"self",
"obj",
".",
"emit",
"(",
"self",
".",
"name",
",",
"obj",
",",
"value",
",",
"old",
"=",
"old",
",",
"**",
"kwargs",
")"
] | Called internally to emit changes from the instance object
The keyword arguments here will be passed to callbacks through the
instance object's :meth:`~pydispatch.dispatch.Dispatcher.emit` method.
Keyword Args:
property: The :class:`Property` instance. This is useful if multiple
properties are bound to the same callback. The attribute name
keys (optional): If the :class:`Property` is a container type
(:class:`ListProperty` or :class:`DictProperty`), the changes
may be found here.
This is not implemented for nested containers and will only be
available for operations that do not alter the size of the
container. | [
"Called",
"internally",
"to",
"emit",
"changes",
"from",
"the",
"instance",
"object"
] | 7c5ca03835c922cbfdfd62772c9e560062c954c7 | https://github.com/nocarryr/python-dispatch/blob/7c5ca03835c922cbfdfd62772c9e560062c954c7/pydispatch/properties.py#L96-L114 | train |
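_on_change is internal, but the observable behaviour it drives looks roughly like the sketch below; the Sensor class, its value property and the callback are invented for illustration.

from pydispatch import Dispatcher, Property

class Sensor(Dispatcher):
    # A Property emits an event named after the attribute whenever it changes.
    value = Property(0)

def on_value(instance, value, **kwargs):
    # _on_change above supplies 'old' and 'property' in kwargs via emit().
    print("value changed from", kwargs.get("old"), "to", value)

sensor = Sensor()
sensor.bind(value=on_value)
sensor.value = 42  # Property.__set__ -> _on_change -> emit -> on_value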
OpenHydrology/floodestimation | floodestimation/parsers.py | FehFileParser.parse_str | def parse_str(self, s):
"""
Parse string and return relevant object
:param s: string to parse
:type s: str
:return: Parsed object
"""
self.object = self.parsed_class()
in_section = None # Holds name of FEH file section while traversing through file.
for line in s.split('\n'):
if line.lower().startswith('[end]'):
# Leave section
in_section = None
elif line.startswith('['):
# Enter section, sanitise `[Section Name]` to `section_name`
in_section = line.strip().strip('[]').lower().replace(' ', '_')
elif in_section:
try:
# Call method `_section_section_name(line)`
getattr(self, '_section_' + in_section)(line.strip())
except AttributeError:
pass # Skip unsupported section
return self.object | python | def parse_str(self, s):
"""
Parse string and return relevant object
:param s: string to parse
:type s: str
:return: Parsed object
"""
self.object = self.parsed_class()
in_section = None # Holds name of FEH file section while traversing through file.
for line in s.split('\n'):
if line.lower().startswith('[end]'):
# Leave section
in_section = None
elif line.startswith('['):
# Enter section, sanitise `[Section Name]` to `section_name`
in_section = line.strip().strip('[]').lower().replace(' ', '_')
elif in_section:
try:
# Call method `_section_section_name(line)`
getattr(self, '_section_' + in_section)(line.strip())
except AttributeError:
pass # Skip unsupported section
return self.object | [
"def",
"parse_str",
"(",
"self",
",",
"s",
")",
":",
"self",
".",
"object",
"=",
"self",
".",
"parsed_class",
"(",
")",
"in_section",
"=",
"None",
"for",
"line",
"in",
"s",
".",
"split",
"(",
"'\\n'",
")",
":",
"if",
"line",
".",
"lower",
"(",
")",
".",
"startswith",
"(",
"'[end]'",
")",
":",
"in_section",
"=",
"None",
"elif",
"line",
".",
"startswith",
"(",
"'['",
")",
":",
"in_section",
"=",
"line",
".",
"strip",
"(",
")",
".",
"strip",
"(",
"'[]'",
")",
".",
"lower",
"(",
")",
".",
"replace",
"(",
"' '",
",",
"'_'",
")",
"elif",
"in_section",
":",
"try",
":",
"getattr",
"(",
"self",
",",
"'_section_'",
"+",
"in_section",
")",
"(",
"line",
".",
"strip",
"(",
")",
")",
"except",
"AttributeError",
":",
"pass",
"return",
"self",
".",
"object"
] | Parse string and return relevant object
:param s: string to parse
:type s: str
:return: Parsed object | [
"Parse",
"string",
"and",
"return",
"relevant",
"object"
] | 782da7c5abd1348923129efe89fb70003ebb088c | https://github.com/OpenHydrology/floodestimation/blob/782da7c5abd1348923129efe89fb70003ebb088c/floodestimation/parsers.py#L70-L93 | train |
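The parsing flow is easier to see on a tiny FEH-style snippet. The subclass below is invented: real parsers in this package set parsed_class to a model object and define their own _section_* handlers, and it is assumed here that FehFileParser can be subclassed and instantiated this way.

from floodestimation.parsers import FehFileParser

class StationParser(FehFileParser):
    parsed_class = dict  # a plain dict keeps this sketch self-contained

    def _section_station(self, line):
        # Receives each stripped line between [STATION] and [END].
        key, _, value = line.partition(",")
        self.object[key.strip()] = value.strip()

text = "[STATION]\nName, Thames at Kingston\nNumber, 39001\n[END]\n"
print(StationParser().parse_str(text))
# {'Name': 'Thames at Kingston', 'Number': '39001'}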