id (int32, 0-252k) | repo (string, 7-55 chars) | path (string, 4-127 chars) | func_name (string, 1-88 chars) | original_string (string, 75-19.8k chars) | language (string, single value: python) | code (string, 75-19.8k chars) | code_tokens (sequence) | docstring (string, 3-17.3k chars) | docstring_tokens (sequence) | sha (string, 40 chars) | url (string, 87-242 chars) |
---|---|---|---|---|---|---|---|---|---|---|---|
4,700 | ladybug-tools/ladybug | ladybug/designday.py | OriginalClearSkyCondition.from_analysis_period | def from_analysis_period(cls, analysis_period, clearness=1,
daylight_savings_indicator='No'):
""""Initialize a OriginalClearSkyCondition from an analysis_period"""
_check_analysis_period(analysis_period)
return cls(analysis_period.st_month, analysis_period.st_day, clearness,
daylight_savings_indicator) | python | def from_analysis_period(cls, analysis_period, clearness=1,
daylight_savings_indicator='No'):
""""Initialize a OriginalClearSkyCondition from an analysis_period"""
_check_analysis_period(analysis_period)
return cls(analysis_period.st_month, analysis_period.st_day, clearness,
daylight_savings_indicator) | [
"def",
"from_analysis_period",
"(",
"cls",
",",
"analysis_period",
",",
"clearness",
"=",
"1",
",",
"daylight_savings_indicator",
"=",
"'No'",
")",
":",
"_check_analysis_period",
"(",
"analysis_period",
")",
"return",
"cls",
"(",
"analysis_period",
".",
"st_month",
",",
"analysis_period",
".",
"st_day",
",",
"clearness",
",",
"daylight_savings_indicator",
")"
] | Initialize a OriginalClearSkyCondition from an analysis_period | [
"Initialize",
"a",
"OriginalClearSkyCondition",
"from",
"an",
"analysis_period"
] | c08b7308077a48d5612f644943f92d5b5dade583 | https://github.com/ladybug-tools/ladybug/blob/c08b7308077a48d5612f644943f92d5b5dade583/ladybug/designday.py#L1289-L1294 |
4,701 | ladybug-tools/ladybug | ladybug/designday.py | OriginalClearSkyCondition.radiation_values | def radiation_values(self, location, timestep=1):
"""Lists of driect normal, diffuse horiz, and global horiz rad at each timestep.
"""
# create sunpath and get altitude at every timestep of the design day
sp = Sunpath.from_location(location)
altitudes = []
dates = self._get_datetimes(timestep)
for t_date in dates:
sun = sp.calculate_sun_from_date_time(t_date)
altitudes.append(sun.altitude)
dir_norm, diff_horiz = ashrae_clear_sky(
altitudes, self._month, self._clearness)
glob_horiz = [dhr + dnr * math.sin(math.radians(alt)) for
alt, dnr, dhr in zip(altitudes, dir_norm, diff_horiz)]
return dir_norm, diff_horiz, glob_horiz | python | def radiation_values(self, location, timestep=1):
"""Lists of driect normal, diffuse horiz, and global horiz rad at each timestep.
"""
# create sunpath and get altitude at every timestep of the design day
sp = Sunpath.from_location(location)
altitudes = []
dates = self._get_datetimes(timestep)
for t_date in dates:
sun = sp.calculate_sun_from_date_time(t_date)
altitudes.append(sun.altitude)
dir_norm, diff_horiz = ashrae_clear_sky(
altitudes, self._month, self._clearness)
glob_horiz = [dhr + dnr * math.sin(math.radians(alt)) for
alt, dnr, dhr in zip(altitudes, dir_norm, diff_horiz)]
return dir_norm, diff_horiz, glob_horiz | [
"def",
"radiation_values",
"(",
"self",
",",
"location",
",",
"timestep",
"=",
"1",
")",
":",
"# create sunpath and get altitude at every timestep of the design day",
"sp",
"=",
"Sunpath",
".",
"from_location",
"(",
"location",
")",
"altitudes",
"=",
"[",
"]",
"dates",
"=",
"self",
".",
"_get_datetimes",
"(",
"timestep",
")",
"for",
"t_date",
"in",
"dates",
":",
"sun",
"=",
"sp",
".",
"calculate_sun_from_date_time",
"(",
"t_date",
")",
"altitudes",
".",
"append",
"(",
"sun",
".",
"altitude",
")",
"dir_norm",
",",
"diff_horiz",
"=",
"ashrae_clear_sky",
"(",
"altitudes",
",",
"self",
".",
"_month",
",",
"self",
".",
"_clearness",
")",
"glob_horiz",
"=",
"[",
"dhr",
"+",
"dnr",
"*",
"math",
".",
"sin",
"(",
"math",
".",
"radians",
"(",
"alt",
")",
")",
"for",
"alt",
",",
"dnr",
",",
"dhr",
"in",
"zip",
"(",
"altitudes",
",",
"dir_norm",
",",
"diff_horiz",
")",
"]",
"return",
"dir_norm",
",",
"diff_horiz",
",",
"glob_horiz"
] | Lists of direct normal, diffuse horiz, and global horiz rad at each timestep. | [
"Lists",
"of",
"driect",
"normal",
"diffuse",
"horiz",
"and",
"global",
"horiz",
"rad",
"at",
"each",
"timestep",
"."
] | c08b7308077a48d5612f644943f92d5b5dade583 | https://github.com/ladybug-tools/ladybug/blob/c08b7308077a48d5612f644943f92d5b5dade583/ladybug/designday.py#L1341-L1355 |
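The two rows above show how a clear-sky condition is built from an analysis period and how `radiation_values` turns it into direct-normal, diffuse-horizontal, and global-horizontal lists. A minimal usage sketch, not taken from the dataset: the EPW file path is illustrative, and the positional constructor arguments (month, day, clearness) are inferred from `from_analysis_period` above.

```python
from ladybug.epw import EPW
from ladybug.designday import OriginalClearSkyCondition

loc = EPW('./epws/denver.epw').location    # path is illustrative; any EPW location works
sky = OriginalClearSkyCondition(6, 21, 1)  # month, day, clearness (order per from_analysis_period)
dir_norm, diff_horiz, glob_horiz = sky.radiation_values(loc, timestep=1)
print(len(dir_norm), max(glob_horiz))      # one value per timestep of the design day
```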
4,702 | ladybug-tools/ladybug | ladybug/designday.py | RevisedClearSkyCondition.from_analysis_period | def from_analysis_period(cls, analysis_period, tau_b, tau_d,
daylight_savings_indicator='No'):
""""Initialize a RevisedClearSkyCondition from an analysis_period"""
_check_analysis_period(analysis_period)
return cls(analysis_period.st_month, analysis_period.st_day, tau_b, tau_d,
daylight_savings_indicator) | python | def from_analysis_period(cls, analysis_period, tau_b, tau_d,
daylight_savings_indicator='No'):
""""Initialize a RevisedClearSkyCondition from an analysis_period"""
_check_analysis_period(analysis_period)
return cls(analysis_period.st_month, analysis_period.st_day, tau_b, tau_d,
daylight_savings_indicator) | [
"def",
"from_analysis_period",
"(",
"cls",
",",
"analysis_period",
",",
"tau_b",
",",
"tau_d",
",",
"daylight_savings_indicator",
"=",
"'No'",
")",
":",
"_check_analysis_period",
"(",
"analysis_period",
")",
"return",
"cls",
"(",
"analysis_period",
".",
"st_month",
",",
"analysis_period",
".",
"st_day",
",",
"tau_b",
",",
"tau_d",
",",
"daylight_savings_indicator",
")"
] | Initialize a RevisedClearSkyCondition from an analysis_period | [
"Initialize",
"a",
"RevisedClearSkyCondition",
"from",
"an",
"analysis_period"
] | c08b7308077a48d5612f644943f92d5b5dade583 | https://github.com/ladybug-tools/ladybug/blob/c08b7308077a48d5612f644943f92d5b5dade583/ladybug/designday.py#L1387-L1392 |
4,703 | ladybug-tools/ladybug | ladybug/_datacollectionbase.py | BaseCollection.convert_to_unit | def convert_to_unit(self, unit):
"""Convert the Data Collection to the input unit."""
self._values = self._header.data_type.to_unit(
self._values, unit, self._header.unit)
self._header._unit = unit | python | def convert_to_unit(self, unit):
"""Convert the Data Collection to the input unit."""
self._values = self._header.data_type.to_unit(
self._values, unit, self._header.unit)
self._header._unit = unit | [
"def",
"convert_to_unit",
"(",
"self",
",",
"unit",
")",
":",
"self",
".",
"_values",
"=",
"self",
".",
"_header",
".",
"data_type",
".",
"to_unit",
"(",
"self",
".",
"_values",
",",
"unit",
",",
"self",
".",
"_header",
".",
"unit",
")",
"self",
".",
"_header",
".",
"_unit",
"=",
"unit"
] | Convert the Data Collection to the input unit. | [
"Convert",
"the",
"Data",
"Collection",
"to",
"the",
"input",
"unit",
"."
] | c08b7308077a48d5612f644943f92d5b5dade583 | https://github.com/ladybug-tools/ladybug/blob/c08b7308077a48d5612f644943f92d5b5dade583/ladybug/_datacollectionbase.py#L126-L130 |
4,704 | ladybug-tools/ladybug | ladybug/_datacollectionbase.py | BaseCollection.convert_to_ip | def convert_to_ip(self):
"""Convert the Data Collection to IP units."""
self._values, self._header._unit = self._header.data_type.to_ip(
self._values, self._header.unit) | python | def convert_to_ip(self):
"""Convert the Data Collection to IP units."""
self._values, self._header._unit = self._header.data_type.to_ip(
self._values, self._header.unit) | [
"def",
"convert_to_ip",
"(",
"self",
")",
":",
"self",
".",
"_values",
",",
"self",
".",
"_header",
".",
"_unit",
"=",
"self",
".",
"_header",
".",
"data_type",
".",
"to_ip",
"(",
"self",
".",
"_values",
",",
"self",
".",
"_header",
".",
"unit",
")"
] | Convert the Data Collection to IP units. | [
"Convert",
"the",
"Data",
"Collection",
"to",
"IP",
"units",
"."
] | c08b7308077a48d5612f644943f92d5b5dade583 | https://github.com/ladybug-tools/ladybug/blob/c08b7308077a48d5612f644943f92d5b5dade583/ladybug/_datacollectionbase.py#L132-L135 |
4,705 | ladybug-tools/ladybug | ladybug/_datacollectionbase.py | BaseCollection.convert_to_si | def convert_to_si(self):
"""Convert the Data Collection to SI units."""
self._values, self._header._unit = self._header.data_type.to_si(
self._values, self._header.unit) | python | def convert_to_si(self):
"""Convert the Data Collection to SI units."""
self._values, self._header._unit = self._header.data_type.to_si(
self._values, self._header.unit) | [
"def",
"convert_to_si",
"(",
"self",
")",
":",
"self",
".",
"_values",
",",
"self",
".",
"_header",
".",
"_unit",
"=",
"self",
".",
"_header",
".",
"data_type",
".",
"to_si",
"(",
"self",
".",
"_values",
",",
"self",
".",
"_header",
".",
"unit",
")"
] | Convert the Data Collection to SI units. | [
"Convert",
"the",
"Data",
"Collection",
"to",
"SI",
"units",
"."
] | c08b7308077a48d5612f644943f92d5b5dade583 | https://github.com/ladybug-tools/ladybug/blob/c08b7308077a48d5612f644943f92d5b5dade583/ladybug/_datacollectionbase.py#L137-L140 |
4,706 | ladybug-tools/ladybug | ladybug/_datacollectionbase.py | BaseCollection.to_unit | def to_unit(self, unit):
"""Return a Data Collection in the input unit."""
new_data_c = self.duplicate()
new_data_c.convert_to_unit(unit)
return new_data_c | python | def to_unit(self, unit):
"""Return a Data Collection in the input unit."""
new_data_c = self.duplicate()
new_data_c.convert_to_unit(unit)
return new_data_c | [
"def",
"to_unit",
"(",
"self",
",",
"unit",
")",
":",
"new_data_c",
"=",
"self",
".",
"duplicate",
"(",
")",
"new_data_c",
".",
"convert_to_unit",
"(",
"unit",
")",
"return",
"new_data_c"
] | Return a Data Collection in the input unit. | [
"Return",
"a",
"Data",
"Collection",
"in",
"the",
"input",
"unit",
"."
] | c08b7308077a48d5612f644943f92d5b5dade583 | https://github.com/ladybug-tools/ladybug/blob/c08b7308077a48d5612f644943f92d5b5dade583/ladybug/_datacollectionbase.py#L142-L146 |
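The four rows above (`convert_to_unit`, `convert_to_ip`, `convert_to_si`, and `to_unit`) differ only in which unit system they target and in whether they mutate the collection or return a copy. A short sketch of that distinction, assuming an EPW dry-bulb temperature collection in 'C' and that 'F' is a valid unit string for its data type (both assumptions, not stated in the rows):

```python
from ladybug.epw import EPW

dbt = EPW('./epws/denver.epw').dry_bulb_temperature  # hourly collection, assumed to be in C
dbt_f = dbt.to_unit('F')   # returns a converted copy; dbt itself is untouched
dbt.convert_to_ip()        # converts the original collection in place
print(dbt_f.header.unit, dbt.header.unit)
```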
4,707 | ladybug-tools/ladybug | ladybug/_datacollectionbase.py | BaseCollection.is_in_data_type_range | def is_in_data_type_range(self, raise_exception=True):
"""Check if collection values are in physically possible ranges for the data_type.
If this method returns False, the Data Collection's data is
physically or mathematically impossible for the data_type."""
return self._header.data_type.is_in_range(
self._values, self._header.unit, raise_exception) | python | def is_in_data_type_range(self, raise_exception=True):
"""Check if collection values are in physically possible ranges for the data_type.
If this method returns False, the Data Collection's data is
physically or mathematically impossible for the data_type."""
return self._header.data_type.is_in_range(
self._values, self._header.unit, raise_exception) | [
"def",
"is_in_data_type_range",
"(",
"self",
",",
"raise_exception",
"=",
"True",
")",
":",
"return",
"self",
".",
"_header",
".",
"data_type",
".",
"is_in_range",
"(",
"self",
".",
"_values",
",",
"self",
".",
"_header",
".",
"unit",
",",
"raise_exception",
")"
] | Check if collection values are in physically possible ranges for the data_type.
If this method returns False, the Data Collection's data is
physically or mathematically impossible for the data_type. | [
"Check",
"if",
"collection",
"values",
"are",
"in",
"physically",
"possible",
"ranges",
"for",
"the",
"data_type",
"."
] | c08b7308077a48d5612f644943f92d5b5dade583 | https://github.com/ladybug-tools/ladybug/blob/c08b7308077a48d5612f644943f92d5b5dade583/ladybug/_datacollectionbase.py#L160-L166 |
4,708 | ladybug-tools/ladybug | ladybug/_datacollectionbase.py | BaseCollection.get_highest_values | def get_highest_values(self, count):
"""Get a list of the the x highest values of the Data Collection and their indices.
This is useful for situations where one needs to know the times of
the year when the largest values of a data collection occur. For example,
there is a European dayight code that requires an analysis for the hours
of the year with the greatest exterior illuminance level. This method
can be used to help build a shcedule for such a study.
Args:
count: Integer representing the number of highest values to account for.
Returns:
highest_values: The n highest values in data list, ordered from
highest to lowest.
highest_values_index: Indices of the n highest values in data
list, ordered from highest to lowest.
"""
count = int(count)
assert count <= len(self._values), \
'count must be smaller than or equal to values length. {} > {}.'.format(
count, len(self._values))
assert count > 0, \
'count must be greater than 0. Got {}.'.format(count)
highest_values = sorted(self._values, reverse=True)[0:count]
highest_values_index = sorted(list(xrange(len(self._values))),
key=lambda k: self._values[k],
reverse=True)[0:count]
return highest_values, highest_values_index | python | def get_highest_values(self, count):
"""Get a list of the the x highest values of the Data Collection and their indices.
This is useful for situations where one needs to know the times of
the year when the largest values of a data collection occur. For example,
there is a European dayight code that requires an analysis for the hours
of the year with the greatest exterior illuminance level. This method
can be used to help build a shcedule for such a study.
Args:
count: Integer representing the number of highest values to account for.
Returns:
highest_values: The n highest values in data list, ordered from
highest to lowest.
highest_values_index: Indices of the n highest values in data
list, ordered from highest to lowest.
"""
count = int(count)
assert count <= len(self._values), \
'count must be smaller than or equal to values length. {} > {}.'.format(
count, len(self._values))
assert count > 0, \
'count must be greater than 0. Got {}.'.format(count)
highest_values = sorted(self._values, reverse=True)[0:count]
highest_values_index = sorted(list(xrange(len(self._values))),
key=lambda k: self._values[k],
reverse=True)[0:count]
return highest_values, highest_values_index | [
"def",
"get_highest_values",
"(",
"self",
",",
"count",
")",
":",
"count",
"=",
"int",
"(",
"count",
")",
"assert",
"count",
"<=",
"len",
"(",
"self",
".",
"_values",
")",
",",
"'count must be smaller than or equal to values length. {} > {}.'",
".",
"format",
"(",
"count",
",",
"len",
"(",
"self",
".",
"_values",
")",
")",
"assert",
"count",
">",
"0",
",",
"'count must be greater than 0. Got {}.'",
".",
"format",
"(",
"count",
")",
"highest_values",
"=",
"sorted",
"(",
"self",
".",
"_values",
",",
"reverse",
"=",
"True",
")",
"[",
"0",
":",
"count",
"]",
"highest_values_index",
"=",
"sorted",
"(",
"list",
"(",
"xrange",
"(",
"len",
"(",
"self",
".",
"_values",
")",
")",
")",
",",
"key",
"=",
"lambda",
"k",
":",
"self",
".",
"_values",
"[",
"k",
"]",
",",
"reverse",
"=",
"True",
")",
"[",
"0",
":",
"count",
"]",
"return",
"highest_values",
",",
"highest_values_index"
] | Get a list of the x highest values of the Data Collection and their indices.
This is useful for situations where one needs to know the times of
the year when the largest values of a data collection occur. For example,
there is a European daylight code that requires an analysis for the hours
of the year with the greatest exterior illuminance level. This method
can be used to help build a schedule for such a study.
Args:
count: Integer representing the number of highest values to account for.
Returns:
highest_values: The n highest values in data list, ordered from
highest to lowest.
highest_values_index: Indices of the n highest values in data
list, ordered from highest to lowest. | [
"Get",
"a",
"list",
"of",
"the",
"the",
"x",
"highest",
"values",
"of",
"the",
"Data",
"Collection",
"and",
"their",
"indices",
"."
] | c08b7308077a48d5612f644943f92d5b5dade583 | https://github.com/ladybug-tools/ladybug/blob/c08b7308077a48d5612f644943f92d5b5dade583/ladybug/_datacollectionbase.py#L179-L207 |
4,709 | ladybug-tools/ladybug | ladybug/_datacollectionbase.py | BaseCollection.get_lowest_values | def get_lowest_values(self, count):
"""Get a list of the the x lowest values of the Data Collection and their indices.
This is useful for situations where one needs to know the times of
the year when the smallest values of a data collection occur.
Args:
count: Integer representing the number of lowest values to account for.
Returns:
lowest_values: The n lowest values in data list, ordered from
lowest to highest.
lowest_values_index: Indices of the n lowest values in data
list, ordered from lowest to highest.
"""
count = int(count)
assert count <= len(self._values), \
'count must be <= to Data Collection len. {} > {}.'.format(
count, len(self._values))
assert count > 0, \
'count must be greater than 0. Got {}.'.format(count)
lowest_values = sorted(self._values)[0:count]
lowest_values_index = sorted(list(xrange(len(self._values))),
key=lambda k: self._values[k])[0:count]
return lowest_values, lowest_values_index | python | def get_lowest_values(self, count):
"""Get a list of the the x lowest values of the Data Collection and their indices.
This is useful for situations where one needs to know the times of
the year when the smallest values of a data collection occur.
Args:
count: Integer representing the number of lowest values to account for.
Returns:
lowest_values: The n lowest values in data list, ordered from
lowest to highest.
lowest_values_index: Indices of the n lowest values in data
list, ordered from lowest to highest.
"""
count = int(count)
assert count <= len(self._values), \
'count must be <= to Data Collection len. {} > {}.'.format(
count, len(self._values))
assert count > 0, \
'count must be greater than 0. Got {}.'.format(count)
lowest_values = sorted(self._values)[0:count]
lowest_values_index = sorted(list(xrange(len(self._values))),
key=lambda k: self._values[k])[0:count]
return lowest_values, lowest_values_index | [
"def",
"get_lowest_values",
"(",
"self",
",",
"count",
")",
":",
"count",
"=",
"int",
"(",
"count",
")",
"assert",
"count",
"<=",
"len",
"(",
"self",
".",
"_values",
")",
",",
"'count must be <= to Data Collection len. {} > {}.'",
".",
"format",
"(",
"count",
",",
"len",
"(",
"self",
".",
"_values",
")",
")",
"assert",
"count",
">",
"0",
",",
"'count must be greater than 0. Got {}.'",
".",
"format",
"(",
"count",
")",
"lowest_values",
"=",
"sorted",
"(",
"self",
".",
"_values",
")",
"[",
"0",
":",
"count",
"]",
"lowest_values_index",
"=",
"sorted",
"(",
"list",
"(",
"xrange",
"(",
"len",
"(",
"self",
".",
"_values",
")",
")",
")",
",",
"key",
"=",
"lambda",
"k",
":",
"self",
".",
"_values",
"[",
"k",
"]",
")",
"[",
"0",
":",
"count",
"]",
"return",
"lowest_values",
",",
"lowest_values_index"
] | Get a list of the x lowest values of the Data Collection and their indices.
This is useful for situations where one needs to know the times of
the year when the smallest values of a data collection occur.
Args:
count: Integer representing the number of lowest values to account for.
Returns:
lowest_values: The n lowest values in data list, ordered from
lowest to highest.
lowest_values_index: Indices of the n lowest values in data
list, ordered from lowest to highest.
"Get",
"a",
"list",
"of",
"the",
"the",
"x",
"lowest",
"values",
"of",
"the",
"Data",
"Collection",
"and",
"their",
"indices",
"."
] | c08b7308077a48d5612f644943f92d5b5dade583 | https://github.com/ladybug-tools/ladybug/blob/c08b7308077a48d5612f644943f92d5b5dade583/ladybug/_datacollectionbase.py#L209-L233 |
4,710 | ladybug-tools/ladybug | ladybug/_datacollectionbase.py | BaseCollection.get_percentile | def get_percentile(self, percentile):
"""Get a value representing a the input percentile of the Data Collection.
Args:
percentile: A float value from 0 to 100 representing the
requested percentile.
Return:
The Data Collection value at the input percentile
"""
assert 0 <= percentile <= 100, \
'percentile must be between 0 and 100. Got {}'.format(percentile)
return self._percentile(self._values, percentile) | python | def get_percentile(self, percentile):
"""Get a value representing a the input percentile of the Data Collection.
Args:
percentile: A float value from 0 to 100 representing the
requested percentile.
Return:
The Data Collection value at the input percentile
"""
assert 0 <= percentile <= 100, \
'percentile must be between 0 and 100. Got {}'.format(percentile)
return self._percentile(self._values, percentile) | [
"def",
"get_percentile",
"(",
"self",
",",
"percentile",
")",
":",
"assert",
"0",
"<=",
"percentile",
"<=",
"100",
",",
"'percentile must be between 0 and 100. Got {}'",
".",
"format",
"(",
"percentile",
")",
"return",
"self",
".",
"_percentile",
"(",
"self",
".",
"_values",
",",
"percentile",
")"
] | Get a value representing the input percentile of the Data Collection.
Args:
percentile: A float value from 0 to 100 representing the
requested percentile.
Return:
The Data Collection value at the input percentile | [
"Get",
"a",
"value",
"representing",
"a",
"the",
"input",
"percentile",
"of",
"the",
"Data",
"Collection",
"."
] | c08b7308077a48d5612f644943f92d5b5dade583 | https://github.com/ladybug-tools/ladybug/blob/c08b7308077a48d5612f644943f92d5b5dade583/ladybug/_datacollectionbase.py#L235-L247 |
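The three rows above (`get_highest_values`, `get_lowest_values`, `get_percentile`) follow the same pattern: validate `count` or `percentile`, then sort or interpolate over the collection's values. A hedged usage sketch, again assuming an EPW dry-bulb temperature collection (the file path is illustrative):

```python
from ladybug.epw import EPW

dbt = EPW('./epws/denver.epw').dry_bulb_temperature
top_vals, top_idx = dbt.get_highest_values(10)  # the 10 largest values and their indices
low_vals, low_idx = dbt.get_lowest_values(10)   # the 10 smallest values and their indices
p99 = dbt.get_percentile(99)                    # value at the 99th percentile
```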
4,711 | ladybug-tools/ladybug | ladybug/_datacollectionbase.py | BaseCollection.get_aligned_collection | def get_aligned_collection(self, value=0, data_type=None, unit=None, mutable=None):
"""Return a Collection aligned with this one composed of one repeated value.
Aligned Data Collections are of the same Data Collection class, have the same
number of values and have matching datetimes.
Args:
value: A value to be repeated in the aligned collection values or
A list of values that has the same length as this collection.
Default: 0.
data_type: The data type of the aligned collection. Default is to
use the data type of this collection.
unit: The unit of the aligned collection. Default is to
use the unit of this collection or the base unit of the
input data_type (if it exists).
mutable: An optional Boolean to set whether the returned aligned
collection is mutable (True) or immutable (False). The default is
None, which will simply set the aligned collection to have the
same mutability as the starting collection.
"""
# set up the header of the new collection
header = self._check_aligned_header(data_type, unit)
# set up the values of the new collection
values = self._check_aligned_value(value)
# get the correct base class for the aligned collection (mutable or immutable)
if mutable is None:
collection = self.__class__(header, values, self.datetimes)
else:
if self._enumeration is None:
self._get_mutable_enumeration()
if mutable is False:
col_obj = self._enumeration['immutable'][self._collection_type]
else:
col_obj = self._enumeration['mutable'][self._collection_type]
collection = col_obj(header, values, self.datetimes)
collection._validated_a_period = self._validated_a_period
return collection | python | def get_aligned_collection(self, value=0, data_type=None, unit=None, mutable=None):
"""Return a Collection aligned with this one composed of one repeated value.
Aligned Data Collections are of the same Data Collection class, have the same
number of values and have matching datetimes.
Args:
value: A value to be repeated in the aligned collection values or
A list of values that has the same length as this collection.
Default: 0.
data_type: The data type of the aligned collection. Default is to
use the data type of this collection.
unit: The unit of the aligned collection. Default is to
use the unit of this collection or the base unit of the
input data_type (if it exists).
mutable: An optional Boolean to set whether the returned aligned
collection is mutable (True) or immutable (False). The default is
None, which will simply set the aligned collection to have the
same mutability as the starting collection.
"""
# set up the header of the new collection
header = self._check_aligned_header(data_type, unit)
# set up the values of the new collection
values = self._check_aligned_value(value)
# get the correct base class for the aligned collection (mutable or immutable)
if mutable is None:
collection = self.__class__(header, values, self.datetimes)
else:
if self._enumeration is None:
self._get_mutable_enumeration()
if mutable is False:
col_obj = self._enumeration['immutable'][self._collection_type]
else:
col_obj = self._enumeration['mutable'][self._collection_type]
collection = col_obj(header, values, self.datetimes)
collection._validated_a_period = self._validated_a_period
return collection | [
"def",
"get_aligned_collection",
"(",
"self",
",",
"value",
"=",
"0",
",",
"data_type",
"=",
"None",
",",
"unit",
"=",
"None",
",",
"mutable",
"=",
"None",
")",
":",
"# set up the header of the new collection",
"header",
"=",
"self",
".",
"_check_aligned_header",
"(",
"data_type",
",",
"unit",
")",
"# set up the values of the new collection",
"values",
"=",
"self",
".",
"_check_aligned_value",
"(",
"value",
")",
"# get the correct base class for the aligned collection (mutable or immutable)",
"if",
"mutable",
"is",
"None",
":",
"collection",
"=",
"self",
".",
"__class__",
"(",
"header",
",",
"values",
",",
"self",
".",
"datetimes",
")",
"else",
":",
"if",
"self",
".",
"_enumeration",
"is",
"None",
":",
"self",
".",
"_get_mutable_enumeration",
"(",
")",
"if",
"mutable",
"is",
"False",
":",
"col_obj",
"=",
"self",
".",
"_enumeration",
"[",
"'immutable'",
"]",
"[",
"self",
".",
"_collection_type",
"]",
"else",
":",
"col_obj",
"=",
"self",
".",
"_enumeration",
"[",
"'mutable'",
"]",
"[",
"self",
".",
"_collection_type",
"]",
"collection",
"=",
"col_obj",
"(",
"header",
",",
"values",
",",
"self",
".",
"datetimes",
")",
"collection",
".",
"_validated_a_period",
"=",
"self",
".",
"_validated_a_period",
"return",
"collection"
] | Return a Collection aligned with this one composed of one repeated value.
Aligned Data Collections are of the same Data Collection class, have the same
number of values and have matching datetimes.
Args:
value: A value to be repeated in the aligned collection values or
A list of values that has the same length as this collection.
Default: 0.
data_type: The data type of the aligned collection. Default is to
use the data type of this collection.
unit: The unit of the aligned collection. Default is to
use the unit of this collection or the base unit of the
input data_type (if it exists).
mutable: An optional Boolean to set whether the returned aligned
collection is mutable (True) or immutable (False). The default is
None, which will simply set the aligned collection to have the
same mutability as the starting collection. | [
"Return",
"a",
"Collection",
"aligned",
"with",
"this",
"one",
"composed",
"of",
"one",
"repeated",
"value",
"."
] | c08b7308077a48d5612f644943f92d5b5dade583 | https://github.com/ladybug-tools/ladybug/blob/c08b7308077a48d5612f644943f92d5b5dade583/ladybug/_datacollectionbase.py#L308-L346 |
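`get_aligned_collection` above is the usual way to create a same-length collection that can hold derived results. A sketch under the same EPW assumption; the `HumidityRatio` import and the 'fraction' unit mirror the usage shown later in the `compute_function_aligned` row.

```python
from ladybug.epw import EPW
from ladybug.datatype.percentage import HumidityRatio

dbt = EPW('./epws/denver.epw').dry_bulb_temperature
# an aligned collection of zeros with a new data type, ready to be filled with computed values
hr_empty = dbt.get_aligned_collection(value=0, data_type=HumidityRatio(), unit='fraction')
```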
4,712 | ladybug-tools/ladybug | ladybug/_datacollectionbase.py | BaseCollection.duplicate | def duplicate(self):
"""Return a copy of the current Data Collection."""
collection = self.__class__(self.header.duplicate(), self.values, self.datetimes)
collection._validated_a_period = self._validated_a_period
return collection | python | def duplicate(self):
"""Return a copy of the current Data Collection."""
collection = self.__class__(self.header.duplicate(), self.values, self.datetimes)
collection._validated_a_period = self._validated_a_period
return collection | [
"def",
"duplicate",
"(",
"self",
")",
":",
"collection",
"=",
"self",
".",
"__class__",
"(",
"self",
".",
"header",
".",
"duplicate",
"(",
")",
",",
"self",
".",
"values",
",",
"self",
".",
"datetimes",
")",
"collection",
".",
"_validated_a_period",
"=",
"self",
".",
"_validated_a_period",
"return",
"collection"
] | Return a copy of the current Data Collection. | [
"Return",
"a",
"copy",
"of",
"the",
"current",
"Data",
"Collection",
"."
] | c08b7308077a48d5612f644943f92d5b5dade583 | https://github.com/ladybug-tools/ladybug/blob/c08b7308077a48d5612f644943f92d5b5dade583/ladybug/_datacollectionbase.py#L348-L352 |
4,713 | ladybug-tools/ladybug | ladybug/_datacollectionbase.py | BaseCollection.to_json | def to_json(self):
"""Convert Data Collection to a dictionary."""
return {
'header': self.header.to_json(),
'values': self._values,
'datetimes': self.datetimes,
'validated_a_period': self._validated_a_period
} | python | def to_json(self):
"""Convert Data Collection to a dictionary."""
return {
'header': self.header.to_json(),
'values': self._values,
'datetimes': self.datetimes,
'validated_a_period': self._validated_a_period
} | [
"def",
"to_json",
"(",
"self",
")",
":",
"return",
"{",
"'header'",
":",
"self",
".",
"header",
".",
"to_json",
"(",
")",
",",
"'values'",
":",
"self",
".",
"_values",
",",
"'datetimes'",
":",
"self",
".",
"datetimes",
",",
"'validated_a_period'",
":",
"self",
".",
"_validated_a_period",
"}"
] | Convert Data Collection to a dictionary. | [
"Convert",
"Data",
"Collection",
"to",
"a",
"dictionary",
"."
] | c08b7308077a48d5612f644943f92d5b5dade583 | https://github.com/ladybug-tools/ladybug/blob/c08b7308077a48d5612f644943f92d5b5dade583/ladybug/_datacollectionbase.py#L354-L361 |
4,714 | ladybug-tools/ladybug | ladybug/_datacollectionbase.py | BaseCollection.filter_collections_by_statement | def filter_collections_by_statement(data_collections, statement):
"""Generate a filtered data collections according to a conditional statement.
Args:
data_collections: A list of aligned Data Collections to be evaluated
against the statement.
statement: A conditional statement as a string (e.g. a>25 and a%5==0).
The variable should always be named as 'a' (without quotations).
Return:
collections: A list of Data Collections that have been filtered based
on the statement.
"""
pattern = BaseCollection.pattern_from_collections_and_statement(
data_collections, statement)
collections = [coll.filter_by_pattern(pattern) for coll in data_collections]
return collections | python | def filter_collections_by_statement(data_collections, statement):
"""Generate a filtered data collections according to a conditional statement.
Args:
data_collections: A list of aligned Data Collections to be evaluated
against the statement.
statement: A conditional statement as a string (e.g. a>25 and a%5==0).
The variable should always be named as 'a' (without quotations).
Return:
collections: A list of Data Collections that have been filtered based
on the statement.
"""
pattern = BaseCollection.pattern_from_collections_and_statement(
data_collections, statement)
collections = [coll.filter_by_pattern(pattern) for coll in data_collections]
return collections | [
"def",
"filter_collections_by_statement",
"(",
"data_collections",
",",
"statement",
")",
":",
"pattern",
"=",
"BaseCollection",
".",
"pattern_from_collections_and_statement",
"(",
"data_collections",
",",
"statement",
")",
"collections",
"=",
"[",
"coll",
".",
"filter_by_pattern",
"(",
"pattern",
")",
"for",
"coll",
"in",
"data_collections",
"]",
"return",
"collections"
] | Generate filtered data collections according to a conditional statement.
Args:
data_collections: A list of aligned Data Collections to be evaluated
against the statement.
statement: A conditional statement as a string (e.g. a>25 and a%5==0).
The variable should always be named as 'a' (without quotations).
Return:
collections: A list of Data Collections that have been filtered based
on the statement. | [
"Generate",
"a",
"filtered",
"data",
"collections",
"according",
"to",
"a",
"conditional",
"statement",
"."
] | c08b7308077a48d5612f644943f92d5b5dade583 | https://github.com/ladybug-tools/ladybug/blob/c08b7308077a48d5612f644943f92d5b5dade583/ladybug/_datacollectionbase.py#L364-L380 |
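The conditional-statement filter above names its variables 'a', 'b', 'c', ... in the order the collections are passed (see `_check_conditional_statement` further down). A hedged sketch, assuming `HourlyContinuousCollection` inherits this method from `BaseCollection`; the EPW path and threshold values are arbitrary illustrations:

```python
from ladybug.epw import EPW
from ladybug.datacollection import HourlyContinuousCollection

epw = EPW('./epws/denver.epw')
# 'a' is the first collection (dry-bulb temperature), 'b' the second (relative humidity)
warm_humid = HourlyContinuousCollection.filter_collections_by_statement(
    [epw.dry_bulb_temperature, epw.relative_humidity], 'a > 18 and b > 70')
```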
4,715 | ladybug-tools/ladybug | ladybug/_datacollectionbase.py | BaseCollection.pattern_from_collections_and_statement | def pattern_from_collections_and_statement(data_collections, statement):
"""Generate a list of booleans from data collections and a conditional statement.
Args:
data_collections: A list of aligned Data Collections to be evaluated
against the statement.
statement: A conditional statement as a string (e.g. a>25 and a%5==0).
The variable should always be named as 'a' (without quotations).
Return:
pattern: A list of True/False booleans with the length of the
Data Collections where True meets the conditional statement
and False does not.
"""
BaseCollection.are_collections_aligned(data_collections)
correct_var = BaseCollection._check_conditional_statement(
statement, len(data_collections))
# replace the operators of the statement with non-alphanumeric characters
# necessary to avoid replacing the characters of the operators
num_statement_clean = BaseCollection._replace_operators(statement)
pattern = []
for i in xrange(len(data_collections[0])):
num_statement = num_statement_clean
# replace the variable names with their numerical values
for j, coll in enumerate(data_collections):
var = correct_var[j]
num_statement = num_statement.replace(var, str(coll[i]))
# put back the operators
num_statement = BaseCollection._restore_operators(num_statement)
pattern.append(eval(num_statement, {}))
return pattern | python | def pattern_from_collections_and_statement(data_collections, statement):
"""Generate a list of booleans from data collections and a conditional statement.
Args:
data_collections: A list of aligned Data Collections to be evaluated
against the statement.
statement: A conditional statement as a string (e.g. a>25 and a%5==0).
The variable should always be named as 'a' (without quotations).
Return:
pattern: A list of True/False booleans with the length of the
Data Collections where True meets the conditional statement
and False does not.
"""
BaseCollection.are_collections_aligned(data_collections)
correct_var = BaseCollection._check_conditional_statement(
statement, len(data_collections))
# replace the operators of the statement with non-alphanumeric characters
# necessary to avoid replacing the characters of the operators
num_statement_clean = BaseCollection._replace_operators(statement)
pattern = []
for i in xrange(len(data_collections[0])):
num_statement = num_statement_clean
# replace the variable names with their numerical values
for j, coll in enumerate(data_collections):
var = correct_var[j]
num_statement = num_statement.replace(var, str(coll[i]))
# put back the operators
num_statement = BaseCollection._restore_operators(num_statement)
pattern.append(eval(num_statement, {}))
return pattern | [
"def",
"pattern_from_collections_and_statement",
"(",
"data_collections",
",",
"statement",
")",
":",
"BaseCollection",
".",
"are_collections_aligned",
"(",
"data_collections",
")",
"correct_var",
"=",
"BaseCollection",
".",
"_check_conditional_statement",
"(",
"statement",
",",
"len",
"(",
"data_collections",
")",
")",
"# replace the operators of the statement with non-alphanumeric characters",
"# necessary to avoid replacing the characters of the operators",
"num_statement_clean",
"=",
"BaseCollection",
".",
"_replace_operators",
"(",
"statement",
")",
"pattern",
"=",
"[",
"]",
"for",
"i",
"in",
"xrange",
"(",
"len",
"(",
"data_collections",
"[",
"0",
"]",
")",
")",
":",
"num_statement",
"=",
"num_statement_clean",
"# replace the variable names with their numerical values",
"for",
"j",
",",
"coll",
"in",
"enumerate",
"(",
"data_collections",
")",
":",
"var",
"=",
"correct_var",
"[",
"j",
"]",
"num_statement",
"=",
"num_statement",
".",
"replace",
"(",
"var",
",",
"str",
"(",
"coll",
"[",
"i",
"]",
")",
")",
"# put back the operators",
"num_statement",
"=",
"BaseCollection",
".",
"_restore_operators",
"(",
"num_statement",
")",
"pattern",
".",
"append",
"(",
"eval",
"(",
"num_statement",
",",
"{",
"}",
")",
")",
"return",
"pattern"
] | Generate a list of booleans from data collections and a conditional statement.
Args:
data_collections: A list of aligned Data Collections to be evaluated
against the statement.
statement: A conditional statement as a string (e.g. a>25 and a%5==0).
The variable should always be named as 'a' (without quotations).
Return:
pattern: A list of True/False booleans with the length of the
Data Collections where True meets the conditional statement
and False does not. | [
"Generate",
"a",
"list",
"of",
"booleans",
"from",
"data",
"collections",
"and",
"a",
"conditional",
"statement",
"."
] | c08b7308077a48d5612f644943f92d5b5dade583 | https://github.com/ladybug-tools/ladybug/blob/c08b7308077a48d5612f644943f92d5b5dade583/ladybug/_datacollectionbase.py#L383-L415 |
4,716 | ladybug-tools/ladybug | ladybug/_datacollectionbase.py | BaseCollection.are_collections_aligned | def are_collections_aligned(data_collections, raise_exception=True):
"""Test if a series of Data Collections are aligned with one another.
Aligned Data Collections are of the same Data Collection class, have the
same number of values and have matching datetimes.
Args:
data_collections: A list of Data Collections for which you want to
test if they are all aligned with one another.
Return:
True if collections are aligned, False if not aligned
"""
if len(data_collections) > 1:
first_coll = data_collections[0]
for coll in data_collections[1:]:
if not first_coll.is_collection_aligned(coll):
if raise_exception is True:
error_msg = '{} Data Collection is not aligned with '\
'{} Data Collection.'.format(
first_coll.header.data_type, coll.header.data_type)
raise ValueError(error_msg)
return False
return True | python | def are_collections_aligned(data_collections, raise_exception=True):
"""Test if a series of Data Collections are aligned with one another.
Aligned Data Collections are of the same Data Collection class, have the
same number of values and have matching datetimes.
Args:
data_collections: A list of Data Collections for which you want to
test if they are all aligned with one another.
Return:
True if collections are aligned, False if not aligned
"""
if len(data_collections) > 1:
first_coll = data_collections[0]
for coll in data_collections[1:]:
if not first_coll.is_collection_aligned(coll):
if raise_exception is True:
error_msg = '{} Data Collection is not aligned with '\
'{} Data Collection.'.format(
first_coll.header.data_type, coll.header.data_type)
raise ValueError(error_msg)
return False
return True | [
"def",
"are_collections_aligned",
"(",
"data_collections",
",",
"raise_exception",
"=",
"True",
")",
":",
"if",
"len",
"(",
"data_collections",
")",
">",
"1",
":",
"first_coll",
"=",
"data_collections",
"[",
"0",
"]",
"for",
"coll",
"in",
"data_collections",
"[",
"1",
":",
"]",
":",
"if",
"not",
"first_coll",
".",
"is_collection_aligned",
"(",
"coll",
")",
":",
"if",
"raise_exception",
"is",
"True",
":",
"error_msg",
"=",
"'{} Data Collection is not aligned with '",
"'{} Data Collection.'",
".",
"format",
"(",
"first_coll",
".",
"header",
".",
"data_type",
",",
"coll",
".",
"header",
".",
"data_type",
")",
"raise",
"ValueError",
"(",
"error_msg",
")",
"return",
"False",
"return",
"True"
] | Test if a series of Data Collections are aligned with one another.
Aligned Data Collections are of the same Data Collection class, have the
same number of values and have matching datetimes.
Args:
data_collections: A list of Data Collections for which you want to
test if they are all aligned with one another.
Return:
True if collections are aligned, False if not aligned | [
"Test",
"if",
"a",
"series",
"of",
"Data",
"Collections",
"are",
"aligned",
"with",
"one",
"another",
"."
] | c08b7308077a48d5612f644943f92d5b5dade583 | https://github.com/ladybug-tools/ladybug/blob/c08b7308077a48d5612f644943f92d5b5dade583/ladybug/_datacollectionbase.py#L418-L441 |
4,717 | ladybug-tools/ladybug | ladybug/_datacollectionbase.py | BaseCollection.compute_function_aligned | def compute_function_aligned(funct, data_collections, data_type, unit):
"""Compute a function with a list of aligned data collections or individual values.
Args:
funct: A function with a single numerical value as output and one or
more numerical values as input.
data_collections: A list with a length equal to the number of arguments
for the function. Items of the list can be either Data Collections
or individual values to be used at each datetime of other collections.
data_type: An instance of a Ladybug data type that describes the results
of the funct.
unit: The units of the funct results.
Return:
A Data Collection with the results of the function. If all items in this list of
data_collections are individual values, only a single value will be returned.
Usage:
from ladybug.datacollection import HourlyContinuousCollection
from ladybug.epw import EPW
from ladybug.psychrometrics import humid_ratio_from_db_rh
from ladybug.datatype.percentage import HumidityRatio
epw_file_path = './epws/denver.epw'
denver_epw = EPW(epw_file_path)
pressure_at_denver = 85000
hr_inputs = [denver_epw.dry_bulb_temperature,
denver_epw.relative_humidity,
pressure_at_denver]
humid_ratio = HourlyContinuousCollection.compute_function_aligned(
humid_ratio_from_db_rh, hr_inputs, HumidityRatio(), 'fraction')
# humid_ratio will be a Data Collection of humidity ratios at Denver
"""
# check that all inputs are either data collections or floats
data_colls = []
for i, func_input in enumerate(data_collections):
if isinstance(func_input, BaseCollection):
data_colls.append(func_input)
else:
try:
data_collections[i] = float(func_input)
except ValueError:
raise TypeError('Expected a number or a Data Collection. '
'Got {}'.format(type(func_input)))
# run the function and return the result
if len(data_colls) == 0:
return funct(*data_collections)
else:
BaseCollection.are_collections_aligned(data_colls)
val_len = len(data_colls[0].values)
for i, col in enumerate(data_collections):
data_collections[i] = [col] * val_len if isinstance(col, float) else col
result = data_colls[0].get_aligned_collection(data_type=data_type, unit=unit)
for i in xrange(val_len):
result[i] = funct(*[col[i] for col in data_collections])
return result | python | def compute_function_aligned(funct, data_collections, data_type, unit):
"""Compute a function with a list of aligned data collections or individual values.
Args:
funct: A function with a single numerical value as output and one or
more numerical values as input.
data_collections: A list with a length equal to the number of arguments
for the function. Items of the list can be either Data Collections
or individual values to be used at each datetime of other collections.
data_type: An instance of a Ladybug data type that describes the results
of the funct.
unit: The units of the funct results.
Return:
A Data Collection with the results of the function. If all items in this list of
data_collections are individual values, only a single value will be returned.
Usage:
from ladybug.datacollection import HourlyContinuousCollection
from ladybug.epw import EPW
from ladybug.psychrometrics import humid_ratio_from_db_rh
from ladybug.datatype.percentage import HumidityRatio
epw_file_path = './epws/denver.epw'
denver_epw = EPW(epw_file_path)
pressure_at_denver = 85000
hr_inputs = [denver_epw.dry_bulb_temperature,
denver_epw.relative_humidity,
pressure_at_denver]
humid_ratio = HourlyContinuousCollection.compute_function_aligned(
humid_ratio_from_db_rh, hr_inputs, HumidityRatio(), 'fraction')
# humid_ratio will be a Data Collection of humidity ratios at Denver
"""
# check that all inputs are either data collections or floats
data_colls = []
for i, func_input in enumerate(data_collections):
if isinstance(func_input, BaseCollection):
data_colls.append(func_input)
else:
try:
data_collections[i] = float(func_input)
except ValueError:
raise TypeError('Expected a number or a Data Collection. '
'Got {}'.format(type(func_input)))
# run the function and return the result
if len(data_colls) == 0:
return funct(*data_collections)
else:
BaseCollection.are_collections_aligned(data_colls)
val_len = len(data_colls[0].values)
for i, col in enumerate(data_collections):
data_collections[i] = [col] * val_len if isinstance(col, float) else col
result = data_colls[0].get_aligned_collection(data_type=data_type, unit=unit)
for i in xrange(val_len):
result[i] = funct(*[col[i] for col in data_collections])
return result | [
"def",
"compute_function_aligned",
"(",
"funct",
",",
"data_collections",
",",
"data_type",
",",
"unit",
")",
":",
"# check that all inputs are either data collections or floats",
"data_colls",
"=",
"[",
"]",
"for",
"i",
",",
"func_input",
"in",
"enumerate",
"(",
"data_collections",
")",
":",
"if",
"isinstance",
"(",
"func_input",
",",
"BaseCollection",
")",
":",
"data_colls",
".",
"append",
"(",
"func_input",
")",
"else",
":",
"try",
":",
"data_collections",
"[",
"i",
"]",
"=",
"float",
"(",
"func_input",
")",
"except",
"ValueError",
":",
"raise",
"TypeError",
"(",
"'Expected a number or a Data Colleciton. '",
"'Got {}'",
".",
"format",
"(",
"type",
"(",
"func_input",
")",
")",
")",
"# run the function and return the result",
"if",
"len",
"(",
"data_colls",
")",
"==",
"0",
":",
"return",
"funct",
"(",
"*",
"data_collections",
")",
"else",
":",
"BaseCollection",
".",
"are_collections_aligned",
"(",
"data_colls",
")",
"val_len",
"=",
"len",
"(",
"data_colls",
"[",
"0",
"]",
".",
"values",
")",
"for",
"i",
",",
"col",
"in",
"enumerate",
"(",
"data_collections",
")",
":",
"data_collections",
"[",
"i",
"]",
"=",
"[",
"col",
"]",
"*",
"val_len",
"if",
"isinstance",
"(",
"col",
",",
"float",
")",
"else",
"col",
"result",
"=",
"data_colls",
"[",
"0",
"]",
".",
"get_aligned_collection",
"(",
"data_type",
"=",
"data_type",
",",
"unit",
"=",
"unit",
")",
"for",
"i",
"in",
"xrange",
"(",
"val_len",
")",
":",
"result",
"[",
"i",
"]",
"=",
"funct",
"(",
"*",
"[",
"col",
"[",
"i",
"]",
"for",
"col",
"in",
"data_collections",
"]",
")",
"return",
"result"
] | Compute a function with a list of aligned data collections or individual values.
Args:
funct: A function with a single numerical value as output and one or
more numerical values as input.
data_collections: A list with a length equal to the number of arguments
for the function. Items of the list can be either Data Collections
or individual values to be used at each datetime of other collections.
data_type: An instance of a Ladybug data type that describes the results
of the funct.
unit: The units of the funct results.
Return:
A Data Collection with the results of the function. If all items in this list of
data_collections are individual values, only a single value will be returned.
Usage:
from ladybug.datacollection import HourlyContinuousCollection
from ladybug.epw import EPW
from ladybug.psychrometrics import humid_ratio_from_db_rh
from ladybug.datatype.percentage import HumidityRatio
epw_file_path = './epws/denver.epw'
denver_epw = EPW(epw_file_path)
pressure_at_denver = 85000
hr_inputs = [denver_epw.dry_bulb_temperature,
denver_epw.relative_humidity,
pressure_at_denver]
humid_ratio = HourlyContinuousCollection.compute_function_aligned(
humid_ratio_from_db_rh, hr_inputs, HumidityRatio(), 'fraction')
# humid_ratio will be a Data Collection of humidity ratios at Denver | [
"Compute",
"a",
"function",
"with",
"a",
"list",
"of",
"aligned",
"data",
"collections",
"or",
"individual",
"values",
"."
] | c08b7308077a48d5612f644943f92d5b5dade583 | https://github.com/ladybug-tools/ladybug/blob/c08b7308077a48d5612f644943f92d5b5dade583/ladybug/_datacollectionbase.py#L444-L500 |
4,718 | ladybug-tools/ladybug | ladybug/_datacollectionbase.py | BaseCollection._check_conditional_statement | def _check_conditional_statement(statement, num_collections):
"""Method to check conditional statements to be sure that they are valid.
Args:
statement: A conditional statement as a string (e.g. a>25 and a%5==0).
The variable should always be named as 'a' (without quotations).
num_collections: An integer representing the number of data collections
that the statement will be evaluating.
Return:
correct_var: A list of the correct variable names that should be
used within the statement (eg. ['a', 'b', 'c'])
"""
# Determine what the list of variables should be based on the num_collections
correct_var = list(ascii_lowercase)[:num_collections]
# Clean out the operators of the statement
st_statement = BaseCollection._remove_operators(statement)
parsed_st = [s for s in st_statement if s.isalpha()]
# Perform the check
for var in parsed_st:
if var not in correct_var:
raise ValueError(
'Invalid conditional statement: {}\n '
'Statement should be a valid Python statement'
' and the variables should be named as follows: {}'.format(
statement, ', '.join(correct_var))
)
return correct_var | python | def _check_conditional_statement(statement, num_collections):
"""Method to check conditional statements to be sure that they are valid.
Args:
statement: A conditional statement as a string (e.g. a>25 and a%5==0).
The variable should always be named as 'a' (without quotations).
num_collections: An integer representing the number of data collections
that the statement will be evaluating.
Return:
correct_var: A list of the correct variable names that should be
used within the statement (eg. ['a', 'b', 'c'])
"""
# Determine what the list of variables should be based on the num_collections
correct_var = list(ascii_lowercase)[:num_collections]
# Clean out the operators of the statement
st_statement = BaseCollection._remove_operators(statement)
parsed_st = [s for s in st_statement if s.isalpha()]
# Perform the check
for var in parsed_st:
if var not in correct_var:
raise ValueError(
'Invalid conditional statement: {}\n '
'Statement should be a valid Python statement'
' and the variables should be named as follows: {}'.format(
statement, ', '.join(correct_var))
)
return correct_var | [
"def",
"_check_conditional_statement",
"(",
"statement",
",",
"num_collections",
")",
":",
"# Determine what the list of variables should be based on the num_collections",
"correct_var",
"=",
"list",
"(",
"ascii_lowercase",
")",
"[",
":",
"num_collections",
"]",
"# Clean out the operators of the statement",
"st_statement",
"=",
"BaseCollection",
".",
"_remove_operators",
"(",
"statement",
")",
"parsed_st",
"=",
"[",
"s",
"for",
"s",
"in",
"st_statement",
"if",
"s",
".",
"isalpha",
"(",
")",
"]",
"# Perform the check",
"for",
"var",
"in",
"parsed_st",
":",
"if",
"var",
"not",
"in",
"correct_var",
":",
"raise",
"ValueError",
"(",
"'Invalid conditional statement: {}\\n '",
"'Statement should be a valid Python statement'",
"' and the variables should be named as follows: {}'",
".",
"format",
"(",
"statement",
",",
"', '",
".",
"join",
"(",
"correct_var",
")",
")",
")",
"return",
"correct_var"
] | Method to check conditional statements to be sure that they are valid.
Args:
statement: A conditional statement as a string (e.g. a>25 and a%5==0).
The variable should always be named as 'a' (without quotations).
num_collections: An integer representing the number of data collections
that the statement will be evaluating.
Return:
correct_var: A list of the correct variable names that should be
used within the statement (eg. ['a', 'b', 'c']) | [
"Method",
"to",
"check",
"conditional",
"statements",
"to",
"be",
"sure",
"that",
"they",
"are",
"valid",
"."
] | c08b7308077a48d5612f644943f92d5b5dade583 | https://github.com/ladybug-tools/ladybug/blob/c08b7308077a48d5612f644943f92d5b5dade583/ladybug/_datacollectionbase.py#L503-L532 |
4,719 | ladybug-tools/ladybug | ladybug/_datacollectionbase.py | BaseCollection._filter_by_statement | def _filter_by_statement(self, statement):
"""Filter the data collection based on a conditional statement."""
self.__class__._check_conditional_statement(statement, 1)
_filt_values, _filt_datetimes = [], []
for i, a in enumerate(self._values):
if eval(statement, {'a': a}):
_filt_values.append(a)
_filt_datetimes.append(self.datetimes[i])
return _filt_values, _filt_datetimes | python | def _filter_by_statement(self, statement):
"""Filter the data collection based on a conditional statement."""
self.__class__._check_conditional_statement(statement, 1)
_filt_values, _filt_datetimes = [], []
for i, a in enumerate(self._values):
if eval(statement, {'a': a}):
_filt_values.append(a)
_filt_datetimes.append(self.datetimes[i])
return _filt_values, _filt_datetimes | [
"def",
"_filter_by_statement",
"(",
"self",
",",
"statement",
")",
":",
"self",
".",
"__class__",
".",
"_check_conditional_statement",
"(",
"statement",
",",
"1",
")",
"_filt_values",
",",
"_filt_datetimes",
"=",
"[",
"]",
",",
"[",
"]",
"for",
"i",
",",
"a",
"in",
"enumerate",
"(",
"self",
".",
"_values",
")",
":",
"if",
"eval",
"(",
"statement",
",",
"{",
"'a'",
":",
"a",
"}",
")",
":",
"_filt_values",
".",
"append",
"(",
"a",
")",
"_filt_datetimes",
".",
"append",
"(",
"self",
".",
"datetimes",
"[",
"i",
"]",
")",
"return",
"_filt_values",
",",
"_filt_datetimes"
] | Filter the data collection based on a conditional statement. | [
"Filter",
"the",
"data",
"collection",
"based",
"on",
"a",
"conditional",
"statement",
"."
] | c08b7308077a48d5612f644943f92d5b5dade583 | https://github.com/ladybug-tools/ladybug/blob/c08b7308077a48d5612f644943f92d5b5dade583/ladybug/_datacollectionbase.py#L552-L560 |
4,720 | ladybug-tools/ladybug | ladybug/_datacollectionbase.py | BaseCollection._filter_by_pattern | def _filter_by_pattern(self, pattern):
"""Filter the Filter the Data Collection based on a list of booleans."""
try:
_len = len(pattern)
except TypeError:
raise TypeError("pattern is not a list of Booleans. Got {}".format(
type(pattern)))
_filt_values = [d for i, d in enumerate(self._values) if pattern[i % _len]]
_filt_datetimes = [d for i, d in enumerate(self.datetimes) if pattern[i % _len]]
return _filt_values, _filt_datetimes | python | def _filter_by_pattern(self, pattern):
"""Filter the Filter the Data Collection based on a list of booleans."""
try:
_len = len(pattern)
except TypeError:
raise TypeError("pattern is not a list of Booleans. Got {}".format(
type(pattern)))
_filt_values = [d for i, d in enumerate(self._values) if pattern[i % _len]]
_filt_datetimes = [d for i, d in enumerate(self.datetimes) if pattern[i % _len]]
return _filt_values, _filt_datetimes | [
"def",
"_filter_by_pattern",
"(",
"self",
",",
"pattern",
")",
":",
"try",
":",
"_len",
"=",
"len",
"(",
"pattern",
")",
"except",
"TypeError",
":",
"raise",
"TypeError",
"(",
"\"pattern is not a list of Booleans. Got {}\"",
".",
"format",
"(",
"type",
"(",
"pattern",
")",
")",
")",
"_filt_values",
"=",
"[",
"d",
"for",
"i",
",",
"d",
"in",
"enumerate",
"(",
"self",
".",
"_values",
")",
"if",
"pattern",
"[",
"i",
"%",
"_len",
"]",
"]",
"_filt_datetimes",
"=",
"[",
"d",
"for",
"i",
",",
"d",
"in",
"enumerate",
"(",
"self",
".",
"datetimes",
")",
"if",
"pattern",
"[",
"i",
"%",
"_len",
"]",
"]",
"return",
"_filt_values",
",",
"_filt_datetimes"
] | Filter the Data Collection based on a list of booleans. | [
"Filter",
"the",
"Filter",
"the",
"Data",
"Collection",
"based",
"on",
"a",
"list",
"of",
"booleans",
"."
] | c08b7308077a48d5612f644943f92d5b5dade583 | https://github.com/ladybug-tools/ladybug/blob/c08b7308077a48d5612f644943f92d5b5dade583/ladybug/_datacollectionbase.py#L562-L571 |
4,721 | ladybug-tools/ladybug | ladybug/_datacollectionbase.py | BaseCollection._check_aligned_header | def _check_aligned_header(self, data_type, unit):
"""Check the header inputs whenever get_aligned_collection is called."""
if data_type is not None:
assert isinstance(data_type, DataTypeBase), \
'data_type must be a Ladybug DataType. Got {}'.format(type(data_type))
if unit is None:
unit = data_type.units[0]
else:
data_type = self.header.data_type
unit = unit or self.header.unit
return Header(data_type, unit, self.header.analysis_period, self.header.metadata) | python | def _check_aligned_header(self, data_type, unit):
"""Check the header inputs whenever get_aligned_collection is called."""
if data_type is not None:
assert isinstance(data_type, DataTypeBase), \
'data_type must be a Ladybug DataType. Got {}'.format(type(data_type))
if unit is None:
unit = data_type.units[0]
else:
data_type = self.header.data_type
unit = unit or self.header.unit
return Header(data_type, unit, self.header.analysis_period, self.header.metadata) | [
"def",
"_check_aligned_header",
"(",
"self",
",",
"data_type",
",",
"unit",
")",
":",
"if",
"data_type",
"is",
"not",
"None",
":",
"assert",
"isinstance",
"(",
"data_type",
",",
"DataTypeBase",
")",
",",
"'data_type must be a Ladybug DataType. Got {}'",
".",
"format",
"(",
"type",
"(",
"data_type",
")",
")",
"if",
"unit",
"is",
"None",
":",
"unit",
"=",
"data_type",
".",
"units",
"[",
"0",
"]",
"else",
":",
"data_type",
"=",
"self",
".",
"header",
".",
"data_type",
"unit",
"=",
"unit",
"or",
"self",
".",
"header",
".",
"unit",
"return",
"Header",
"(",
"data_type",
",",
"unit",
",",
"self",
".",
"header",
".",
"analysis_period",
",",
"self",
".",
"header",
".",
"metadata",
")"
] | Check the header inputs whenever get_aligned_collection is called. | [
"Check",
"the",
"header",
"inputs",
"whenever",
"get_aligned_collection",
"is",
"called",
"."
] | c08b7308077a48d5612f644943f92d5b5dade583 | https://github.com/ladybug-tools/ladybug/blob/c08b7308077a48d5612f644943f92d5b5dade583/ladybug/_datacollectionbase.py#L583-L593 |
4,722 | ladybug-tools/ladybug | ladybug/_datacollectionbase.py | BaseCollection._check_aligned_value | def _check_aligned_value(self, value):
"""Check the value input whenever get_aligned_collection is called."""
if isinstance(value, Iterable) and not isinstance(
value, (str, dict, bytes, bytearray)):
assert len(value) == len(self._values), "Length of value ({}) must match "\
"the length of this collection's values ({})".format(
len(value), len(self._values))
values = value
else:
values = [value] * len(self._values)
return values | python | def _check_aligned_value(self, value):
"""Check the value input whenever get_aligned_collection is called."""
if isinstance(value, Iterable) and not isinstance(
value, (str, dict, bytes, bytearray)):
assert len(value) == len(self._values), "Length of value ({}) must match "\
"the length of this collection's values ({})".format(
len(value), len(self._values))
values = value
else:
values = [value] * len(self._values)
return values | [
"def",
"_check_aligned_value",
"(",
"self",
",",
"value",
")",
":",
"if",
"isinstance",
"(",
"value",
",",
"Iterable",
")",
"and",
"not",
"isinstance",
"(",
"value",
",",
"(",
"str",
",",
"dict",
",",
"bytes",
",",
"bytearray",
")",
")",
":",
"assert",
"len",
"(",
"value",
")",
"==",
"len",
"(",
"self",
".",
"_values",
")",
",",
"\"Length of value ({}) must match \"",
"\"the length of this collection's values ({})\"",
".",
"format",
"(",
"len",
"(",
"value",
")",
",",
"len",
"(",
"self",
".",
"_values",
")",
")",
"values",
"=",
"value",
"else",
":",
"values",
"=",
"[",
"value",
"]",
"*",
"len",
"(",
"self",
".",
"_values",
")",
"return",
"values"
] | Check the value input whenever get_aligned_collection is called. | [
"Check",
"the",
"value",
"input",
"whenever",
"get_aligned_collection",
"is",
"called",
"."
] | c08b7308077a48d5612f644943f92d5b5dade583 | https://github.com/ladybug-tools/ladybug/blob/c08b7308077a48d5612f644943f92d5b5dade583/ladybug/_datacollectionbase.py#L595-L605 |
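A rough standalone sketch of the scalar-versus-sequence handling above: a single number is broadcast to the collection length, while a sequence must already match it (the helper name and values are hypothetical).
from collections.abc import Iterable

def broadcast(value, n):
    # sequences (but not strings/dicts/bytes) must already match the target length
    if isinstance(value, Iterable) and not isinstance(value, (str, dict, bytes, bytearray)):
        assert len(value) == n, 'length mismatch'
        return list(value)
    return [value] * n  # a scalar is repeated n times

print(broadcast(0.5, 4))        # [0.5, 0.5, 0.5, 0.5]
print(broadcast([1, 2, 3], 3))  # [1, 2, 3]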
4,723 | ladybug-tools/ladybug | ladybug/dt.py | DateTime.from_json | def from_json(cls, data):
"""Creat datetime from a dictionary.
Args:
data: {
'month': A value for month between 1-12. (Defualt: 1)
'day': A value for day between 1-31. (Defualt: 1)
'hour': A value for hour between 0-23. (Defualt: 0)
'minute': A value for month between 0-59. (Defualt: 0)
}
"""
if 'month' not in data:
data['month'] = 1
if 'day' not in data:
data['day'] = 1
if 'hour' not in data:
data['hour'] = 0
if 'minute' not in data:
data['minute'] = 0
if 'year' not in data:
data['year'] = 2017
leap_year = True if int(data['year']) == 2016 else False
return cls(data['month'], data['day'], data['hour'], data['minute'], leap_year) | python | def from_json(cls, data):
"""Creat datetime from a dictionary.
Args:
data: {
'month': A value for month between 1-12. (Defualt: 1)
'day': A value for day between 1-31. (Defualt: 1)
'hour': A value for hour between 0-23. (Defualt: 0)
'minute': A value for month between 0-59. (Defualt: 0)
}
"""
if 'month' not in data:
data['month'] = 1
if 'day' not in data:
data['day'] = 1
if 'hour' not in data:
data['hour'] = 0
if 'minute' not in data:
data['minute'] = 0
if 'year' not in data:
data['year'] = 2017
leap_year = True if int(data['year']) == 2016 else False
return cls(data['month'], data['day'], data['hour'], data['minute'], leap_year) | [
"def",
"from_json",
"(",
"cls",
",",
"data",
")",
":",
"if",
"'month'",
"not",
"in",
"data",
":",
"data",
"[",
"'month'",
"]",
"=",
"1",
"if",
"'day'",
"not",
"in",
"data",
":",
"data",
"[",
"'day'",
"]",
"=",
"1",
"if",
"'hour'",
"not",
"in",
"data",
":",
"data",
"[",
"'hour'",
"]",
"=",
"0",
"if",
"'minute'",
"not",
"in",
"data",
":",
"data",
"[",
"'minute'",
"]",
"=",
"0",
"if",
"'year'",
"not",
"in",
"data",
":",
"data",
"[",
"'year'",
"]",
"=",
"2017",
"leap_year",
"=",
"True",
"if",
"int",
"(",
"data",
"[",
"'year'",
"]",
")",
"==",
"2016",
"else",
"False",
"return",
"cls",
"(",
"data",
"[",
"'month'",
"]",
",",
"data",
"[",
"'day'",
"]",
",",
"data",
"[",
"'hour'",
"]",
",",
"data",
"[",
"'minute'",
"]",
",",
"leap_year",
")"
] | Create datetime from a dictionary.
Args:
data: {
'month': A value for month between 1-12. (Default: 1)
'day': A value for day between 1-31. (Default: 1)
'hour': A value for hour between 0-23. (Default: 0)
'minute': A value for minute between 0-59. (Default: 0)
} | [
"Creat",
"datetime",
"from",
"a",
"dictionary",
"."
] | c08b7308077a48d5612f644943f92d5b5dade583 | https://github.com/ladybug-tools/ladybug/blob/c08b7308077a48d5612f644943f92d5b5dade583/ladybug/dt.py#L43-L70 |
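A possible usage sketch for the classmethod above, assuming the ladybug package is installed and importable as ladybug.dt; omitted keys fall back to the defaults listed in the docstring.
from ladybug.dt import DateTime

dt = DateTime.from_json({'month': 12, 'day': 31, 'hour': 12})  # minute defaults to 0
print(dt)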
4,724 | ladybug-tools/ladybug | ladybug/dt.py | DateTime.from_hoy | def from_hoy(cls, hoy, leap_year=False):
"""Create Ladybug Datetime from an hour of the year.
Args:
hoy: A float value 0 <= and < 8760
"""
return cls.from_moy(round(hoy * 60), leap_year) | python | def from_hoy(cls, hoy, leap_year=False):
"""Create Ladybug Datetime from an hour of the year.
Args:
hoy: A float value 0 <= and < 8760
"""
return cls.from_moy(round(hoy * 60), leap_year) | [
"def",
"from_hoy",
"(",
"cls",
",",
"hoy",
",",
"leap_year",
"=",
"False",
")",
":",
"return",
"cls",
".",
"from_moy",
"(",
"round",
"(",
"hoy",
"*",
"60",
")",
",",
"leap_year",
")"
] | Create Ladybug Datetime from an hour of the year.
Args:
hoy: A float value 0 <= and < 8760 | [
"Create",
"Ladybug",
"Datetime",
"from",
"an",
"hour",
"of",
"the",
"year",
"."
] | c08b7308077a48d5612f644943f92d5b5dade583 | https://github.com/ladybug-tools/ladybug/blob/c08b7308077a48d5612f644943f92d5b5dade583/ladybug/dt.py#L73-L79 |
4,725 | ladybug-tools/ladybug | ladybug/dt.py | DateTime.from_moy | def from_moy(cls, moy, leap_year=False):
"""Create Ladybug Datetime from a minute of the year.
Args:
moy: An integer value 0 <= and < 525600
"""
if not leap_year:
num_of_minutes_until_month = (0, 44640, 84960, 129600, 172800, 217440,
260640, 305280, 349920, 393120, 437760,
480960, 525600)
else:
num_of_minutes_until_month = (0, 44640, 84960 + 1440, 129600 + 1440,
172800 + 1440, 217440 + 1440, 260640 + 1440,
305280 + 1440, 349920 + 1440, 393120 + 1440,
437760 + 1440, 480960 + 1440, 525600 + 1440)
# find month
for monthCount in range(12):
if int(moy) < num_of_minutes_until_month[monthCount + 1]:
month = monthCount + 1
break
try:
day = int((moy - num_of_minutes_until_month[month - 1]) / (60 * 24)) + 1
except UnboundLocalError:
raise ValueError(
"moy must be positive and smaller than 525600. Invalid input %d" % (moy)
)
else:
hour = int((moy / 60) % 24)
minute = int(moy % 60)
return cls(month, day, hour, minute, leap_year) | python | def from_moy(cls, moy, leap_year=False):
"""Create Ladybug Datetime from a minute of the year.
Args:
moy: An integer value 0 <= and < 525600
"""
if not leap_year:
num_of_minutes_until_month = (0, 44640, 84960, 129600, 172800, 217440,
260640, 305280, 349920, 393120, 437760,
480960, 525600)
else:
num_of_minutes_until_month = (0, 44640, 84960 + 1440, 129600 + 1440,
172800 + 1440, 217440 + 1440, 260640 + 1440,
305280 + 1440, 349920 + 1440, 393120 + 1440,
437760 + 1440, 480960 + 1440, 525600 + 1440)
# find month
for monthCount in range(12):
if int(moy) < num_of_minutes_until_month[monthCount + 1]:
month = monthCount + 1
break
try:
day = int((moy - num_of_minutes_until_month[month - 1]) / (60 * 24)) + 1
except UnboundLocalError:
raise ValueError(
"moy must be positive and smaller than 525600. Invalid input %d" % (moy)
)
else:
hour = int((moy / 60) % 24)
minute = int(moy % 60)
return cls(month, day, hour, minute, leap_year) | [
"def",
"from_moy",
"(",
"cls",
",",
"moy",
",",
"leap_year",
"=",
"False",
")",
":",
"if",
"not",
"leap_year",
":",
"num_of_minutes_until_month",
"=",
"(",
"0",
",",
"44640",
",",
"84960",
",",
"129600",
",",
"172800",
",",
"217440",
",",
"260640",
",",
"305280",
",",
"349920",
",",
"393120",
",",
"437760",
",",
"480960",
",",
"525600",
")",
"else",
":",
"num_of_minutes_until_month",
"=",
"(",
"0",
",",
"44640",
",",
"84960",
"+",
"1440",
",",
"129600",
"+",
"1440",
",",
"172800",
"+",
"1440",
",",
"217440",
"+",
"1440",
",",
"260640",
"+",
"1440",
",",
"305280",
"+",
"1440",
",",
"349920",
"+",
"1440",
",",
"393120",
"+",
"1440",
",",
"437760",
"+",
"1440",
",",
"480960",
"+",
"1440",
",",
"525600",
"+",
"1440",
")",
"# find month",
"for",
"monthCount",
"in",
"range",
"(",
"12",
")",
":",
"if",
"int",
"(",
"moy",
")",
"<",
"num_of_minutes_until_month",
"[",
"monthCount",
"+",
"1",
"]",
":",
"month",
"=",
"monthCount",
"+",
"1",
"break",
"try",
":",
"day",
"=",
"int",
"(",
"(",
"moy",
"-",
"num_of_minutes_until_month",
"[",
"month",
"-",
"1",
"]",
")",
"/",
"(",
"60",
"*",
"24",
")",
")",
"+",
"1",
"except",
"UnboundLocalError",
":",
"raise",
"ValueError",
"(",
"\"moy must be positive and smaller than 525600. Invalid input %d\"",
"%",
"(",
"moy",
")",
")",
"else",
":",
"hour",
"=",
"int",
"(",
"(",
"moy",
"/",
"60",
")",
"%",
"24",
")",
"minute",
"=",
"int",
"(",
"moy",
"%",
"60",
")",
"return",
"cls",
"(",
"month",
",",
"day",
",",
"hour",
",",
"minute",
",",
"leap_year",
")"
] | Create Ladybug Datetime from a minute of the year.
Args:
moy: An integer value 0 <= and < 525600 | [
"Create",
"Ladybug",
"Datetime",
"from",
"a",
"minute",
"of",
"the",
"year",
"."
] | c08b7308077a48d5612f644943f92d5b5dade583 | https://github.com/ladybug-tools/ladybug/blob/c08b7308077a48d5612f644943f92d5b5dade583/ladybug/dt.py#L82-L112 |
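A standalone re-derivation of the month lookup used above for a non-leap year, with a made-up minute-of-year value; it mirrors the cumulative-minutes table rather than calling ladybug.
cum_minutes = (0, 44640, 84960, 129600, 172800, 217440, 260640,
               305280, 349920, 393120, 437760, 480960, 525600)

def month_day_from_moy(moy):
    # first month whose cumulative minute count exceeds moy
    month = next(m + 1 for m in range(12) if moy < cum_minutes[m + 1])
    day = (moy - cum_minutes[month - 1]) // (60 * 24) + 1
    return month, day, (moy // 60) % 24, moy % 60

print(month_day_from_moy(6030))  # (1, 5, 4, 30) -> Jan 5, 04:30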
4,726 | ladybug-tools/ladybug | ladybug/dt.py | DateTime.from_date_time_string | def from_date_time_string(cls, datetime_string, leap_year=False):
"""Create Ladybug DateTime from a DateTime string.
Usage:
dt = DateTime.from_date_time_string("31 Dec 12:00")
"""
dt = datetime.strptime(datetime_string, '%d %b %H:%M')
return cls(dt.month, dt.day, dt.hour, dt.minute, leap_year) | python | def from_date_time_string(cls, datetime_string, leap_year=False):
"""Create Ladybug DateTime from a DateTime string.
Usage:
dt = DateTime.from_date_time_string("31 Dec 12:00")
"""
dt = datetime.strptime(datetime_string, '%d %b %H:%M')
return cls(dt.month, dt.day, dt.hour, dt.minute, leap_year) | [
"def",
"from_date_time_string",
"(",
"cls",
",",
"datetime_string",
",",
"leap_year",
"=",
"False",
")",
":",
"dt",
"=",
"datetime",
".",
"strptime",
"(",
"datetime_string",
",",
"'%d %b %H:%M'",
")",
"return",
"cls",
"(",
"dt",
".",
"month",
",",
"dt",
".",
"day",
",",
"dt",
".",
"hour",
",",
"dt",
".",
"minute",
",",
"leap_year",
")"
] | Create Ladybug DateTime from a DateTime string.
Usage:
dt = DateTime.from_date_time_string("31 Dec 12:00") | [
"Create",
"Ladybug",
"DateTime",
"from",
"a",
"DateTime",
"string",
"."
] | c08b7308077a48d5612f644943f92d5b5dade583 | https://github.com/ladybug-tools/ladybug/blob/c08b7308077a48d5612f644943f92d5b5dade583/ladybug/dt.py#L115-L123 |
4,727 | ladybug-tools/ladybug | ladybug/dt.py | DateTime._calculate_hour_and_minute | def _calculate_hour_and_minute(float_hour):
"""Calculate hour and minutes as integers from a float hour."""
hour, minute = int(float_hour), int(round((float_hour - int(float_hour)) * 60))
if minute == 60:
return hour + 1, 0
else:
return hour, minute | python | def _calculate_hour_and_minute(float_hour):
"""Calculate hour and minutes as integers from a float hour."""
hour, minute = int(float_hour), int(round((float_hour - int(float_hour)) * 60))
if minute == 60:
return hour + 1, 0
else:
return hour, minute | [
"def",
"_calculate_hour_and_minute",
"(",
"float_hour",
")",
":",
"hour",
",",
"minute",
"=",
"int",
"(",
"float_hour",
")",
",",
"int",
"(",
"round",
"(",
"(",
"float_hour",
"-",
"int",
"(",
"float_hour",
")",
")",
"*",
"60",
")",
")",
"if",
"minute",
"==",
"60",
":",
"return",
"hour",
"+",
"1",
",",
"0",
"else",
":",
"return",
"hour",
",",
"minute"
] | Calculate hour and minutes as integers from a float hour. | [
"Calculate",
"hour",
"and",
"minutes",
"as",
"integers",
"from",
"a",
"float",
"hour",
"."
] | c08b7308077a48d5612f644943f92d5b5dade583 | https://github.com/ladybug-tools/ladybug/blob/c08b7308077a48d5612f644943f92d5b5dade583/ladybug/dt.py#L159-L165 |
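A quick standalone check of the float-hour split above, including the edge case where the minute rounds up to 60 and rolls into the next hour.
def hour_minute(float_hour):
    hour = int(float_hour)
    minute = int(round((float_hour - hour) * 60))
    return (hour + 1, 0) if minute == 60 else (hour, minute)

print(hour_minute(12.25))    # (12, 15)
print(hour_minute(12.9999))  # (13, 0)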
4,728 | ladybug-tools/ladybug | ladybug/dt.py | DateTime.add_minute | def add_minute(self, minute):
"""Create a new DateTime after the minutes are added.
Args:
minute: An integer value for minutes.
"""
_moy = self.moy + int(minute)
return self.__class__.from_moy(_moy) | python | def add_minute(self, minute):
"""Create a new DateTime after the minutes are added.
Args:
minute: An integer value for minutes.
"""
_moy = self.moy + int(minute)
return self.__class__.from_moy(_moy) | [
"def",
"add_minute",
"(",
"self",
",",
"minute",
")",
":",
"_moy",
"=",
"self",
".",
"moy",
"+",
"int",
"(",
"minute",
")",
"return",
"self",
".",
"__class__",
".",
"from_moy",
"(",
"_moy",
")"
] | Create a new DateTime after the minutes are added.
Args:
minute: An integer value for minutes. | [
"Create",
"a",
"new",
"DateTime",
"after",
"the",
"minutes",
"are",
"added",
"."
] | c08b7308077a48d5612f644943f92d5b5dade583 | https://github.com/ladybug-tools/ladybug/blob/c08b7308077a48d5612f644943f92d5b5dade583/ladybug/dt.py#L167-L174 |
4,729 | ladybug-tools/ladybug | ladybug/dt.py | DateTime.to_json | def to_json(self):
"""Get date time as a dictionary."""
return {'year': self.year,
'month': self.month,
'day': self.day,
'hour': self.hour,
'minute': self.minute} | python | def to_json(self):
"""Get date time as a dictionary."""
return {'year': self.year,
'month': self.month,
'day': self.day,
'hour': self.hour,
'minute': self.minute} | [
"def",
"to_json",
"(",
"self",
")",
":",
"return",
"{",
"'year'",
":",
"self",
".",
"year",
",",
"'month'",
":",
"self",
".",
"month",
",",
"'day'",
":",
"self",
".",
"day",
",",
"'hour'",
":",
"self",
".",
"hour",
",",
"'minute'",
":",
"self",
".",
"minute",
"}"
] | Get date time as a dictionary. | [
"Get",
"date",
"time",
"as",
"a",
"dictionary",
"."
] | c08b7308077a48d5612f644943f92d5b5dade583 | https://github.com/ladybug-tools/ladybug/blob/c08b7308077a48d5612f644943f92d5b5dade583/ladybug/dt.py#L208-L214 |
4,730 | Neurosim-lab/netpyne | netpyne/network/conn.py | fullConn | def fullConn (self, preCellsTags, postCellsTags, connParam):
from .. import sim
''' Generates connections between all pre and post-syn cells '''
if sim.cfg.verbose: print('Generating set of all-to-all connections (rule: %s) ...' % (connParam['label']))
# get list of params that have a lambda function
paramsStrFunc = [param for param in [p+'Func' for p in self.connStringFuncParams] if param in connParam]
for paramStrFunc in paramsStrFunc:
# replace lambda function (with args as dict of lambda funcs) with list of values
connParam[paramStrFunc[:-4]+'List'] = {(preGid,postGid): connParam[paramStrFunc](**{k:v if isinstance(v, Number) else v(preCellTags,postCellTags) for k,v in connParam[paramStrFunc+'Vars'].items()})
for preGid,preCellTags in preCellsTags.items() for postGid,postCellTags in postCellsTags.items()}
for postCellGid in postCellsTags: # for each postsyn cell
if postCellGid in self.gid2lid: # check if postsyn is in this node's list of gids
for preCellGid, preCellTags in preCellsTags.items(): # for each presyn cell
self._addCellConn(connParam, preCellGid, postCellGid) | python | def fullConn (self, preCellsTags, postCellsTags, connParam):
from .. import sim
''' Generates connections between all pre and post-syn cells '''
if sim.cfg.verbose: print('Generating set of all-to-all connections (rule: %s) ...' % (connParam['label']))
# get list of params that have a lambda function
paramsStrFunc = [param for param in [p+'Func' for p in self.connStringFuncParams] if param in connParam]
for paramStrFunc in paramsStrFunc:
# replace lambda function (with args as dict of lambda funcs) with list of values
connParam[paramStrFunc[:-4]+'List'] = {(preGid,postGid): connParam[paramStrFunc](**{k:v if isinstance(v, Number) else v(preCellTags,postCellTags) for k,v in connParam[paramStrFunc+'Vars'].items()})
for preGid,preCellTags in preCellsTags.items() for postGid,postCellTags in postCellsTags.items()}
for postCellGid in postCellsTags: # for each postsyn cell
if postCellGid in self.gid2lid: # check if postsyn is in this node's list of gids
for preCellGid, preCellTags in preCellsTags.items(): # for each presyn cell
self._addCellConn(connParam, preCellGid, postCellGid) | [
"def",
"fullConn",
"(",
"self",
",",
"preCellsTags",
",",
"postCellsTags",
",",
"connParam",
")",
":",
"from",
".",
".",
"import",
"sim",
"if",
"sim",
".",
"cfg",
".",
"verbose",
":",
"print",
"(",
"'Generating set of all-to-all connections (rule: %s) ...'",
"%",
"(",
"connParam",
"[",
"'label'",
"]",
")",
")",
"# get list of params that have a lambda function",
"paramsStrFunc",
"=",
"[",
"param",
"for",
"param",
"in",
"[",
"p",
"+",
"'Func'",
"for",
"p",
"in",
"self",
".",
"connStringFuncParams",
"]",
"if",
"param",
"in",
"connParam",
"]",
"for",
"paramStrFunc",
"in",
"paramsStrFunc",
":",
"# replace lambda function (with args as dict of lambda funcs) with list of values",
"connParam",
"[",
"paramStrFunc",
"[",
":",
"-",
"4",
"]",
"+",
"'List'",
"]",
"=",
"{",
"(",
"preGid",
",",
"postGid",
")",
":",
"connParam",
"[",
"paramStrFunc",
"]",
"(",
"*",
"*",
"{",
"k",
":",
"v",
"if",
"isinstance",
"(",
"v",
",",
"Number",
")",
"else",
"v",
"(",
"preCellTags",
",",
"postCellTags",
")",
"for",
"k",
",",
"v",
"in",
"connParam",
"[",
"paramStrFunc",
"+",
"'Vars'",
"]",
".",
"items",
"(",
")",
"}",
")",
"for",
"preGid",
",",
"preCellTags",
"in",
"preCellsTags",
".",
"items",
"(",
")",
"for",
"postGid",
",",
"postCellTags",
"in",
"postCellsTags",
".",
"items",
"(",
")",
"}",
"for",
"postCellGid",
"in",
"postCellsTags",
":",
"# for each postsyn cell",
"if",
"postCellGid",
"in",
"self",
".",
"gid2lid",
":",
"# check if postsyn is in this node's list of gids",
"for",
"preCellGid",
",",
"preCellTags",
"in",
"preCellsTags",
".",
"items",
"(",
")",
":",
"# for each presyn cell",
"self",
".",
"_addCellConn",
"(",
"connParam",
",",
"preCellGid",
",",
"postCellGid",
")"
] | Generates connections between all pre and post-syn cells | [
"Generates",
"connections",
"between",
"all",
"pre",
"and",
"post",
"-",
"syn",
"cells"
] | edb67b5098b2e7923d55010ded59ad1bf75c0f18 | https://github.com/Neurosim-lab/netpyne/blob/edb67b5098b2e7923d55010ded59ad1bf75c0f18/netpyne/network/conn.py#L310-L327 |
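A toy, non-NEURON illustration of the all-to-all rule above: every presynaptic gid is paired with every postsynaptic gid that lives on the current node (the gids below are invented).
pre_gids = [0, 1, 2]
post_gids = [10, 11]
local_gids = {10}  # gids simulated on this rank, analogous to gid2lid membership
conns = [(pre, post) for post in post_gids if post in local_gids for pre in pre_gids]
print(conns)  # [(0, 10), (1, 10), (2, 10)]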
4,731 | Neurosim-lab/netpyne | netpyne/network/conn.py | fromListConn | def fromListConn (self, preCellsTags, postCellsTags, connParam):
from .. import sim
''' Generates connections between all pre and post-syn cells based on a list of relative cell ids'''
if sim.cfg.verbose: print('Generating set of connections from list (rule: %s) ...' % (connParam['label']))
orderedPreGids = sorted(preCellsTags)
orderedPostGids = sorted(postCellsTags)
# list of params that can have a lambda function
paramsStrFunc = [param for param in [p+'Func' for p in self.connStringFuncParams] if param in connParam]
for paramStrFunc in paramsStrFunc:
# replace lambda function (with args as dict of lambda funcs) with list of values
connParam[paramStrFunc[:-4]+'List'] = {(orderedPreGids[preId],orderedPostGids[postId]):
connParam[paramStrFunc](**{k:v if isinstance(v, Number) else v(preCellsTags[orderedPreGids[preId]], postCellsTags[orderedPostGids[postId]])
for k,v in connParam[paramStrFunc+'Vars'].items()}) for preId,postId in connParam['connList']}
if 'weight' in connParam and isinstance(connParam['weight'], list):
connParam['weightFromList'] = list(connParam['weight']) # if weight is a list, copy to weightFromList
if 'delay' in connParam and isinstance(connParam['delay'], list):
connParam['delayFromList'] = list(connParam['delay']) # if delay is a list, copy to delayFromList
if 'loc' in connParam and isinstance(connParam['loc'], list):
connParam['locFromList'] = list(connParam['loc']) # if loc is a list, copy to locFromList
for iconn, (relativePreId, relativePostId) in enumerate(connParam['connList']): # for each postsyn cell
preCellGid = orderedPreGids[relativePreId]
postCellGid = orderedPostGids[relativePostId]
if postCellGid in self.gid2lid: # check if postsyn is in this node's list of gids
if 'weightFromList' in connParam: connParam['weight'] = connParam['weightFromList'][iconn]
if 'delayFromList' in connParam: connParam['delay'] = connParam['delayFromList'][iconn]
if 'locFromList' in connParam: connParam['loc'] = connParam['locFromList'][iconn]
if preCellGid != postCellGid: # if not self-connection
self._addCellConn(connParam, preCellGid, postCellGid) | python | def fromListConn (self, preCellsTags, postCellsTags, connParam):
from .. import sim
''' Generates connections between all pre and post-syn cells based on a list of relative cell ids'''
if sim.cfg.verbose: print('Generating set of connections from list (rule: %s) ...' % (connParam['label']))
orderedPreGids = sorted(preCellsTags)
orderedPostGids = sorted(postCellsTags)
# list of params that can have a lambda function
paramsStrFunc = [param for param in [p+'Func' for p in self.connStringFuncParams] if param in connParam]
for paramStrFunc in paramsStrFunc:
# replace lambda function (with args as dict of lambda funcs) with list of values
connParam[paramStrFunc[:-4]+'List'] = {(orderedPreGids[preId],orderedPostGids[postId]):
connParam[paramStrFunc](**{k:v if isinstance(v, Number) else v(preCellsTags[orderedPreGids[preId]], postCellsTags[orderedPostGids[postId]])
for k,v in connParam[paramStrFunc+'Vars'].items()}) for preId,postId in connParam['connList']}
if 'weight' in connParam and isinstance(connParam['weight'], list):
connParam['weightFromList'] = list(connParam['weight']) # if weight is a list, copy to weightFromList
if 'delay' in connParam and isinstance(connParam['delay'], list):
connParam['delayFromList'] = list(connParam['delay']) # if delay is a list, copy to delayFromList
if 'loc' in connParam and isinstance(connParam['loc'], list):
connParam['locFromList'] = list(connParam['loc']) # if loc is a list, copy to locFromList
for iconn, (relativePreId, relativePostId) in enumerate(connParam['connList']): # for each postsyn cell
preCellGid = orderedPreGids[relativePreId]
postCellGid = orderedPostGids[relativePostId]
if postCellGid in self.gid2lid: # check if postsyn is in this node's list of gids
if 'weightFromList' in connParam: connParam['weight'] = connParam['weightFromList'][iconn]
if 'delayFromList' in connParam: connParam['delay'] = connParam['delayFromList'][iconn]
if 'locFromList' in connParam: connParam['loc'] = connParam['locFromList'][iconn]
if preCellGid != postCellGid: # if not self-connection
self._addCellConn(connParam, preCellGid, postCellGid) | [
"def",
"fromListConn",
"(",
"self",
",",
"preCellsTags",
",",
"postCellsTags",
",",
"connParam",
")",
":",
"from",
".",
".",
"import",
"sim",
"if",
"sim",
".",
"cfg",
".",
"verbose",
":",
"print",
"(",
"'Generating set of connections from list (rule: %s) ...'",
"%",
"(",
"connParam",
"[",
"'label'",
"]",
")",
")",
"orderedPreGids",
"=",
"sorted",
"(",
"preCellsTags",
")",
"orderedPostGids",
"=",
"sorted",
"(",
"postCellsTags",
")",
"# list of params that can have a lambda function",
"paramsStrFunc",
"=",
"[",
"param",
"for",
"param",
"in",
"[",
"p",
"+",
"'Func'",
"for",
"p",
"in",
"self",
".",
"connStringFuncParams",
"]",
"if",
"param",
"in",
"connParam",
"]",
"for",
"paramStrFunc",
"in",
"paramsStrFunc",
":",
"# replace lambda function (with args as dict of lambda funcs) with list of values",
"connParam",
"[",
"paramStrFunc",
"[",
":",
"-",
"4",
"]",
"+",
"'List'",
"]",
"=",
"{",
"(",
"orderedPreGids",
"[",
"preId",
"]",
",",
"orderedPostGids",
"[",
"postId",
"]",
")",
":",
"connParam",
"[",
"paramStrFunc",
"]",
"(",
"*",
"*",
"{",
"k",
":",
"v",
"if",
"isinstance",
"(",
"v",
",",
"Number",
")",
"else",
"v",
"(",
"preCellsTags",
"[",
"orderedPreGids",
"[",
"preId",
"]",
"]",
",",
"postCellsTags",
"[",
"orderedPostGids",
"[",
"postId",
"]",
"]",
")",
"for",
"k",
",",
"v",
"in",
"connParam",
"[",
"paramStrFunc",
"+",
"'Vars'",
"]",
".",
"items",
"(",
")",
"}",
")",
"for",
"preId",
",",
"postId",
"in",
"connParam",
"[",
"'connList'",
"]",
"}",
"if",
"'weight'",
"in",
"connParam",
"and",
"isinstance",
"(",
"connParam",
"[",
"'weight'",
"]",
",",
"list",
")",
":",
"connParam",
"[",
"'weightFromList'",
"]",
"=",
"list",
"(",
"connParam",
"[",
"'weight'",
"]",
")",
"# if weight is a list, copy to weightFromList",
"if",
"'delay'",
"in",
"connParam",
"and",
"isinstance",
"(",
"connParam",
"[",
"'delay'",
"]",
",",
"list",
")",
":",
"connParam",
"[",
"'delayFromList'",
"]",
"=",
"list",
"(",
"connParam",
"[",
"'delay'",
"]",
")",
"# if delay is a list, copy to delayFromList",
"if",
"'loc'",
"in",
"connParam",
"and",
"isinstance",
"(",
"connParam",
"[",
"'loc'",
"]",
",",
"list",
")",
":",
"connParam",
"[",
"'locFromList'",
"]",
"=",
"list",
"(",
"connParam",
"[",
"'loc'",
"]",
")",
"# if delay is a list, copy to locFromList",
"for",
"iconn",
",",
"(",
"relativePreId",
",",
"relativePostId",
")",
"in",
"enumerate",
"(",
"connParam",
"[",
"'connList'",
"]",
")",
":",
"# for each postsyn cell",
"preCellGid",
"=",
"orderedPreGids",
"[",
"relativePreId",
"]",
"postCellGid",
"=",
"orderedPostGids",
"[",
"relativePostId",
"]",
"if",
"postCellGid",
"in",
"self",
".",
"gid2lid",
":",
"# check if postsyn is in this node's list of gids",
"if",
"'weightFromList'",
"in",
"connParam",
":",
"connParam",
"[",
"'weight'",
"]",
"=",
"connParam",
"[",
"'weightFromList'",
"]",
"[",
"iconn",
"]",
"if",
"'delayFromList'",
"in",
"connParam",
":",
"connParam",
"[",
"'delay'",
"]",
"=",
"connParam",
"[",
"'delayFromList'",
"]",
"[",
"iconn",
"]",
"if",
"'locFromList'",
"in",
"connParam",
":",
"connParam",
"[",
"'loc'",
"]",
"=",
"connParam",
"[",
"'locFromList'",
"]",
"[",
"iconn",
"]",
"if",
"preCellGid",
"!=",
"postCellGid",
":",
"# if not self-connection",
"self",
".",
"_addCellConn",
"(",
"connParam",
",",
"preCellGid",
",",
"postCellGid",
")"
] | Generates connections between all pre and post-syn cells based on a list of relative cell ids | [
"Generates",
"connections",
"between",
"all",
"pre",
"and",
"post",
"-",
"syn",
"cells",
"based",
"list",
"of",
"relative",
"cell",
"ids"
] | edb67b5098b2e7923d55010ded59ad1bf75c0f18 | https://github.com/Neurosim-lab/netpyne/blob/edb67b5098b2e7923d55010ded59ad1bf75c0f18/netpyne/network/conn.py#L514-L549 |
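A toy sketch of the explicit connection-list idea above: relative indices select gids from the sorted pre/post lists, and per-connection weights are read positionally (all numbers invented).
pre_gids, post_gids = [5, 7, 9], [20, 21]
conn_list = [(0, 1), (2, 0)]   # (relative pre index, relative post index)
weights = [0.01, 0.02]         # one weight per listed connection
for i, (pre_i, post_i) in enumerate(conn_list):
    print(pre_gids[pre_i], '->', post_gids[post_i], 'weight', weights[i])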
4,732 | Neurosim-lab/netpyne | netpyne/cell/compartCell.py | CompartCell.setImembPtr | def setImembPtr(self):
"""Set PtrVector to point to the i_membrane_"""
jseg = 0
for sec in list(self.secs.values()):
hSec = sec['hObj']
for iseg, seg in enumerate(hSec):
self.imembPtr.pset(jseg, seg._ref_i_membrane_) # notice the underscore at the end (in nA)
jseg += 1 | python | def setImembPtr(self):
"""Set PtrVector to point to the i_membrane_"""
jseg = 0
for sec in list(self.secs.values()):
hSec = sec['hObj']
for iseg, seg in enumerate(hSec):
self.imembPtr.pset(jseg, seg._ref_i_membrane_) # notice the underscore at the end (in nA)
jseg += 1 | [
"def",
"setImembPtr",
"(",
"self",
")",
":",
"jseg",
"=",
"0",
"for",
"sec",
"in",
"list",
"(",
"self",
".",
"secs",
".",
"values",
"(",
")",
")",
":",
"hSec",
"=",
"sec",
"[",
"'hObj'",
"]",
"for",
"iseg",
",",
"seg",
"in",
"enumerate",
"(",
"hSec",
")",
":",
"self",
".",
"imembPtr",
".",
"pset",
"(",
"jseg",
",",
"seg",
".",
"_ref_i_membrane_",
")",
"# notice the underscore at the end (in nA)",
"jseg",
"+=",
"1"
] | Set PtrVector to point to the i_membrane_ | [
"Set",
"PtrVector",
"to",
"point",
"to",
"the",
"i_membrane_"
] | edb67b5098b2e7923d55010ded59ad1bf75c0f18 | https://github.com/Neurosim-lab/netpyne/blob/edb67b5098b2e7923d55010ded59ad1bf75c0f18/netpyne/cell/compartCell.py#L1245-L1252 |
4,733 | Neurosim-lab/netpyne | examples/RL_arm/main.py | saveWeights | def saveWeights(sim):
''' Save the weights for each plastic synapse '''
with open(sim.weightsfilename,'w') as fid:
for weightdata in sim.allWeights:
fid.write('%0.0f' % weightdata[0]) # Time
for i in range(1,len(weightdata)): fid.write('\t%0.8f' % weightdata[i])
fid.write('\n')
print(('Saved weights as %s' % sim.weightsfilename)) | python | def saveWeights(sim):
''' Save the weights for each plastic synapse '''
with open(sim.weightsfilename,'w') as fid:
for weightdata in sim.allWeights:
fid.write('%0.0f' % weightdata[0]) # Time
for i in range(1,len(weightdata)): fid.write('\t%0.8f' % weightdata[i])
fid.write('\n')
print(('Saved weights as %s' % sim.weightsfilename)) | [
"def",
"saveWeights",
"(",
"sim",
")",
":",
"with",
"open",
"(",
"sim",
".",
"weightsfilename",
",",
"'w'",
")",
"as",
"fid",
":",
"for",
"weightdata",
"in",
"sim",
".",
"allWeights",
":",
"fid",
".",
"write",
"(",
"'%0.0f'",
"%",
"weightdata",
"[",
"0",
"]",
")",
"# Time",
"for",
"i",
"in",
"range",
"(",
"1",
",",
"len",
"(",
"weightdata",
")",
")",
":",
"fid",
".",
"write",
"(",
"'\\t%0.8f'",
"%",
"weightdata",
"[",
"i",
"]",
")",
"fid",
".",
"write",
"(",
"'\\n'",
")",
"print",
"(",
"(",
"'Saved weights as %s'",
"%",
"sim",
".",
"weightsfilename",
")",
")"
] | Save the weights for each plastic synapse | [
"Save",
"the",
"weights",
"for",
"each",
"plastic",
"synapse"
] | edb67b5098b2e7923d55010ded59ad1bf75c0f18 | https://github.com/Neurosim-lab/netpyne/blob/edb67b5098b2e7923d55010ded59ad1bf75c0f18/examples/RL_arm/main.py#L127-L134 |
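A minimal sketch of the tab-separated weight log written above, using an invented file name and made-up rows of [time, weight1, weight2, ...].
all_weights = [[0.0, 0.50, 0.25], [25.0, 0.51, 0.26]]
with open('weights_example.txt', 'w') as fid:
    for row in all_weights:
        # time with no decimals, then one tab-separated weight per plastic synapse
        fid.write('%0.0f' % row[0] + ''.join('\t%0.8f' % w for w in row[1:]) + '\n')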
4,734 | Neurosim-lab/netpyne | netpyne/specs/utils.py | validateFunction | def validateFunction(strFunc, netParamsVars):
''' returns True if "strFunc" can be evaluated'''
from math import exp, log, sqrt, sin, cos, tan, asin, acos, atan, sinh, cosh, tanh, pi, e
rand = h.Random()
stringFuncRandMethods = ['binomial', 'discunif', 'erlang', 'geometric', 'hypergeo',
'lognormal', 'negexp', 'normal', 'poisson', 'uniform', 'weibull']
for randmeth in stringFuncRandMethods: strFunc = strFunc.replace(randmeth, 'rand.'+randmeth)
variables = {
"pre_x" : 1, "pre_y" : 1, "pre_z" : 1,
"post_x" : 1, "post_y" : 1, "post_z" : 1,
"dist_x" : 1, "dist_y" : 1, "dist_z" : 1,
"pre_xnorm" : 1, "pre_ynorm" : 1, "pre_znorm" : 1,
"post_xnorm" : 1, "post_ynorm" : 1, "post_znorm" : 1,
"dist_xnorm" : 1, "dist_ynorm" : 1, "dist_znorm" : 1,
"dist_3D" : 1, "dist_3D_border" : 1, "dist_2D" : 1,
"dist_norm3D": 1, "dist_norm2D" : 1, "rand": rand,
"exp": exp, "log":log, "sqrt": sqrt,
"sin":sin, "cos":cos, "tan":tan, "asin":asin,
"acos":acos, "atan":atan, "sinh":sinh, "cosh":cosh,
"tanh":tanh, "pi":pi,"e": e
}
# add netParams variables
for k, v in netParamsVars.items():
if isinstance(v, Number):
variables[k] = v
try:
eval(strFunc, variables)
return True
except:
return False | python | def validateFunction(strFunc, netParamsVars):
''' returns True if "strFunc" can be evaluated'''
from math import exp, log, sqrt, sin, cos, tan, asin, acos, atan, sinh, cosh, tanh, pi, e
rand = h.Random()
stringFuncRandMethods = ['binomial', 'discunif', 'erlang', 'geometric', 'hypergeo',
'lognormal', 'negexp', 'normal', 'poisson', 'uniform', 'weibull']
for randmeth in stringFuncRandMethods: strFunc = strFunc.replace(randmeth, 'rand.'+randmeth)
variables = {
"pre_x" : 1, "pre_y" : 1, "pre_z" : 1,
"post_x" : 1, "post_y" : 1, "post_z" : 1,
"dist_x" : 1, "dist_y" : 1, "dist_z" : 1,
"pre_xnorm" : 1, "pre_ynorm" : 1, "pre_znorm" : 1,
"post_xnorm" : 1, "post_ynorm" : 1, "post_znorm" : 1,
"dist_xnorm" : 1, "dist_ynorm" : 1, "dist_znorm" : 1,
"dist_3D" : 1, "dist_3D_border" : 1, "dist_2D" : 1,
"dist_norm3D": 1, "dist_norm2D" : 1, "rand": rand,
"exp": exp, "log":log, "sqrt": sqrt,
"sin":sin, "cos":cos, "tan":tan, "asin":asin,
"acos":acos, "atan":atan, "sinh":sinh, "cosh":cosh,
"tanh":tanh, "pi":pi,"e": e
}
# add netParams variables
for k, v in netParamsVars.items():
if isinstance(v, Number):
variables[k] = v
try:
eval(strFunc, variables)
return True
except:
return False | [
"def",
"validateFunction",
"(",
"strFunc",
",",
"netParamsVars",
")",
":",
"from",
"math",
"import",
"exp",
",",
"log",
",",
"sqrt",
",",
"sin",
",",
"cos",
",",
"tan",
",",
"asin",
",",
"acos",
",",
"atan",
",",
"sinh",
",",
"cosh",
",",
"tanh",
",",
"pi",
",",
"e",
"rand",
"=",
"h",
".",
"Random",
"(",
")",
"stringFuncRandMethods",
"=",
"[",
"'binomial'",
",",
"'discunif'",
",",
"'erlang'",
",",
"'geometric'",
",",
"'hypergeo'",
",",
"'lognormal'",
",",
"'negexp'",
",",
"'normal'",
",",
"'poisson'",
",",
"'uniform'",
",",
"'weibull'",
"]",
"for",
"randmeth",
"in",
"stringFuncRandMethods",
":",
"strFunc",
"=",
"strFunc",
".",
"replace",
"(",
"randmeth",
",",
"'rand.'",
"+",
"randmeth",
")",
"variables",
"=",
"{",
"\"pre_x\"",
":",
"1",
",",
"\"pre_y\"",
":",
"1",
",",
"\"pre_z\"",
":",
"1",
",",
"\"post_x\"",
":",
"1",
",",
"\"post_y\"",
":",
"1",
",",
"\"post_z\"",
":",
"1",
",",
"\"dist_x\"",
":",
"1",
",",
"\"dist_y\"",
":",
"1",
",",
"\"dist_z\"",
":",
"1",
",",
"\"pre_xnorm\"",
":",
"1",
",",
"\"pre_ynorm\"",
":",
"1",
",",
"\"pre_znorm\"",
":",
"1",
",",
"\"post_xnorm\"",
":",
"1",
",",
"\"post_ynorm\"",
":",
"1",
",",
"\"post_znorm\"",
":",
"1",
",",
"\"dist_xnorm\"",
":",
"1",
",",
"\"dist_ynorm\"",
":",
"1",
",",
"\"dist_znorm\"",
":",
"1",
",",
"\"dist_3D\"",
":",
"1",
",",
"\"dist_3D_border\"",
":",
"1",
",",
"\"dist_2D\"",
":",
"1",
",",
"\"dist_norm3D\"",
":",
"1",
",",
"\"dist_norm2D\"",
":",
"1",
",",
"\"rand\"",
":",
"rand",
",",
"\"exp\"",
":",
"exp",
",",
"\"log\"",
":",
"log",
",",
"\"sqrt\"",
":",
"sqrt",
",",
"\"sin\"",
":",
"sin",
",",
"\"cos\"",
":",
"cos",
",",
"\"tan\"",
":",
"tan",
",",
"\"asin\"",
":",
"asin",
",",
"\"acos\"",
":",
"acos",
",",
"\"atan\"",
":",
"atan",
",",
"\"sinh\"",
":",
"sinh",
",",
"\"cosh\"",
":",
"cosh",
",",
"\"tanh\"",
":",
"tanh",
",",
"\"pi\"",
":",
"pi",
",",
"\"e\"",
":",
"e",
"}",
"# add netParams variables",
"for",
"k",
",",
"v",
"in",
"netParamsVars",
".",
"items",
"(",
")",
":",
"if",
"isinstance",
"(",
"v",
",",
"Number",
")",
":",
"variables",
"[",
"k",
"]",
"=",
"v",
"try",
":",
"eval",
"(",
"strFunc",
",",
"variables",
")",
"return",
"True",
"except",
":",
"return",
"False"
] | returns True if "strFunc" can be evaluated | [
"returns",
"True",
"if",
"strFunc",
"can",
"be",
"evaluated"
] | edb67b5098b2e7923d55010ded59ad1bf75c0f18 | https://github.com/Neurosim-lab/netpyne/blob/edb67b5098b2e7923d55010ded59ad1bf75c0f18/netpyne/specs/utils.py#L17-L50 |
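A stripped-down sketch of the same validate-by-evaluating idea: the string function is eval'd against dummy variables and any exception marks it invalid (plain math only here, no NEURON Random methods).
from math import exp, pi

def is_valid(str_func):
    test_vars = {'dist_3D': 1.0, 'dist_2D': 1.0, 'exp': exp, 'pi': pi}
    try:
        eval(str_func, test_vars)
        return True
    except Exception:
        return False

print(is_valid('0.1 * exp(-dist_3D / 50.0)'))  # True
print(is_valid('0.1 * exp(-dist_3D / '))       # False (syntax error is caught)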
4,735 | Neurosim-lab/netpyne | netpyne/support/filter.py | bandpass | def bandpass(data, freqmin, freqmax, df, corners=4, zerophase=True):
"""
Butterworth-Bandpass Filter.
Filter data from ``freqmin`` to ``freqmax`` using ``corners``
corners.
The filter uses :func:`scipy.signal.iirfilter` (for design)
and :func:`scipy.signal.sosfilt` (for applying the filter).
:type data: numpy.ndarray
:param data: Data to filter.
:param freqmin: Pass band low corner frequency.
:param freqmax: Pass band high corner frequency.
:param df: Sampling rate in Hz.
:param corners: Filter corners / order.
:param zerophase: If True, apply filter once forwards and once backwards.
This results in twice the filter order but zero phase shift in
the resulting filtered trace.
:return: Filtered data.
"""
fe = 0.5 * df
low = freqmin / fe
high = freqmax / fe
# raise for some bad scenarios
if high - 1.0 > -1e-6:
msg = ("Selected high corner frequency ({}) of bandpass is at or "
"above Nyquist ({}). Applying a high-pass instead.").format(
freqmax, fe)
warnings.warn(msg)
return highpass(data, freq=freqmin, df=df, corners=corners,
zerophase=zerophase)
if low > 1:
msg = "Selected low corner frequency is above Nyquist."
raise ValueError(msg)
z, p, k = iirfilter(corners, [low, high], btype='band',
ftype='butter', output='zpk')
sos = zpk2sos(z, p, k)
if zerophase:
firstpass = sosfilt(sos, data)
return sosfilt(sos, firstpass[::-1])[::-1]
else:
return sosfilt(sos, data) | python | def bandpass(data, freqmin, freqmax, df, corners=4, zerophase=True):
"""
Butterworth-Bandpass Filter.
Filter data from ``freqmin`` to ``freqmax`` using ``corners``
corners.
The filter uses :func:`scipy.signal.iirfilter` (for design)
and :func:`scipy.signal.sosfilt` (for applying the filter).
:type data: numpy.ndarray
:param data: Data to filter.
:param freqmin: Pass band low corner frequency.
:param freqmax: Pass band high corner frequency.
:param df: Sampling rate in Hz.
:param corners: Filter corners / order.
:param zerophase: If True, apply filter once forwards and once backwards.
This results in twice the filter order but zero phase shift in
the resulting filtered trace.
:return: Filtered data.
"""
fe = 0.5 * df
low = freqmin / fe
high = freqmax / fe
# raise for some bad scenarios
if high - 1.0 > -1e-6:
msg = ("Selected high corner frequency ({}) of bandpass is at or "
"above Nyquist ({}). Applying a high-pass instead.").format(
freqmax, fe)
warnings.warn(msg)
return highpass(data, freq=freqmin, df=df, corners=corners,
zerophase=zerophase)
if low > 1:
msg = "Selected low corner frequency is above Nyquist."
raise ValueError(msg)
z, p, k = iirfilter(corners, [low, high], btype='band',
ftype='butter', output='zpk')
sos = zpk2sos(z, p, k)
if zerophase:
firstpass = sosfilt(sos, data)
return sosfilt(sos, firstpass[::-1])[::-1]
else:
return sosfilt(sos, data) | [
"def",
"bandpass",
"(",
"data",
",",
"freqmin",
",",
"freqmax",
",",
"df",
",",
"corners",
"=",
"4",
",",
"zerophase",
"=",
"True",
")",
":",
"fe",
"=",
"0.5",
"*",
"df",
"low",
"=",
"freqmin",
"/",
"fe",
"high",
"=",
"freqmax",
"/",
"fe",
"# raise for some bad scenarios",
"if",
"high",
"-",
"1.0",
">",
"-",
"1e-6",
":",
"msg",
"=",
"(",
"\"Selected high corner frequency ({}) of bandpass is at or \"",
"\"above Nyquist ({}). Applying a high-pass instead.\"",
")",
".",
"format",
"(",
"freqmax",
",",
"fe",
")",
"warnings",
".",
"warn",
"(",
"msg",
")",
"return",
"highpass",
"(",
"data",
",",
"freq",
"=",
"freqmin",
",",
"df",
"=",
"df",
",",
"corners",
"=",
"corners",
",",
"zerophase",
"=",
"zerophase",
")",
"if",
"low",
">",
"1",
":",
"msg",
"=",
"\"Selected low corner frequency is above Nyquist.\"",
"raise",
"ValueError",
"(",
"msg",
")",
"z",
",",
"p",
",",
"k",
"=",
"iirfilter",
"(",
"corners",
",",
"[",
"low",
",",
"high",
"]",
",",
"btype",
"=",
"'band'",
",",
"ftype",
"=",
"'butter'",
",",
"output",
"=",
"'zpk'",
")",
"sos",
"=",
"zpk2sos",
"(",
"z",
",",
"p",
",",
"k",
")",
"if",
"zerophase",
":",
"firstpass",
"=",
"sosfilt",
"(",
"sos",
",",
"data",
")",
"return",
"sosfilt",
"(",
"sos",
",",
"firstpass",
"[",
":",
":",
"-",
"1",
"]",
")",
"[",
":",
":",
"-",
"1",
"]",
"else",
":",
"return",
"sosfilt",
"(",
"sos",
",",
"data",
")"
] | Butterworth-Bandpass Filter.
Filter data from ``freqmin`` to ``freqmax`` using ``corners``
corners.
The filter uses :func:`scipy.signal.iirfilter` (for design)
and :func:`scipy.signal.sosfilt` (for applying the filter).
:type data: numpy.ndarray
:param data: Data to filter.
:param freqmin: Pass band low corner frequency.
:param freqmax: Pass band high corner frequency.
:param df: Sampling rate in Hz.
:param corners: Filter corners / order.
:param zerophase: If True, apply filter once forwards and once backwards.
This results in twice the filter order but zero phase shift in
the resulting filtered trace.
:return: Filtered data. | [
"Butterworth",
"-",
"Bandpass",
"Filter",
"."
] | edb67b5098b2e7923d55010ded59ad1bf75c0f18 | https://github.com/Neurosim-lab/netpyne/blob/edb67b5098b2e7923d55010ded59ad1bf75c0f18/netpyne/support/filter.py#L45-L86 |
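A self-contained sketch of the same design-then-apply pipeline (iirfilter -> zpk2sos -> sosfilt) on a synthetic two-tone signal; it assumes numpy and scipy are installed and is not tied to netpyne.
import numpy as np
from scipy.signal import iirfilter, zpk2sos, sosfilt

fs = 1000.0
t = np.arange(0, 1.0, 1.0 / fs)
x = np.sin(2 * np.pi * 10 * t) + np.sin(2 * np.pi * 200 * t)  # 10 Hz + 200 Hz components
low, high = 30.0 / (fs / 2), 80.0 / (fs / 2)                  # corners normalized by Nyquist
z, p, k = iirfilter(4, [low, high], btype='band', ftype='butter', output='zpk')
sos = zpk2sos(z, p, k)
y = sosfilt(sos, x)
print(np.abs(x).max(), np.abs(y[200:]).max())  # both tones sit outside 30-80 Hz, so y is attenuated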
4,736 | Neurosim-lab/netpyne | netpyne/support/filter.py | bandstop | def bandstop(data, freqmin, freqmax, df, corners=4, zerophase=False):
"""
Butterworth-Bandstop Filter.
Filter data removing data between frequencies ``freqmin`` and ``freqmax``
using ``corners`` corners.
The filter uses :func:`scipy.signal.iirfilter` (for design)
and :func:`scipy.signal.sosfilt` (for applying the filter).
:type data: numpy.ndarray
:param data: Data to filter.
:param freqmin: Stop band low corner frequency.
:param freqmax: Stop band high corner frequency.
:param df: Sampling rate in Hz.
:param corners: Filter corners / order.
:param zerophase: If True, apply filter once forwards and once backwards.
This results in twice the number of corners but zero phase shift in
the resulting filtered trace.
:return: Filtered data.
"""
fe = 0.5 * df
low = freqmin / fe
high = freqmax / fe
# raise for some bad scenarios
if high > 1:
high = 1.0
msg = "Selected high corner frequency is above Nyquist. " + \
"Setting Nyquist as high corner."
warnings.warn(msg)
if low > 1:
msg = "Selected low corner frequency is above Nyquist."
raise ValueError(msg)
z, p, k = iirfilter(corners, [low, high],
btype='bandstop', ftype='butter', output='zpk')
sos = zpk2sos(z, p, k)
if zerophase:
firstpass = sosfilt(sos, data)
return sosfilt(sos, firstpass[::-1])[::-1]
else:
return sosfilt(sos, data) | python | def bandstop(data, freqmin, freqmax, df, corners=4, zerophase=False):
"""
Butterworth-Bandstop Filter.
Filter data removing data between frequencies ``freqmin`` and ``freqmax``
using ``corners`` corners.
The filter uses :func:`scipy.signal.iirfilter` (for design)
and :func:`scipy.signal.sosfilt` (for applying the filter).
:type data: numpy.ndarray
:param data: Data to filter.
:param freqmin: Stop band low corner frequency.
:param freqmax: Stop band high corner frequency.
:param df: Sampling rate in Hz.
:param corners: Filter corners / order.
:param zerophase: If True, apply filter once forwards and once backwards.
This results in twice the number of corners but zero phase shift in
the resulting filtered trace.
:return: Filtered data.
"""
fe = 0.5 * df
low = freqmin / fe
high = freqmax / fe
# raise for some bad scenarios
if high > 1:
high = 1.0
msg = "Selected high corner frequency is above Nyquist. " + \
"Setting Nyquist as high corner."
warnings.warn(msg)
if low > 1:
msg = "Selected low corner frequency is above Nyquist."
raise ValueError(msg)
z, p, k = iirfilter(corners, [low, high],
btype='bandstop', ftype='butter', output='zpk')
sos = zpk2sos(z, p, k)
if zerophase:
firstpass = sosfilt(sos, data)
return sosfilt(sos, firstpass[::-1])[::-1]
else:
return sosfilt(sos, data) | [
"def",
"bandstop",
"(",
"data",
",",
"freqmin",
",",
"freqmax",
",",
"df",
",",
"corners",
"=",
"4",
",",
"zerophase",
"=",
"False",
")",
":",
"fe",
"=",
"0.5",
"*",
"df",
"low",
"=",
"freqmin",
"/",
"fe",
"high",
"=",
"freqmax",
"/",
"fe",
"# raise for some bad scenarios",
"if",
"high",
">",
"1",
":",
"high",
"=",
"1.0",
"msg",
"=",
"\"Selected high corner frequency is above Nyquist. \"",
"+",
"\"Setting Nyquist as high corner.\"",
"warnings",
".",
"warn",
"(",
"msg",
")",
"if",
"low",
">",
"1",
":",
"msg",
"=",
"\"Selected low corner frequency is above Nyquist.\"",
"raise",
"ValueError",
"(",
"msg",
")",
"z",
",",
"p",
",",
"k",
"=",
"iirfilter",
"(",
"corners",
",",
"[",
"low",
",",
"high",
"]",
",",
"btype",
"=",
"'bandstop'",
",",
"ftype",
"=",
"'butter'",
",",
"output",
"=",
"'zpk'",
")",
"sos",
"=",
"zpk2sos",
"(",
"z",
",",
"p",
",",
"k",
")",
"if",
"zerophase",
":",
"firstpass",
"=",
"sosfilt",
"(",
"sos",
",",
"data",
")",
"return",
"sosfilt",
"(",
"sos",
",",
"firstpass",
"[",
":",
":",
"-",
"1",
"]",
")",
"[",
":",
":",
"-",
"1",
"]",
"else",
":",
"return",
"sosfilt",
"(",
"sos",
",",
"data",
")"
] | Butterworth-Bandstop Filter.
Filter data removing data between frequencies ``freqmin`` and ``freqmax``
using ``corners`` corners.
The filter uses :func:`scipy.signal.iirfilter` (for design)
and :func:`scipy.signal.sosfilt` (for applying the filter).
:type data: numpy.ndarray
:param data: Data to filter.
:param freqmin: Stop band low corner frequency.
:param freqmax: Stop band high corner frequency.
:param df: Sampling rate in Hz.
:param corners: Filter corners / order.
:param zerophase: If True, apply filter once forwards and once backwards.
This results in twice the number of corners but zero phase shift in
the resulting filtered trace.
:return: Filtered data. | [
"Butterworth",
"-",
"Bandstop",
"Filter",
"."
] | edb67b5098b2e7923d55010ded59ad1bf75c0f18 | https://github.com/Neurosim-lab/netpyne/blob/edb67b5098b2e7923d55010ded59ad1bf75c0f18/netpyne/support/filter.py#L89-L128 |
4,737 | Neurosim-lab/netpyne | netpyne/support/filter.py | lowpass | def lowpass(data, freq, df, corners=4, zerophase=False):
"""
Butterworth-Lowpass Filter.
Filter data removing data over certain frequency ``freq`` using ``corners``
corners.
The filter uses :func:`scipy.signal.iirfilter` (for design)
and :func:`scipy.signal.sosfilt` (for applying the filter).
:type data: numpy.ndarray
:param data: Data to filter.
:param freq: Filter corner frequency.
:param df: Sampling rate in Hz.
:param corners: Filter corners / order.
:param zerophase: If True, apply filter once forwards and once backwards.
This results in twice the number of corners but zero phase shift in
the resulting filtered trace.
:return: Filtered data.
"""
fe = 0.5 * df
f = freq / fe
# raise for some bad scenarios
if f > 1:
f = 1.0
msg = "Selected corner frequency is above Nyquist. " + \
"Setting Nyquist as high corner."
warnings.warn(msg)
z, p, k = iirfilter(corners, f, btype='lowpass', ftype='butter',
output='zpk')
sos = zpk2sos(z, p, k)
if zerophase:
firstpass = sosfilt(sos, data)
return sosfilt(sos, firstpass[::-1])[::-1]
else:
return sosfilt(sos, data) | python | def lowpass(data, freq, df, corners=4, zerophase=False):
"""
Butterworth-Lowpass Filter.
Filter data removing data over certain frequency ``freq`` using ``corners``
corners.
The filter uses :func:`scipy.signal.iirfilter` (for design)
and :func:`scipy.signal.sosfilt` (for applying the filter).
:type data: numpy.ndarray
:param data: Data to filter.
:param freq: Filter corner frequency.
:param df: Sampling rate in Hz.
:param corners: Filter corners / order.
:param zerophase: If True, apply filter once forwards and once backwards.
This results in twice the number of corners but zero phase shift in
the resulting filtered trace.
:return: Filtered data.
"""
fe = 0.5 * df
f = freq / fe
# raise for some bad scenarios
if f > 1:
f = 1.0
msg = "Selected corner frequency is above Nyquist. " + \
"Setting Nyquist as high corner."
warnings.warn(msg)
z, p, k = iirfilter(corners, f, btype='lowpass', ftype='butter',
output='zpk')
sos = zpk2sos(z, p, k)
if zerophase:
firstpass = sosfilt(sos, data)
return sosfilt(sos, firstpass[::-1])[::-1]
else:
return sosfilt(sos, data) | [
"def",
"lowpass",
"(",
"data",
",",
"freq",
",",
"df",
",",
"corners",
"=",
"4",
",",
"zerophase",
"=",
"False",
")",
":",
"fe",
"=",
"0.5",
"*",
"df",
"f",
"=",
"freq",
"/",
"fe",
"# raise for some bad scenarios",
"if",
"f",
">",
"1",
":",
"f",
"=",
"1.0",
"msg",
"=",
"\"Selected corner frequency is above Nyquist. \"",
"+",
"\"Setting Nyquist as high corner.\"",
"warnings",
".",
"warn",
"(",
"msg",
")",
"z",
",",
"p",
",",
"k",
"=",
"iirfilter",
"(",
"corners",
",",
"f",
",",
"btype",
"=",
"'lowpass'",
",",
"ftype",
"=",
"'butter'",
",",
"output",
"=",
"'zpk'",
")",
"sos",
"=",
"zpk2sos",
"(",
"z",
",",
"p",
",",
"k",
")",
"if",
"zerophase",
":",
"firstpass",
"=",
"sosfilt",
"(",
"sos",
",",
"data",
")",
"return",
"sosfilt",
"(",
"sos",
",",
"firstpass",
"[",
":",
":",
"-",
"1",
"]",
")",
"[",
":",
":",
"-",
"1",
"]",
"else",
":",
"return",
"sosfilt",
"(",
"sos",
",",
"data",
")"
] | Butterworth-Lowpass Filter.
Filter data removing data over certain frequency ``freq`` using ``corners``
corners.
The filter uses :func:`scipy.signal.iirfilter` (for design)
and :func:`scipy.signal.sosfilt` (for applying the filter).
:type data: numpy.ndarray
:param data: Data to filter.
:param freq: Filter corner frequency.
:param df: Sampling rate in Hz.
:param corners: Filter corners / order.
:param zerophase: If True, apply filter once forwards and once backwards.
This results in twice the number of corners but zero phase shift in
the resulting filtered trace.
:return: Filtered data. | [
"Butterworth",
"-",
"Lowpass",
"Filter",
"."
] | edb67b5098b2e7923d55010ded59ad1bf75c0f18 | https://github.com/Neurosim-lab/netpyne/blob/edb67b5098b2e7923d55010ded59ad1bf75c0f18/netpyne/support/filter.py#L131-L165 |
4,738 | Neurosim-lab/netpyne | netpyne/support/filter.py | integer_decimation | def integer_decimation(data, decimation_factor):
"""
Downsampling by applying a simple integer decimation.
Make sure that no signal is present in frequency bands above the new
Nyquist frequency (samp_rate/2/decimation_factor), e.g. by applying a
lowpass filter beforehand!
New sampling rate is old sampling rate divided by decimation_factor.
:type data: numpy.ndarray
:param data: Data to filter.
:param decimation_factor: Integer decimation factor
:return: Downsampled data (array length: old length / decimation_factor)
"""
if not isinstance(decimation_factor, int):
msg = "Decimation_factor must be an integer!"
raise TypeError(msg)
# reshape and only use every decimation_factor-th sample
data = np.array(data[::decimation_factor])
return data | python | def integer_decimation(data, decimation_factor):
"""
Downsampling by applying a simple integer decimation.
Make sure that no signal is present in frequency bands above the new
Nyquist frequency (samp_rate/2/decimation_factor), e.g. by applying a
lowpass filter beforehand!
New sampling rate is old sampling rate divided by decimation_factor.
:type data: numpy.ndarray
:param data: Data to filter.
:param decimation_factor: Integer decimation factor
:return: Downsampled data (array length: old length / decimation_factor)
"""
if not isinstance(decimation_factor, int):
msg = "Decimation_factor must be an integer!"
raise TypeError(msg)
# reshape and only use every decimation_factor-th sample
data = np.array(data[::decimation_factor])
return data | [
"def",
"integer_decimation",
"(",
"data",
",",
"decimation_factor",
")",
":",
"if",
"not",
"isinstance",
"(",
"decimation_factor",
",",
"int",
")",
":",
"msg",
"=",
"\"Decimation_factor must be an integer!\"",
"raise",
"TypeError",
"(",
"msg",
")",
"# reshape and only use every decimation_factor-th sample",
"data",
"=",
"np",
".",
"array",
"(",
"data",
"[",
":",
":",
"decimation_factor",
"]",
")",
"return",
"data"
] | Downsampling by applying a simple integer decimation.
Make sure that no signal is present in frequency bands above the new
Nyquist frequency (samp_rate/2/decimation_factor), e.g. by applying a
lowpass filter beforehand!
New sampling rate is old sampling rate divided by decimation_factor.
:type data: numpy.ndarray
:param data: Data to filter.
:param decimation_factor: Integer decimation factor
:return: Downsampled data (array length: old length / decimation_factor) | [
"Downsampling",
"by",
"applying",
"a",
"simple",
"integer",
"decimation",
"."
] | edb67b5098b2e7923d55010ded59ad1bf75c0f18 | https://github.com/Neurosim-lab/netpyne/blob/edb67b5098b2e7923d55010ded59ad1bf75c0f18/netpyne/support/filter.py#L336-L356 |
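The decimation above is plain slicing; a tiny numpy example (made-up data) showing every fourth sample being kept.
import numpy as np

x = np.arange(20)
print(x[::4])  # [ 0  4  8 12 16]; new sampling rate = old rate / 4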
4,739 | Neurosim-lab/netpyne | netpyne/conversion/sonataImport.py | _distributeCells | def _distributeCells(numCellsPop):
''' distribute cells across compute nodes using round-robin'''
from .. import sim
hostCells = {}
for i in range(sim.nhosts):
hostCells[i] = []
for i in range(numCellsPop):
hostCells[sim.nextHost].append(i)
sim.nextHost+=1
if sim.nextHost>=sim.nhosts:
sim.nextHost=0
if sim.cfg.verbose:
print(("Distributed population of %i cells on %s hosts: %s, next: %s"%(numCellsPop,sim.nhosts,hostCells,sim.nextHost)))
return hostCells | python | def _distributeCells(numCellsPop):
''' distribute cells across compute nodes using round-robin'''
from .. import sim
hostCells = {}
for i in range(sim.nhosts):
hostCells[i] = []
for i in range(numCellsPop):
hostCells[sim.nextHost].append(i)
sim.nextHost+=1
if sim.nextHost>=sim.nhosts:
sim.nextHost=0
if sim.cfg.verbose:
print(("Distributed population of %i cells on %s hosts: %s, next: %s"%(numCellsPop,sim.nhosts,hostCells,sim.nextHost)))
return hostCells | [
"def",
"_distributeCells",
"(",
"numCellsPop",
")",
":",
"from",
".",
".",
"import",
"sim",
"hostCells",
"=",
"{",
"}",
"for",
"i",
"in",
"range",
"(",
"sim",
".",
"nhosts",
")",
":",
"hostCells",
"[",
"i",
"]",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"numCellsPop",
")",
":",
"hostCells",
"[",
"sim",
".",
"nextHost",
"]",
".",
"append",
"(",
"i",
")",
"sim",
".",
"nextHost",
"+=",
"1",
"if",
"sim",
".",
"nextHost",
">=",
"sim",
".",
"nhosts",
":",
"sim",
".",
"nextHost",
"=",
"0",
"if",
"sim",
".",
"cfg",
".",
"verbose",
":",
"print",
"(",
"(",
"\"Distributed population of %i cells on %s hosts: %s, next: %s\"",
"%",
"(",
"numCellsPop",
",",
"sim",
".",
"nhosts",
",",
"hostCells",
",",
"sim",
".",
"nextHost",
")",
")",
")",
"return",
"hostCells"
] | distribute cells across compute nodes using round-robin | [
"distribute",
"cells",
"across",
"compute",
"nodes",
"using",
"round",
"-",
"robin"
] | edb67b5098b2e7923d55010ded59ad1bf75c0f18 | https://github.com/Neurosim-lab/netpyne/blob/edb67b5098b2e7923d55010ded59ad1bf75c0f18/netpyne/conversion/sonataImport.py#L82-L99 |
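A standalone round-robin sketch of the distribution above; unlike the original, it does not carry a persistent next-host counter across populations, so it always restarts at host 0.
def distribute(n_cells, n_hosts):
    host_cells = {host: [] for host in range(n_hosts)}
    for i in range(n_cells):
        host_cells[i % n_hosts].append(i)  # round-robin assignment
    return host_cells

print(distribute(7, 3))  # {0: [0, 3, 6], 1: [1, 4], 2: [2, 5]}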
4,740 | Neurosim-lab/netpyne | netpyne/support/csd.py | getCSD | def getCSD (lfps,sampr,minf=0.05,maxf=300,norm=True,vaknin=False,spacing=1.0):
"""
get current source density approximation using set of local field potentials with equidistant spacing
first performs a lowpass filter
lfps is a list or numpy array of LFPs arranged spatially by column
spacing is in microns
"""
datband = getbandpass(lfps,sampr,minf,maxf)
if datband.shape[0] > datband.shape[1]: # take CSD along smaller dimension
ax = 1
else:
ax = 0
# can change default to run Vaknin on bandpass filtered LFPs before calculating CSD, that
# way would have same number of channels in CSD and LFP (but not critical, and would take more RAM);
if vaknin: datband = Vaknin(datband)
if norm: removemean(datband,ax=ax)
# NB: when drawing CSD make sure that negative values (depolarizing intracellular current) drawn in red,
# and positive values (hyperpolarizing intracellular current) drawn in blue
CSD = -numpy.diff(datband,n=2,axis=ax) / spacing**2 # now each column (or row) is an electrode -- CSD along electrodes
return CSD | python | def getCSD (lfps,sampr,minf=0.05,maxf=300,norm=True,vaknin=False,spacing=1.0):
"""
get current source density approximation using set of local field potentials with equidistant spacing
first performs a lowpass filter
lfps is a list or numpy array of LFPs arranged spatially by column
spacing is in microns
"""
datband = getbandpass(lfps,sampr,minf,maxf)
if datband.shape[0] > datband.shape[1]: # take CSD along smaller dimension
ax = 1
else:
ax = 0
# can change default to run Vaknin on bandpass filtered LFPs before calculating CSD, that
# way would have same number of channels in CSD and LFP (but not critical, and would take more RAM);
if vaknin: datband = Vaknin(datband)
if norm: removemean(datband,ax=ax)
# NB: when drawing CSD make sure that negative values (depolarizing intracellular current) drawn in red,
# and positive values (hyperpolarizing intracellular current) drawn in blue
CSD = -numpy.diff(datband,n=2,axis=ax) / spacing**2 # now each column (or row) is an electrode -- CSD along electrodes
return CSD | [
"def",
"getCSD",
"(",
"lfps",
",",
"sampr",
",",
"minf",
"=",
"0.05",
",",
"maxf",
"=",
"300",
",",
"norm",
"=",
"True",
",",
"vaknin",
"=",
"False",
",",
"spacing",
"=",
"1.0",
")",
":",
"datband",
"=",
"getbandpass",
"(",
"lfps",
",",
"sampr",
",",
"minf",
",",
"maxf",
")",
"if",
"datband",
".",
"shape",
"[",
"0",
"]",
">",
"datband",
".",
"shape",
"[",
"1",
"]",
":",
"# take CSD along smaller dimension",
"ax",
"=",
"1",
"else",
":",
"ax",
"=",
"0",
"# can change default to run Vaknin on bandpass filtered LFPs before calculating CSD, that",
"# way would have same number of channels in CSD and LFP (but not critical, and would take more RAM);",
"if",
"vaknin",
":",
"datband",
"=",
"Vaknin",
"(",
"datband",
")",
"if",
"norm",
":",
"removemean",
"(",
"datband",
",",
"ax",
"=",
"ax",
")",
"# NB: when drawing CSD make sure that negative values (depolarizing intracellular current) drawn in red,",
"# and positive values (hyperpolarizing intracellular current) drawn in blue",
"CSD",
"=",
"-",
"numpy",
".",
"diff",
"(",
"datband",
",",
"n",
"=",
"2",
",",
"axis",
"=",
"ax",
")",
"/",
"spacing",
"**",
"2",
"# now each column (or row) is an electrode -- CSD along electrodes",
"return",
"CSD"
] | get current source density approximation using set of local field potentials with equidistant spacing
first performs a lowpass filter
lfps is a list or numpy array of LFPs arranged spatially by column
spacing is in microns | [
"get",
"current",
"source",
"density",
"approximation",
"using",
"set",
"of",
"local",
"field",
"potentials",
"with",
"equidistant",
"spacing",
"first",
"performs",
"a",
"lowpass",
"filter",
"lfps",
"is",
"a",
"list",
"or",
"numpy",
"array",
"of",
"LFPs",
"arranged",
"spatially",
"by",
"column",
"spacing",
"is",
"in",
"microns"
] | edb67b5098b2e7923d55010ded59ad1bf75c0f18 | https://github.com/Neurosim-lab/netpyne/blob/edb67b5098b2e7923d55010ded59ad1bf75c0f18/netpyne/support/csd.py#L35-L54 |
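A bare-bones numpy sketch of the CSD core above: the negative second spatial difference of the LFP matrix along the electrode axis (random data; the band-pass, Vaknin, and mean-removal steps are omitted).
import numpy as np

lfp = np.random.rand(8, 1000)   # 8 electrodes x 1000 time samples, rows = electrodes
spacing_um = 100.0
csd = -np.diff(lfp, n=2, axis=0) / spacing_um ** 2  # shape (6, 1000); the two edge channels are lost
print(csd.shape)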
4,741 | Neurosim-lab/netpyne | doc/source/code/HHCellFile.py | Cell.createSynapses | def createSynapses(self):
"""Add an exponentially decaying synapse """
synsoma = h.ExpSyn(self.soma(0.5))
synsoma.tau = 2
synsoma.e = 0
syndend = h.ExpSyn(self.dend(0.5))
syndend.tau = 2
syndend.e = 0
self.synlist.append(synsoma) # synlist is defined in Cell
self.synlist.append(syndend) | python | def createSynapses(self):
"""Add an exponentially decaying synapse """
synsoma = h.ExpSyn(self.soma(0.5))
synsoma.tau = 2
synsoma.e = 0
syndend = h.ExpSyn(self.dend(0.5))
syndend.tau = 2
syndend.e = 0
self.synlist.append(synsoma) # synlist is defined in Cell
self.synlist.append(syndend) | [
"def",
"createSynapses",
"(",
"self",
")",
":",
"synsoma",
"=",
"h",
".",
"ExpSyn",
"(",
"self",
".",
"soma",
"(",
"0.5",
")",
")",
"synsoma",
".",
"tau",
"=",
"2",
"synsoma",
".",
"e",
"=",
"0",
"syndend",
"=",
"h",
".",
"ExpSyn",
"(",
"self",
".",
"dend",
"(",
"0.5",
")",
")",
"syndend",
".",
"tau",
"=",
"2",
"syndend",
".",
"e",
"=",
"0",
"self",
".",
"synlist",
".",
"append",
"(",
"synsoma",
")",
"# synlist is defined in Cell",
"self",
".",
"synlist",
".",
"append",
"(",
"syndend",
")"
] | Add an exponentially decaying synapse | [
"Add",
"an",
"exponentially",
"decaying",
"synapse"
] | edb67b5098b2e7923d55010ded59ad1bf75c0f18 | https://github.com/Neurosim-lab/netpyne/blob/edb67b5098b2e7923d55010ded59ad1bf75c0f18/doc/source/code/HHCellFile.py#L30-L39 |
4,742 | Neurosim-lab/netpyne | doc/source/code/HHCellFile.py | Cell.createNetcon | def createNetcon(self, thresh=10):
""" created netcon to record spikes """
nc = h.NetCon(self.soma(0.5)._ref_v, None, sec = self.soma)
nc.threshold = thresh
return nc | python | def createNetcon(self, thresh=10):
""" created netcon to record spikes """
nc = h.NetCon(self.soma(0.5)._ref_v, None, sec = self.soma)
nc.threshold = thresh
return nc | [
"def",
"createNetcon",
"(",
"self",
",",
"thresh",
"=",
"10",
")",
":",
"nc",
"=",
"h",
".",
"NetCon",
"(",
"self",
".",
"soma",
"(",
"0.5",
")",
".",
"_ref_v",
",",
"None",
",",
"sec",
"=",
"self",
".",
"soma",
")",
"nc",
".",
"threshold",
"=",
"thresh",
"return",
"nc"
] | created netcon to record spikes | [
"created",
"netcon",
"to",
"record",
"spikes"
] | edb67b5098b2e7923d55010ded59ad1bf75c0f18 | https://github.com/Neurosim-lab/netpyne/blob/edb67b5098b2e7923d55010ded59ad1bf75c0f18/doc/source/code/HHCellFile.py#L42-L46 |
4,743 | Neurosim-lab/netpyne | doc/source/code/HHCellFile.py | HHCellClass.createSections | def createSections(self):
"""Create the sections of the cell."""
self.soma = h.Section(name='soma', cell=self)
self.dend = h.Section(name='dend', cell=self) | python | def createSections(self):
"""Create the sections of the cell."""
self.soma = h.Section(name='soma', cell=self)
self.dend = h.Section(name='dend', cell=self) | [
"def",
"createSections",
"(",
"self",
")",
":",
"self",
".",
"soma",
"=",
"h",
".",
"Section",
"(",
"name",
"=",
"'soma'",
",",
"cell",
"=",
"self",
")",
"self",
".",
"dend",
"=",
"h",
".",
"Section",
"(",
"name",
"=",
"'dend'",
",",
"cell",
"=",
"self",
")"
] | Create the sections of the cell. | [
"Create",
"the",
"sections",
"of",
"the",
"cell",
"."
] | edb67b5098b2e7923d55010ded59ad1bf75c0f18 | https://github.com/Neurosim-lab/netpyne/blob/edb67b5098b2e7923d55010ded59ad1bf75c0f18/doc/source/code/HHCellFile.py#L51-L54 |
4,744 | Neurosim-lab/netpyne | doc/source/code/HHCellFile.py | HHCellClass.defineGeometry | def defineGeometry(self):
"""Set the 3D geometry of the cell."""
self.soma.L = 18.8
self.soma.diam = 18.8
self.soma.Ra = 123.0
self.dend.L = 200.0
self.dend.diam = 1.0
self.dend.Ra = 100.0 | python | def defineGeometry(self):
"""Set the 3D geometry of the cell."""
self.soma.L = 18.8
self.soma.diam = 18.8
self.soma.Ra = 123.0
self.dend.L = 200.0
self.dend.diam = 1.0
self.dend.Ra = 100.0 | [
"def",
"defineGeometry",
"(",
"self",
")",
":",
"self",
".",
"soma",
".",
"L",
"=",
"18.8",
"self",
".",
"soma",
".",
"diam",
"=",
"18.8",
"self",
".",
"soma",
".",
"Ra",
"=",
"123.0",
"self",
".",
"dend",
".",
"L",
"=",
"200.0",
"self",
".",
"dend",
".",
"diam",
"=",
"1.0",
"self",
".",
"dend",
".",
"Ra",
"=",
"100.0"
] | Set the 3D geometry of the cell. | [
"Set",
"the",
"3D",
"geometry",
"of",
"the",
"cell",
"."
] | edb67b5098b2e7923d55010ded59ad1bf75c0f18 | https://github.com/Neurosim-lab/netpyne/blob/edb67b5098b2e7923d55010ded59ad1bf75c0f18/doc/source/code/HHCellFile.py#L56-L64 |
4,745 | Neurosim-lab/netpyne | doc/source/code/HHCellFile.py | HHCellClass.defineBiophysics | def defineBiophysics(self):
"""Assign the membrane properties across the cell."""
# Insert active Hodgkin-Huxley current in the soma
self.soma.insert('hh')
self.soma.gnabar_hh = 0.12 # Sodium conductance in S/cm2
self.soma.gkbar_hh = 0.036 # Potassium conductance in S/cm2
self.soma.gl_hh = 0.003 # Leak conductance in S/cm2
self.soma.el_hh = -70 # Reversal potential in mV
self.dend.insert('pas')
self.dend.g_pas = 0.001 # Passive conductance in S/cm2
self.dend.e_pas = -65 # Leak reversal potential mV
self.dend.nseg = 1000 | python | def defineBiophysics(self):
"""Assign the membrane properties across the cell."""
# Insert active Hodgkin-Huxley current in the soma
self.soma.insert('hh')
self.soma.gnabar_hh = 0.12 # Sodium conductance in S/cm2
self.soma.gkbar_hh = 0.036 # Potassium conductance in S/cm2
self.soma.gl_hh = 0.003 # Leak conductance in S/cm2
self.soma.el_hh = -70 # Reversal potential in mV
self.dend.insert('pas')
self.dend.g_pas = 0.001 # Passive conductance in S/cm2
self.dend.e_pas = -65 # Leak reversal potential mV
self.dend.nseg = 1000 | [
"def",
"defineBiophysics",
"(",
"self",
")",
":",
"# Insert active Hodgkin-Huxley current in the soma",
"self",
".",
"soma",
".",
"insert",
"(",
"'hh'",
")",
"self",
".",
"soma",
".",
"gnabar_hh",
"=",
"0.12",
"# Sodium conductance in S/cm2",
"self",
".",
"soma",
".",
"gkbar_hh",
"=",
"0.036",
"# Potassium conductance in S/cm2",
"self",
".",
"soma",
".",
"gl_hh",
"=",
"0.003",
"# Leak conductance in S/cm2",
"self",
".",
"soma",
".",
"el_hh",
"=",
"-",
"70",
"# Reversal potential in mV",
"self",
".",
"dend",
".",
"insert",
"(",
"'pas'",
")",
"self",
".",
"dend",
".",
"g_pas",
"=",
"0.001",
"# Passive conductance in S/cm2",
"self",
".",
"dend",
".",
"e_pas",
"=",
"-",
"65",
"# Leak reversal potential mV",
"self",
".",
"dend",
".",
"nseg",
"=",
"1000"
] | Assign the membrane properties across the cell. | [
"Assign",
"the",
"membrane",
"properties",
"across",
"the",
"cell",
"."
] | edb67b5098b2e7923d55010ded59ad1bf75c0f18 | https://github.com/Neurosim-lab/netpyne/blob/edb67b5098b2e7923d55010ded59ad1bf75c0f18/doc/source/code/HHCellFile.py#L66-L78 |
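The three HHCellFile records above define methods of a tutorial cell class; the snippet below is an assumed stand-alone equivalent written directly against NEURON's Python API, folding in the ExpSyn synapse and NetCon spike detector from the earlier Cell records. It is a sketch of the same geometry and biophysics, not the class as shipped.

from neuron import h

soma = h.Section(name='soma')
dend = h.Section(name='dend')
dend.connect(soma(1))                              # assumed topology: dendrite attached to the distal end of the soma
soma.L = soma.diam = 18.8; soma.Ra = 123.0
soma.insert('hh')                                  # active Hodgkin-Huxley currents in the soma
dend.L = 200.0; dend.diam = 1.0; dend.Ra = 100.0; dend.nseg = 1000
dend.insert('pas')
dend.g_pas = 0.001; dend.e_pas = -65               # passive dendrite
syn = h.ExpSyn(dend(0.5)); syn.tau = 2; syn.e = 0  # exponentially decaying synapse
spike_detector = h.NetCon(soma(0.5)._ref_v, None, sec=soma)
spike_detector.threshold = 10                      # spike threshold in mV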
4,746 | Neurosim-lab/netpyne | netpyne/support/morphology.py | shapeplot | def shapeplot(h,ax,sections=None,order='pre',cvals=None,\
clim=None,cmap=cm.YlOrBr_r, legend=True, **kwargs): # meanLineWidth=1.0, maxLineWidth=10.0,
"""
Plots a 3D shapeplot
Args:
h = hocObject to interface with neuron
ax = matplotlib axis for plotting
sections = list of h.Section() objects to be plotted
order = { None= use h.allsec() to get sections
'pre'= pre-order traversal of morphology }
cvals = list/array with values mapped to color by cmap; useful
for displaying voltage, calcium or some other state
variable across the shapeplot.
**kwargs passes on to matplotlib (e.g. color='r' for red lines)
Returns:
lines = list of line objects making up shapeplot
"""
# Default is to plot all sections.
if sections is None:
if order == 'pre':
sections = allsec_preorder(h) # Get sections in "pre-order"
else:
sections = list(h.allsec())
# Determine color limits
if cvals is not None and clim is None:
clim = [np.nanmin(cvals), np.nanmax(cvals)]
# Plot each segment as a line
lines = []
i = 0
allDiams = []
for sec in sections:
allDiams.append(get_section_diams(h,sec))
#maxDiams = max([max(d) for d in allDiams])
#meanDiams = np.mean([np.mean(d) for d in allDiams])
for isec,sec in enumerate(sections):
xyz = get_section_path(h,sec)
seg_paths = interpolate_jagged(xyz,sec.nseg)
diams = allDiams[isec] # represent diams as linewidths
linewidths = diams # linewidth is in points so can use actual diams to plot
# linewidths = [min(d/meanDiams*meanLineWidth, maxLineWidth) for d in diams] # use if want to scale size
for (j,path) in enumerate(seg_paths):
line, = plt.plot(path[:,0], path[:,1], path[:,2], '-k', **kwargs)
try:
line.set_linewidth(linewidths[j])
except:
pass
if cvals is not None:
if isinstance(cvals[i], numbers.Number):
# map number to colormap
try:
col = cmap(int((cvals[i]-clim[0])*255/(clim[1]-clim[0])))
except:
col = cmap(0)
else:
# use input directly. E.g. if user specified color with a string.
col = cvals[i]
line.set_color(col)
lines.append(line)
i += 1
return lines | python | def shapeplot(h,ax,sections=None,order='pre',cvals=None,\
clim=None,cmap=cm.YlOrBr_r, legend=True, **kwargs): # meanLineWidth=1.0, maxLineWidth=10.0,
"""
Plots a 3D shapeplot
Args:
h = hocObject to interface with neuron
ax = matplotlib axis for plotting
sections = list of h.Section() objects to be plotted
order = { None= use h.allsec() to get sections
'pre'= pre-order traversal of morphology }
cvals = list/array with values mapped to color by cmap; useful
for displaying voltage, calcium or some other state
variable across the shapeplot.
**kwargs passes on to matplotlib (e.g. color='r' for red lines)
Returns:
lines = list of line objects making up shapeplot
"""
# Default is to plot all sections.
if sections is None:
if order == 'pre':
sections = allsec_preorder(h) # Get sections in "pre-order"
else:
sections = list(h.allsec())
# Determine color limits
if cvals is not None and clim is None:
clim = [np.nanmin(cvals), np.nanmax(cvals)]
# Plot each segment as a line
lines = []
i = 0
allDiams = []
for sec in sections:
allDiams.append(get_section_diams(h,sec))
#maxDiams = max([max(d) for d in allDiams])
#meanDiams = np.mean([np.mean(d) for d in allDiams])
for isec,sec in enumerate(sections):
xyz = get_section_path(h,sec)
seg_paths = interpolate_jagged(xyz,sec.nseg)
diams = allDiams[isec] # represent diams as linewidths
linewidths = diams # linewidth is in points so can use actual diams to plot
# linewidths = [min(d/meanDiams*meanLineWidth, maxLineWidth) for d in diams] # use if want to scale size
for (j,path) in enumerate(seg_paths):
line, = plt.plot(path[:,0], path[:,1], path[:,2], '-k', **kwargs)
try:
line.set_linewidth(linewidths[j])
except:
pass
if cvals is not None:
if isinstance(cvals[i], numbers.Number):
# map number to colormap
try:
col = cmap(int((cvals[i]-clim[0])*255/(clim[1]-clim[0])))
except:
col = cmap(0)
else:
# use input directly. E.g. if user specified color with a string.
col = cvals[i]
line.set_color(col)
lines.append(line)
i += 1
return lines | [
"def",
"shapeplot",
"(",
"h",
",",
"ax",
",",
"sections",
"=",
"None",
",",
"order",
"=",
"'pre'",
",",
"cvals",
"=",
"None",
",",
"clim",
"=",
"None",
",",
"cmap",
"=",
"cm",
".",
"YlOrBr_r",
",",
"legend",
"=",
"True",
",",
"*",
"*",
"kwargs",
")",
":",
"# meanLineWidth=1.0, maxLineWidth=10.0,",
"# Default is to plot all sections. ",
"if",
"sections",
"is",
"None",
":",
"if",
"order",
"==",
"'pre'",
":",
"sections",
"=",
"allsec_preorder",
"(",
"h",
")",
"# Get sections in \"pre-order\"",
"else",
":",
"sections",
"=",
"list",
"(",
"h",
".",
"allsec",
"(",
")",
")",
"# Determine color limits",
"if",
"cvals",
"is",
"not",
"None",
"and",
"clim",
"is",
"None",
":",
"clim",
"=",
"[",
"np",
".",
"nanmin",
"(",
"cvals",
")",
",",
"np",
".",
"nanmax",
"(",
"cvals",
")",
"]",
"# Plot each segement as a line",
"lines",
"=",
"[",
"]",
"i",
"=",
"0",
"allDiams",
"=",
"[",
"]",
"for",
"sec",
"in",
"sections",
":",
"allDiams",
".",
"append",
"(",
"get_section_diams",
"(",
"h",
",",
"sec",
")",
")",
"#maxDiams = max([max(d) for d in allDiams])",
"#meanDiams = np.mean([np.mean(d) for d in allDiams])",
"for",
"isec",
",",
"sec",
"in",
"enumerate",
"(",
"sections",
")",
":",
"xyz",
"=",
"get_section_path",
"(",
"h",
",",
"sec",
")",
"seg_paths",
"=",
"interpolate_jagged",
"(",
"xyz",
",",
"sec",
".",
"nseg",
")",
"diams",
"=",
"allDiams",
"[",
"isec",
"]",
"# represent diams as linewidths",
"linewidths",
"=",
"diams",
"# linewidth is in points so can use actual diams to plot",
"# linewidths = [min(d/meanDiams*meanLineWidth, maxLineWidth) for d in diams] # use if want to scale size ",
"for",
"(",
"j",
",",
"path",
")",
"in",
"enumerate",
"(",
"seg_paths",
")",
":",
"line",
",",
"=",
"plt",
".",
"plot",
"(",
"path",
"[",
":",
",",
"0",
"]",
",",
"path",
"[",
":",
",",
"1",
"]",
",",
"path",
"[",
":",
",",
"2",
"]",
",",
"'-k'",
",",
"*",
"*",
"kwargs",
")",
"try",
":",
"line",
".",
"set_linewidth",
"(",
"linewidths",
"[",
"j",
"]",
")",
"except",
":",
"pass",
"if",
"cvals",
"is",
"not",
"None",
":",
"if",
"isinstance",
"(",
"cvals",
"[",
"i",
"]",
",",
"numbers",
".",
"Number",
")",
":",
"# map number to colormap",
"try",
":",
"col",
"=",
"cmap",
"(",
"int",
"(",
"(",
"cvals",
"[",
"i",
"]",
"-",
"clim",
"[",
"0",
"]",
")",
"*",
"255",
"/",
"(",
"clim",
"[",
"1",
"]",
"-",
"clim",
"[",
"0",
"]",
")",
")",
")",
"except",
":",
"col",
"=",
"cmap",
"(",
"0",
")",
"else",
":",
"# use input directly. E.g. if user specified color with a string.",
"col",
"=",
"cvals",
"[",
"i",
"]",
"line",
".",
"set_color",
"(",
"col",
")",
"lines",
".",
"append",
"(",
"line",
")",
"i",
"+=",
"1",
"return",
"lines"
] | Plots a 3D shapeplot
Args:
h = hocObject to interface with neuron
ax = matplotlib axis for plotting
sections = list of h.Section() objects to be plotted
order = { None= use h.allsec() to get sections
'pre'= pre-order traversal of morphology }
cvals = list/array with values mapped to color by cmap; useful
for displaying voltage, calcium or some other state
variable across the shapeplot.
**kwargs passes on to matplotlib (e.g. color='r' for red lines)
Returns:
lines = list of line objects making up shapeplot | [
"Plots",
"a",
"3D",
"shapeplot"
] | edb67b5098b2e7923d55010ded59ad1bf75c0f18 | https://github.com/Neurosim-lab/netpyne/blob/edb67b5098b2e7923d55010ded59ad1bf75c0f18/netpyne/support/morphology.py#L279-L346 |
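A usage sketch for shapeplot; the import path is assumed from the record and the sections plotted are whatever already exists in NEURON's h namespace (the function needs sections with 3D points to draw anything).

import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D  # noqa: F401  (registers the '3d' projection)
from neuron import h
from netpyne.support.morphology import shapeplot  # assumed import path

fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
lines = shapeplot(h, ax)     # one Line3D per segment, linewidth taken from the segment diameter
plt.show()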
4,747 | Neurosim-lab/netpyne | netpyne/support/morphology.py | shapeplot_animate | def shapeplot_animate(v,lines,nframes=None,tscale='linear',\
clim=[-80,50],cmap=cm.YlOrBr_r):
""" Returns animate function which updates color of shapeplot """
if nframes is None:
nframes = v.shape[0]
if tscale == 'linear':
def animate(i):
i_t = int((i/nframes)*v.shape[0])
for i_seg in range(v.shape[1]):
lines[i_seg].set_color(cmap(int((v[i_t,i_seg]-clim[0])*255/(clim[1]-clim[0]))))
return []
elif tscale == 'log':
def animate(i):
i_t = int(np.round((v.shape[0] ** (1.0/(nframes-1))) ** i - 1))
for i_seg in range(v.shape[1]):
lines[i_seg].set_color(cmap(int((v[i_t,i_seg]-clim[0])*255/(clim[1]-clim[0]))))
return []
else:
raise ValueError("Unrecognized option '%s' for tscale" % tscale)
return animate | python | def shapeplot_animate(v,lines,nframes=None,tscale='linear',\
clim=[-80,50],cmap=cm.YlOrBr_r):
""" Returns animate function which updates color of shapeplot """
if nframes is None:
nframes = v.shape[0]
if tscale == 'linear':
def animate(i):
i_t = int((i/nframes)*v.shape[0])
for i_seg in range(v.shape[1]):
lines[i_seg].set_color(cmap(int((v[i_t,i_seg]-clim[0])*255/(clim[1]-clim[0]))))
return []
elif tscale == 'log':
def animate(i):
i_t = int(np.round((v.shape[0] ** (1.0/(nframes-1))) ** i - 1))
for i_seg in range(v.shape[1]):
lines[i_seg].set_color(cmap(int((v[i_t,i_seg]-clim[0])*255/(clim[1]-clim[0]))))
return []
else:
raise ValueError("Unrecognized option '%s' for tscale" % tscale)
return animate | [
"def",
"shapeplot_animate",
"(",
"v",
",",
"lines",
",",
"nframes",
"=",
"None",
",",
"tscale",
"=",
"'linear'",
",",
"clim",
"=",
"[",
"-",
"80",
",",
"50",
"]",
",",
"cmap",
"=",
"cm",
".",
"YlOrBr_r",
")",
":",
"if",
"nframes",
"is",
"None",
":",
"nframes",
"=",
"v",
".",
"shape",
"[",
"0",
"]",
"if",
"tscale",
"==",
"'linear'",
":",
"def",
"animate",
"(",
"i",
")",
":",
"i_t",
"=",
"int",
"(",
"(",
"i",
"/",
"nframes",
")",
"*",
"v",
".",
"shape",
"[",
"0",
"]",
")",
"for",
"i_seg",
"in",
"range",
"(",
"v",
".",
"shape",
"[",
"1",
"]",
")",
":",
"lines",
"[",
"i_seg",
"]",
".",
"set_color",
"(",
"cmap",
"(",
"int",
"(",
"(",
"v",
"[",
"i_t",
",",
"i_seg",
"]",
"-",
"clim",
"[",
"0",
"]",
")",
"*",
"255",
"/",
"(",
"clim",
"[",
"1",
"]",
"-",
"clim",
"[",
"0",
"]",
")",
")",
")",
")",
"return",
"[",
"]",
"elif",
"tscale",
"==",
"'log'",
":",
"def",
"animate",
"(",
"i",
")",
":",
"i_t",
"=",
"int",
"(",
"np",
".",
"round",
"(",
"(",
"v",
".",
"shape",
"[",
"0",
"]",
"**",
"(",
"1.0",
"/",
"(",
"nframes",
"-",
"1",
")",
")",
")",
"**",
"i",
"-",
"1",
")",
")",
"for",
"i_seg",
"in",
"range",
"(",
"v",
".",
"shape",
"[",
"1",
"]",
")",
":",
"lines",
"[",
"i_seg",
"]",
".",
"set_color",
"(",
"cmap",
"(",
"int",
"(",
"(",
"v",
"[",
"i_t",
",",
"i_seg",
"]",
"-",
"clim",
"[",
"0",
"]",
")",
"*",
"255",
"/",
"(",
"clim",
"[",
"1",
"]",
"-",
"clim",
"[",
"0",
"]",
")",
")",
")",
")",
"return",
"[",
"]",
"else",
":",
"raise",
"ValueError",
"(",
"\"Unrecognized option '%s' for tscale\"",
"%",
"tscale",
")",
"return",
"animate"
] | Returns animate function which updates color of shapeplot | [
"Returns",
"animate",
"function",
"which",
"updates",
"color",
"of",
"shapeplot"
] | edb67b5098b2e7923d55010ded59ad1bf75c0f18 | https://github.com/Neurosim-lab/netpyne/blob/edb67b5098b2e7923d55010ded59ad1bf75c0f18/netpyne/support/morphology.py#L348-L368 |
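Continuing the previous sketch (fig, lines and plt assumed to be in scope from it): shapeplot_animate returns a per-frame update callable that matplotlib's FuncAnimation can drive; the voltage array below is a random placeholder with one column per plotted segment.

import numpy as np
from matplotlib import animation
from netpyne.support.morphology import shapeplot_animate  # assumed import path

nframes = 100
v = np.random.uniform(-80, 50, size=(1000, len(lines)))   # placeholder (time x segment) voltages
update = shapeplot_animate(v, lines, nframes=nframes, clim=[-80, 50])
anim = animation.FuncAnimation(fig, update, frames=nframes, interval=50, blit=False)
plt.show()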
4,748 | Neurosim-lab/netpyne | netpyne/support/morphology.py | mark_locations | def mark_locations(h,section,locs,markspec='or',**kwargs):
"""
Marks one or more locations along a section. Could be used to
mark the location of a recording or electrical stimulation.
Args:
h = hocObject to interface with neuron
section = reference to section
locs = float between 0 and 1, or array of floats
optional arguments specify details of marker
Returns:
line = reference to plotted markers
"""
# get list of cartesian coordinates specifying section path
xyz = get_section_path(h,section)
(r,theta,phi) = sequential_spherical(xyz)
rcum = np.append(0,np.cumsum(r))
# convert locs into lengths from the beginning of the path
if type(locs) is float or type(locs) is np.float64:
locs = np.array([locs])
if type(locs) is list:
locs = np.array(locs)
lengths = locs*rcum[-1]
# find cartesian coordinates for markers
xyz_marks = []
for targ_length in lengths:
xyz_marks.append(find_coord(targ_length,xyz,rcum,theta,phi))
xyz_marks = np.array(xyz_marks)
# plot markers
line, = plt.plot(xyz_marks[:,0], xyz_marks[:,1], \
xyz_marks[:,2], markspec, **kwargs)
return line | python | def mark_locations(h,section,locs,markspec='or',**kwargs):
"""
Marks one or more locations along a section. Could be used to
mark the location of a recording or electrical stimulation.
Args:
h = hocObject to interface with neuron
section = reference to section
locs = float between 0 and 1, or array of floats
optional arguments specify details of marker
Returns:
line = reference to plotted markers
"""
# get list of cartesian coordinates specifying section path
xyz = get_section_path(h,section)
(r,theta,phi) = sequential_spherical(xyz)
rcum = np.append(0,np.cumsum(r))
# convert locs into lengths from the beginning of the path
if type(locs) is float or type(locs) is np.float64:
locs = np.array([locs])
if type(locs) is list:
locs = np.array(locs)
lengths = locs*rcum[-1]
# find cartesian coordinates for markers
xyz_marks = []
for targ_length in lengths:
xyz_marks.append(find_coord(targ_length,xyz,rcum,theta,phi))
xyz_marks = np.array(xyz_marks)
# plot markers
line, = plt.plot(xyz_marks[:,0], xyz_marks[:,1], \
xyz_marks[:,2], markspec, **kwargs)
return line | [
"def",
"mark_locations",
"(",
"h",
",",
"section",
",",
"locs",
",",
"markspec",
"=",
"'or'",
",",
"*",
"*",
"kwargs",
")",
":",
"# get list of cartesian coordinates specifying section path",
"xyz",
"=",
"get_section_path",
"(",
"h",
",",
"section",
")",
"(",
"r",
",",
"theta",
",",
"phi",
")",
"=",
"sequential_spherical",
"(",
"xyz",
")",
"rcum",
"=",
"np",
".",
"append",
"(",
"0",
",",
"np",
".",
"cumsum",
"(",
"r",
")",
")",
"# convert locs into lengths from the beginning of the path",
"if",
"type",
"(",
"locs",
")",
"is",
"float",
"or",
"type",
"(",
"locs",
")",
"is",
"np",
".",
"float64",
":",
"locs",
"=",
"np",
".",
"array",
"(",
"[",
"locs",
"]",
")",
"if",
"type",
"(",
"locs",
")",
"is",
"list",
":",
"locs",
"=",
"np",
".",
"array",
"(",
"locs",
")",
"lengths",
"=",
"locs",
"*",
"rcum",
"[",
"-",
"1",
"]",
"# find cartesian coordinates for markers",
"xyz_marks",
"=",
"[",
"]",
"for",
"targ_length",
"in",
"lengths",
":",
"xyz_marks",
".",
"append",
"(",
"find_coord",
"(",
"targ_length",
",",
"xyz",
",",
"rcum",
",",
"theta",
",",
"phi",
")",
")",
"xyz_marks",
"=",
"np",
".",
"array",
"(",
"xyz_marks",
")",
"# plot markers",
"line",
",",
"=",
"plt",
".",
"plot",
"(",
"xyz_marks",
"[",
":",
",",
"0",
"]",
",",
"xyz_marks",
"[",
":",
",",
"1",
"]",
",",
"xyz_marks",
"[",
":",
",",
"2",
"]",
",",
"markspec",
",",
"*",
"*",
"kwargs",
")",
"return",
"line"
] | Marks one or more locations along a section. Could be used to
mark the location of a recording or electrical stimulation.
Args:
h = hocObject to interface with neuron
section = reference to section
locs = float between 0 and 1, or array of floats
optional arguments specify details of marker
Returns:
line = reference to plotted markers | [
"Marks",
"one",
"or",
"more",
"locations",
"on",
"along",
"a",
"section",
".",
"Could",
"be",
"used",
"to",
"mark",
"the",
"location",
"of",
"a",
"recording",
"or",
"electrical",
"stimulation",
"."
] | edb67b5098b2e7923d55010ded59ad1bf75c0f18 | https://github.com/Neurosim-lab/netpyne/blob/edb67b5098b2e7923d55010ded59ad1bf75c0f18/netpyne/support/morphology.py#L370-L406 |
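A usage sketch for mark_locations, assuming soma and dend are NEURON sections that already carry 3D points and that a 3D axis is current (as in the shapeplot sketch above); the locations and marker style are arbitrary.

from netpyne.support.morphology import mark_locations  # assumed import path

mark_locations(h, soma, 0.5)                          # mark the midpoint of the soma (red circle by default)
mark_locations(h, dend, [0.25, 0.75], markspec='ob')  # two blue-circle markers along the dendrite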
4,749 | Neurosim-lab/netpyne | netpyne/support/morphology.py | root_sections | def root_sections(h):
"""
Returns a list of all sections that have no parent.
"""
roots = []
for section in h.allsec():
sref = h.SectionRef(sec=section)
# has_parent returns a float... cast to bool
if sref.has_parent() < 0.9:
roots.append(section)
return roots | python | def root_sections(h):
"""
Returns a list of all sections that have no parent.
"""
roots = []
for section in h.allsec():
sref = h.SectionRef(sec=section)
# has_parent returns a float... cast to bool
if sref.has_parent() < 0.9:
roots.append(section)
return roots | [
"def",
"root_sections",
"(",
"h",
")",
":",
"roots",
"=",
"[",
"]",
"for",
"section",
"in",
"h",
".",
"allsec",
"(",
")",
":",
"sref",
"=",
"h",
".",
"SectionRef",
"(",
"sec",
"=",
"section",
")",
"# has_parent returns a float... cast to bool",
"if",
"sref",
".",
"has_parent",
"(",
")",
"<",
"0.9",
":",
"roots",
".",
"append",
"(",
"section",
")",
"return",
"roots"
] | Returns a list of all sections that have no parent. | [
"Returns",
"a",
"list",
"of",
"all",
"sections",
"that",
"have",
"no",
"parent",
"."
] | edb67b5098b2e7923d55010ded59ad1bf75c0f18 | https://github.com/Neurosim-lab/netpyne/blob/edb67b5098b2e7923d55010ded59ad1bf75c0f18/netpyne/support/morphology.py#L408-L418 |
4,750 | Neurosim-lab/netpyne | netpyne/support/morphology.py | leaf_sections | def leaf_sections(h):
"""
Returns a list of all sections that have no children.
"""
leaves = []
for section in h.allsec():
sref = h.SectionRef(sec=section)
# nchild returns a float... cast to bool
if sref.nchild() < 0.9:
leaves.append(section)
return leaves | python | def leaf_sections(h):
"""
Returns a list of all sections that have no children.
"""
leaves = []
for section in h.allsec():
sref = h.SectionRef(sec=section)
# nchild returns a float... cast to bool
if sref.nchild() < 0.9:
leaves.append(section)
return leaves | [
"def",
"leaf_sections",
"(",
"h",
")",
":",
"leaves",
"=",
"[",
"]",
"for",
"section",
"in",
"h",
".",
"allsec",
"(",
")",
":",
"sref",
"=",
"h",
".",
"SectionRef",
"(",
"sec",
"=",
"section",
")",
"# nchild returns a float... cast to bool",
"if",
"sref",
".",
"nchild",
"(",
")",
"<",
"0.9",
":",
"leaves",
".",
"append",
"(",
"section",
")",
"return",
"leaves"
] | Returns a list of all sections that have no children. | [
"Returns",
"a",
"list",
"of",
"all",
"sections",
"that",
"have",
"no",
"children",
"."
] | edb67b5098b2e7923d55010ded59ad1bf75c0f18 | https://github.com/Neurosim-lab/netpyne/blob/edb67b5098b2e7923d55010ded59ad1bf75c0f18/netpyne/support/morphology.py#L420-L430 |
4,751 | Neurosim-lab/netpyne | netpyne/support/morphology.py | root_indices | def root_indices(sec_list):
"""
Returns the index of all sections without a parent.
"""
roots = []
for i,section in enumerate(sec_list):
sref = h.SectionRef(sec=section)
# has_parent returns a float... cast to bool
if sref.has_parent() < 0.9:
roots.append(i)
return roots | python | def root_indices(sec_list):
"""
Returns the index of all sections without a parent.
"""
roots = []
for i,section in enumerate(sec_list):
sref = h.SectionRef(sec=section)
# has_parent returns a float... cast to bool
if sref.has_parent() < 0.9:
roots.append(i)
return roots | [
"def",
"root_indices",
"(",
"sec_list",
")",
":",
"roots",
"=",
"[",
"]",
"for",
"i",
",",
"section",
"in",
"enumerate",
"(",
"sec_list",
")",
":",
"sref",
"=",
"h",
".",
"SectionRef",
"(",
"sec",
"=",
"section",
")",
"# has_parent returns a float... cast to bool",
"if",
"sref",
".",
"has_parent",
"(",
")",
"<",
"0.9",
":",
"roots",
".",
"append",
"(",
"i",
")",
"return",
"roots"
] | Returns the index of all sections without a parent. | [
"Returns",
"the",
"index",
"of",
"all",
"sections",
"without",
"a",
"parent",
"."
] | edb67b5098b2e7923d55010ded59ad1bf75c0f18 | https://github.com/Neurosim-lab/netpyne/blob/edb67b5098b2e7923d55010ded59ad1bf75c0f18/netpyne/support/morphology.py#L432-L442 |
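A combined usage sketch for the three small helpers above (root_sections, leaf_sections, root_indices); import paths are assumed from the record and the sections are whatever is already defined in h.

from neuron import h
from netpyne.support.morphology import root_sections, leaf_sections, root_indices  # assumed import paths

roots = root_sections(h)                    # sections with no parent
leaves = leaf_sections(h)                   # sections with no children
root_idx = root_indices(list(h.allsec()))   # indices of the parentless sections within a given list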
4,752 | Neurosim-lab/netpyne | netpyne/support/morphology.py | branch_order | def branch_order(h,section, path=[]):
"""
Returns the branch order of a section
"""
path.append(section)
sref = h.SectionRef(sec=section)
# has_parent returns a float... cast to bool
if sref.has_parent() < 0.9:
return 0 # section is a root
else:
nchild = len(list(h.SectionRef(sec=sref.parent).child))
if nchild <= 1.1:
return branch_order(h,sref.parent,path)
else:
return 1+branch_order(h,sref.parent,path) | python | def branch_order(h,section, path=[]):
"""
Returns the branch order of a section
"""
path.append(section)
sref = h.SectionRef(sec=section)
# has_parent returns a float... cast to bool
if sref.has_parent() < 0.9:
return 0 # section is a root
else:
nchild = len(list(h.SectionRef(sec=sref.parent).child))
if nchild <= 1.1:
return branch_order(h,sref.parent,path)
else:
return 1+branch_order(h,sref.parent,path) | [
"def",
"branch_order",
"(",
"h",
",",
"section",
",",
"path",
"=",
"[",
"]",
")",
":",
"path",
".",
"append",
"(",
"section",
")",
"sref",
"=",
"h",
".",
"SectionRef",
"(",
"sec",
"=",
"section",
")",
"# has_parent returns a float... cast to bool",
"if",
"sref",
".",
"has_parent",
"(",
")",
"<",
"0.9",
":",
"return",
"0",
"# section is a root",
"else",
":",
"nchild",
"=",
"len",
"(",
"list",
"(",
"h",
".",
"SectionRef",
"(",
"sec",
"=",
"sref",
".",
"parent",
")",
".",
"child",
")",
")",
"if",
"nchild",
"<=",
"1.1",
":",
"return",
"branch_order",
"(",
"h",
",",
"sref",
".",
"parent",
",",
"path",
")",
"else",
":",
"return",
"1",
"+",
"branch_order",
"(",
"h",
",",
"sref",
".",
"parent",
",",
"path",
")"
] | Returns the branch order of a section | [
"Returns",
"the",
"branch",
"order",
"of",
"a",
"section"
] | edb67b5098b2e7923d55010ded59ad1bf75c0f18 | https://github.com/Neurosim-lab/netpyne/blob/edb67b5098b2e7923d55010ded59ad1bf75c0f18/netpyne/support/morphology.py#L504-L518 |
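A usage note for branch_order rather than part of the record: because path defaults to a mutable list, the same list object is reused and keeps growing across calls, so passing a fresh list per call (as below) is the safer pattern. The section name is a placeholder.

from neuron import h
from netpyne.support.morphology import branch_order  # assumed import path

sec = h.Section(name='demo_root')           # a parentless section, so its branch order is 0
visited = []
order = branch_order(h, sec, path=visited)  # pass a fresh list instead of relying on the shared default
print(order, [s.name() for s in visited])   # 0 ['demo_root']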
4,753 | Neurosim-lab/netpyne | netpyne/network/pop.py | Pop.createCells | def createCells(self):
'''Function to instantiate Cell objects based on the characteristics of this population'''
# add individual cells
if 'cellsList' in self.tags:
cells = self.createCellsList()
# create cells based on fixed number of cells
elif 'numCells' in self.tags:
cells = self.createCellsFixedNum()
# create cells based on density (optional ynorm-dep)
elif 'density' in self.tags:
cells = self.createCellsDensity()
# create cells based on density (optional ynorm-dep)
elif 'gridSpacing' in self.tags:
cells = self.createCellsGrid()
# not enough tags to create cells
else:
self.tags['numCells'] = 1
print('Warning: number or density of cells not specified for population %s; defaulting to numCells = 1' % (self.tags['pop']))
cells = self.createCellsFixedNum()
return cells | python | def createCells(self):
'''Function to instantiate Cell objects based on the characteristics of this population'''
# add individual cells
if 'cellsList' in self.tags:
cells = self.createCellsList()
# create cells based on fixed number of cells
elif 'numCells' in self.tags:
cells = self.createCellsFixedNum()
# create cells based on density (optional ynorm-dep)
elif 'density' in self.tags:
cells = self.createCellsDensity()
# create cells based on density (optional ynorm-dep)
elif 'gridSpacing' in self.tags:
cells = self.createCellsGrid()
# not enough tags to create cells
else:
self.tags['numCells'] = 1
print('Warning: number or density of cells not specified for population %s; defaulting to numCells = 1' % (self.tags['pop']))
cells = self.createCellsFixedNum()
return cells | [
"def",
"createCells",
"(",
"self",
")",
":",
"# add individual cells",
"if",
"'cellsList'",
"in",
"self",
".",
"tags",
":",
"cells",
"=",
"self",
".",
"createCellsList",
"(",
")",
"# create cells based on fixed number of cells",
"elif",
"'numCells'",
"in",
"self",
".",
"tags",
":",
"cells",
"=",
"self",
".",
"createCellsFixedNum",
"(",
")",
"# create cells based on density (optional ynorm-dep)",
"elif",
"'density'",
"in",
"self",
".",
"tags",
":",
"cells",
"=",
"self",
".",
"createCellsDensity",
"(",
")",
"# create cells based on density (optional ynorm-dep)",
"elif",
"'gridSpacing'",
"in",
"self",
".",
"tags",
":",
"cells",
"=",
"self",
".",
"createCellsGrid",
"(",
")",
"# not enough tags to create cells",
"else",
":",
"self",
".",
"tags",
"[",
"'numCells'",
"]",
"=",
"1",
"print",
"(",
"'Warninig: number or density of cells not specified for population %s; defaulting to numCells = 1'",
"%",
"(",
"self",
".",
"tags",
"[",
"'pop'",
"]",
")",
")",
"cells",
"=",
"self",
".",
"createCellsFixedNum",
"(",
")",
"return",
"cells"
] | Function to instantiate Cell objects based on the characteristics of this population | [
"Function",
"to",
"instantiate",
"Cell",
"objects",
"based",
"on",
"the",
"characteristics",
"of",
"this",
"population"
] | edb67b5098b2e7923d55010ded59ad1bf75c0f18 | https://github.com/Neurosim-lab/netpyne/blob/edb67b5098b2e7923d55010ded59ad1bf75c0f18/netpyne/network/pop.py#L64-L88 |
4,754 | Neurosim-lab/netpyne | netpyne/network/pop.py | Pop.createCellsList | def createCellsList (self):
''' Create population cells based on list of individual cells'''
from .. import sim
cells = []
self.tags['numCells'] = len(self.tags['cellsList'])
for i in self._distributeCells(len(self.tags['cellsList']))[sim.rank]:
#if 'cellModel' in self.tags['cellsList'][i]:
# self.cellModelClass = getattr(f, self.tags['cellsList'][i]['cellModel']) # select cell class to instantiate cells based on the cellModel tags
gid = sim.net.lastGid+i
self.cellGids.append(gid) # add gid list of cells belonging to this population - not needed?
cellTags = {k: v for (k, v) in self.tags.items() if k in sim.net.params.popTagsCopiedToCells} # copy all pop tags to cell tags, except those that are pop-specific
cellTags['pop'] = self.tags['pop']
cellTags.update(self.tags['cellsList'][i]) # add tags specific to this cell
for coord in ['x','y','z']:
if coord in cellTags: # if absolute coord exists
cellTags[coord+'norm'] = cellTags[coord]/getattr(sim.net.params, 'size'+coord.upper()) # calculate norm coord
elif coord+'norm' in cellTags: # elif norm coord exists
cellTags[coord] = cellTags[coord+'norm']*getattr(sim.net.params, 'size'+coord.upper()) # calculate norm coord
else:
cellTags[coord+'norm'] = cellTags[coord] = 0
if 'cellModel' in self.tags.keys() and self.tags['cellModel'] == 'Vecstim': # if VecStim, copy spike times to params
cellTags['params']['spkTimes'] = self.tags['cellsList'][i]['spkTimes']
cells.append(self.cellModelClass(gid, cellTags)) # instantiate Cell object
if sim.cfg.verbose: print(('Cell %d/%d (gid=%d) of pop %d, on node %d, '%(i, self.tags['numCells']-1, gid, i, sim.rank)))
sim.net.lastGid = sim.net.lastGid + len(self.tags['cellsList'])
return cells | python | def createCellsList (self):
''' Create population cells based on list of individual cells'''
from .. import sim
cells = []
self.tags['numCells'] = len(self.tags['cellsList'])
for i in self._distributeCells(len(self.tags['cellsList']))[sim.rank]:
#if 'cellModel' in self.tags['cellsList'][i]:
# self.cellModelClass = getattr(f, self.tags['cellsList'][i]['cellModel']) # select cell class to instantiate cells based on the cellModel tags
gid = sim.net.lastGid+i
self.cellGids.append(gid) # add gid list of cells belonging to this population - not needed?
cellTags = {k: v for (k, v) in self.tags.items() if k in sim.net.params.popTagsCopiedToCells} # copy all pop tags to cell tags, except those that are pop-specific
cellTags['pop'] = self.tags['pop']
cellTags.update(self.tags['cellsList'][i]) # add tags specific to this cell
for coord in ['x','y','z']:
if coord in cellTags: # if absolute coord exists
cellTags[coord+'norm'] = cellTags[coord]/getattr(sim.net.params, 'size'+coord.upper()) # calculate norm coord
elif coord+'norm' in cellTags: # elif norm coord exists
cellTags[coord] = cellTags[coord+'norm']*getattr(sim.net.params, 'size'+coord.upper()) # calculate norm coord
else:
cellTags[coord+'norm'] = cellTags[coord] = 0
if 'cellModel' in self.tags.keys() and self.tags['cellModel'] == 'Vecstim': # if VecStim, copy spike times to params
cellTags['params']['spkTimes'] = self.tags['cellsList'][i]['spkTimes']
cells.append(self.cellModelClass(gid, cellTags)) # instantiate Cell object
if sim.cfg.verbose: print(('Cell %d/%d (gid=%d) of pop %d, on node %d, '%(i, self.tags['numCells']-1, gid, i, sim.rank)))
sim.net.lastGid = sim.net.lastGid + len(self.tags['cellsList'])
return cells | [
"def",
"createCellsList",
"(",
"self",
")",
":",
"from",
".",
".",
"import",
"sim",
"cells",
"=",
"[",
"]",
"self",
".",
"tags",
"[",
"'numCells'",
"]",
"=",
"len",
"(",
"self",
".",
"tags",
"[",
"'cellsList'",
"]",
")",
"for",
"i",
"in",
"self",
".",
"_distributeCells",
"(",
"len",
"(",
"self",
".",
"tags",
"[",
"'cellsList'",
"]",
")",
")",
"[",
"sim",
".",
"rank",
"]",
":",
"#if 'cellModel' in self.tags['cellsList'][i]:",
"# self.cellModelClass = getattr(f, self.tags['cellsList'][i]['cellModel']) # select cell class to instantiate cells based on the cellModel tags",
"gid",
"=",
"sim",
".",
"net",
".",
"lastGid",
"+",
"i",
"self",
".",
"cellGids",
".",
"append",
"(",
"gid",
")",
"# add gid list of cells belonging to this population - not needed?",
"cellTags",
"=",
"{",
"k",
":",
"v",
"for",
"(",
"k",
",",
"v",
")",
"in",
"self",
".",
"tags",
".",
"items",
"(",
")",
"if",
"k",
"in",
"sim",
".",
"net",
".",
"params",
".",
"popTagsCopiedToCells",
"}",
"# copy all pop tags to cell tags, except those that are pop-specific",
"cellTags",
"[",
"'pop'",
"]",
"=",
"self",
".",
"tags",
"[",
"'pop'",
"]",
"cellTags",
".",
"update",
"(",
"self",
".",
"tags",
"[",
"'cellsList'",
"]",
"[",
"i",
"]",
")",
"# add tags specific to this cells",
"for",
"coord",
"in",
"[",
"'x'",
",",
"'y'",
",",
"'z'",
"]",
":",
"if",
"coord",
"in",
"cellTags",
":",
"# if absolute coord exists",
"cellTags",
"[",
"coord",
"+",
"'norm'",
"]",
"=",
"cellTags",
"[",
"coord",
"]",
"/",
"getattr",
"(",
"sim",
".",
"net",
".",
"params",
",",
"'size'",
"+",
"coord",
".",
"upper",
"(",
")",
")",
"# calculate norm coord",
"elif",
"coord",
"+",
"'norm'",
"in",
"cellTags",
":",
"# elif norm coord exists",
"cellTags",
"[",
"coord",
"]",
"=",
"cellTags",
"[",
"coord",
"+",
"'norm'",
"]",
"*",
"getattr",
"(",
"sim",
".",
"net",
".",
"params",
",",
"'size'",
"+",
"coord",
".",
"upper",
"(",
")",
")",
"# calculate norm coord",
"else",
":",
"cellTags",
"[",
"coord",
"+",
"'norm'",
"]",
"=",
"cellTags",
"[",
"coord",
"]",
"=",
"0",
"if",
"'cellModel'",
"in",
"self",
".",
"tags",
".",
"keys",
"(",
")",
"and",
"self",
".",
"tags",
"[",
"'cellModel'",
"]",
"==",
"'Vecstim'",
":",
"# if VecStim, copy spike times to params",
"cellTags",
"[",
"'params'",
"]",
"[",
"'spkTimes'",
"]",
"=",
"self",
".",
"tags",
"[",
"'cellsList'",
"]",
"[",
"i",
"]",
"[",
"'spkTimes'",
"]",
"cells",
".",
"append",
"(",
"self",
".",
"cellModelClass",
"(",
"gid",
",",
"cellTags",
")",
")",
"# instantiate Cell object",
"if",
"sim",
".",
"cfg",
".",
"verbose",
":",
"print",
"(",
"(",
"'Cell %d/%d (gid=%d) of pop %d, on node %d, '",
"%",
"(",
"i",
",",
"self",
".",
"tags",
"[",
"'numCells'",
"]",
"-",
"1",
",",
"gid",
",",
"i",
",",
"sim",
".",
"rank",
")",
")",
")",
"sim",
".",
"net",
".",
"lastGid",
"=",
"sim",
".",
"net",
".",
"lastGid",
"+",
"len",
"(",
"self",
".",
"tags",
"[",
"'cellsList'",
"]",
")",
"return",
"cells"
] | Create population cells based on list of individual cells | [
"Create",
"population",
"cells",
"based",
"on",
"list",
"of",
"individual",
"cells"
] | edb67b5098b2e7923d55010ded59ad1bf75c0f18 | https://github.com/Neurosim-lab/netpyne/blob/edb67b5098b2e7923d55010ded59ad1bf75c0f18/netpyne/network/pop.py#L275-L301 |
4,755 | Neurosim-lab/netpyne | netpyne/sim/wrappers.py | create | def create (netParams=None, simConfig=None, output=False):
''' Sequence of commands to create network '''
from .. import sim
import __main__ as top
if not netParams: netParams = top.netParams
if not simConfig: simConfig = top.simConfig
sim.initialize(netParams, simConfig) # create network object and set cfg and net params
pops = sim.net.createPops() # instantiate network populations
cells = sim.net.createCells() # instantiate network cells based on defined populations
conns = sim.net.connectCells() # create connections between cells based on params
stims = sim.net.addStims() # add external stimulation to cells (IClamps etc)
rxd = sim.net.addRxD() # add reaction-diffusion (RxD)
simData = sim.setupRecording() # setup variables to record for each cell (spikes, V traces, etc)
if output: return (pops, cells, conns, rxd, stims, simData) | python | def create (netParams=None, simConfig=None, output=False):
''' Sequence of commands to create network '''
from .. import sim
import __main__ as top
if not netParams: netParams = top.netParams
if not simConfig: simConfig = top.simConfig
sim.initialize(netParams, simConfig) # create network object and set cfg and net params
pops = sim.net.createPops() # instantiate network populations
cells = sim.net.createCells() # instantiate network cells based on defined populations
conns = sim.net.connectCells() # create connections between cells based on params
stims = sim.net.addStims() # add external stimulation to cells (IClamps etc)
rxd = sim.net.addRxD() # add reaction-diffusion (RxD)
simData = sim.setupRecording() # setup variables to record for each cell (spikes, V traces, etc)
if output: return (pops, cells, conns, rxd, stims, simData) | [
"def",
"create",
"(",
"netParams",
"=",
"None",
",",
"simConfig",
"=",
"None",
",",
"output",
"=",
"False",
")",
":",
"from",
".",
".",
"import",
"sim",
"import",
"__main__",
"as",
"top",
"if",
"not",
"netParams",
":",
"netParams",
"=",
"top",
".",
"netParams",
"if",
"not",
"simConfig",
":",
"simConfig",
"=",
"top",
".",
"simConfig",
"sim",
".",
"initialize",
"(",
"netParams",
",",
"simConfig",
")",
"# create network object and set cfg and net params",
"pops",
"=",
"sim",
".",
"net",
".",
"createPops",
"(",
")",
"# instantiate network populations",
"cells",
"=",
"sim",
".",
"net",
".",
"createCells",
"(",
")",
"# instantiate network cells based on defined populations",
"conns",
"=",
"sim",
".",
"net",
".",
"connectCells",
"(",
")",
"# create connections between cells based on params",
"stims",
"=",
"sim",
".",
"net",
".",
"addStims",
"(",
")",
"# add external stimulation to cells (IClamps etc)",
"rxd",
"=",
"sim",
".",
"net",
".",
"addRxD",
"(",
")",
"# add reaction-diffusion (RxD)",
"simData",
"=",
"sim",
".",
"setupRecording",
"(",
")",
"# setup variables to record for each cell (spikes, V traces, etc)",
"if",
"output",
":",
"return",
"(",
"pops",
",",
"cells",
",",
"conns",
",",
"rxd",
",",
"stims",
",",
"simData",
")"
] | Sequence of commands to create network | [
"Sequence",
"of",
"commands",
"to",
"create",
"network"
] | edb67b5098b2e7923d55010ded59ad1bf75c0f18 | https://github.com/Neurosim-lab/netpyne/blob/edb67b5098b2e7923d55010ded59ad1bf75c0f18/netpyne/sim/wrappers.py#L19-L34 |
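A typical call pattern for the create() wrapper above, sketched under the assumption of a minimal model built with netpyne.specs; the population definition is a placeholder.

from netpyne import sim, specs

netParams = specs.NetParams()
simConfig = specs.SimConfig()
netParams.popParams['E'] = {'cellType': 'E', 'numCells': 20, 'cellModel': 'HH'}  # placeholder population

pops, cells, conns, rxd, stims, simData = sim.create(netParams, simConfig, output=True)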
4,756 | Neurosim-lab/netpyne | netpyne/sim/wrappers.py | intervalSimulate | def intervalSimulate (interval):
''' Sequence of commands to simulate network '''
from .. import sim
sim.runSimWithIntervalFunc(interval, sim.intervalSave) # run parallel Neuron simulation
# this gather is just a merging of files
sim.fileGather() | python | def intervalSimulate (interval):
''' Sequence of commands to simulate network '''
from .. import sim
sim.runSimWithIntervalFunc(interval, sim.intervalSave) # run parallel Neuron simulation
# this gather is just a merging of files
sim.fileGather() | [
"def",
"intervalSimulate",
"(",
"interval",
")",
":",
"from",
".",
".",
"import",
"sim",
"sim",
".",
"runSimWithIntervalFunc",
"(",
"interval",
",",
"sim",
".",
"intervalSave",
")",
"# run parallel Neuron simulation ",
"#this gather is justa merging of files",
"sim",
".",
"fileGather",
"(",
")"
] | Sequence of commands to simulate network | [
"Sequence",
"of",
"commands",
"to",
"simulate",
"network"
] | edb67b5098b2e7923d55010ded59ad1bf75c0f18 | https://github.com/Neurosim-lab/netpyne/blob/edb67b5098b2e7923d55010ded59ad1bf75c0f18/netpyne/sim/wrappers.py#L49-L54 |
4,757 | Neurosim-lab/netpyne | netpyne/sim/wrappers.py | load | def load (filename, simConfig=None, output=False, instantiate=True, createNEURONObj=True):
''' Sequence of commands load, simulate and analyse network '''
from .. import sim
sim.initialize() # create network object and set cfg and net params
sim.cfg.createNEURONObj = createNEURONObj
sim.loadAll(filename, instantiate=instantiate, createNEURONObj=createNEURONObj)
if simConfig: sim.setSimCfg(simConfig) # set after to replace potentially loaded cfg
if len(sim.net.cells) == 0 and instantiate:
pops = sim.net.createPops() # instantiate network populations
cells = sim.net.createCells() # instantiate network cells based on defined populations
conns = sim.net.connectCells() # create connections between cells based on params
stims = sim.net.addStims() # add external stimulation to cells (IClamps etc)
rxd = sim.net.addRxD() # add reaction-diffusion (RxD)
simData = sim.setupRecording() # setup variables to record for each cell (spikes, V traces, etc)
if output:
try:
return (pops, cells, conns, stims, rxd, simData)
except:
pass | python | def load (filename, simConfig=None, output=False, instantiate=True, createNEURONObj=True):
''' Sequence of commands load, simulate and analyse network '''
from .. import sim
sim.initialize() # create network object and set cfg and net params
sim.cfg.createNEURONObj = createNEURONObj
sim.loadAll(filename, instantiate=instantiate, createNEURONObj=createNEURONObj)
if simConfig: sim.setSimCfg(simConfig) # set after to replace potentially loaded cfg
if len(sim.net.cells) == 0 and instantiate:
pops = sim.net.createPops() # instantiate network populations
cells = sim.net.createCells() # instantiate network cells based on defined populations
conns = sim.net.connectCells() # create connections between cells based on params
stims = sim.net.addStims() # add external stimulation to cells (IClamps etc)
rxd = sim.net.addRxD() # add reaction-diffusion (RxD)
simData = sim.setupRecording() # setup variables to record for each cell (spikes, V traces, etc)
if output:
try:
return (pops, cells, conns, stims, rxd, simData)
except:
pass | [
"def",
"load",
"(",
"filename",
",",
"simConfig",
"=",
"None",
",",
"output",
"=",
"False",
",",
"instantiate",
"=",
"True",
",",
"createNEURONObj",
"=",
"True",
")",
":",
"from",
".",
".",
"import",
"sim",
"sim",
".",
"initialize",
"(",
")",
"# create network object and set cfg and net params",
"sim",
".",
"cfg",
".",
"createNEURONObj",
"=",
"createNEURONObj",
"sim",
".",
"loadAll",
"(",
"filename",
",",
"instantiate",
"=",
"instantiate",
",",
"createNEURONObj",
"=",
"createNEURONObj",
")",
"if",
"simConfig",
":",
"sim",
".",
"setSimCfg",
"(",
"simConfig",
")",
"# set after to replace potentially loaded cfg",
"if",
"len",
"(",
"sim",
".",
"net",
".",
"cells",
")",
"==",
"0",
"and",
"instantiate",
":",
"pops",
"=",
"sim",
".",
"net",
".",
"createPops",
"(",
")",
"# instantiate network populations",
"cells",
"=",
"sim",
".",
"net",
".",
"createCells",
"(",
")",
"# instantiate network cells based on defined populations",
"conns",
"=",
"sim",
".",
"net",
".",
"connectCells",
"(",
")",
"# create connections between cells based on params",
"stims",
"=",
"sim",
".",
"net",
".",
"addStims",
"(",
")",
"# add external stimulation to cells (IClamps etc)",
"rxd",
"=",
"sim",
".",
"net",
".",
"addRxD",
"(",
")",
"# add reaction-diffusion (RxD)",
"simData",
"=",
"sim",
".",
"setupRecording",
"(",
")",
"# setup variables to record for each cell (spikes, V traces, etc)",
"if",
"output",
":",
"try",
":",
"return",
"(",
"pops",
",",
"cells",
",",
"conns",
",",
"stims",
",",
"rxd",
",",
"simData",
")",
"except",
":",
"pass"
] | Sequence of commands load, simulate and analyse network | [
"Sequence",
"of",
"commands",
"load",
"simulate",
"and",
"analyse",
"network"
] | edb67b5098b2e7923d55010ded59ad1bf75c0f18 | https://github.com/Neurosim-lab/netpyne/blob/edb67b5098b2e7923d55010ded59ad1bf75c0f18/netpyne/sim/wrappers.py#L116-L136 |
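A usage sketch for the load() wrapper; the filename is a placeholder and the keyword arguments mirror the signature in the record.

from netpyne import sim

sim.load('mySimOutput.json')                                             # rebuild the network and recording setup from file
sim.load('mySimOutput.json', instantiate=False, createNEURONObj=False)   # data-only reload, no NEURON objects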
4,758 | Neurosim-lab/netpyne | netpyne/sim/wrappers.py | createExportNeuroML2 | def createExportNeuroML2 (netParams=None, simConfig=None, reference=None, connections=True, stimulations=True, output=False, format='xml'):
''' Sequence of commands to create and export network to NeuroML2 '''
from .. import sim
import __main__ as top
if not netParams: netParams = top.netParams
if not simConfig: simConfig = top.simConfig
sim.initialize(netParams, simConfig) # create network object and set cfg and net params
pops = sim.net.createPops() # instantiate network populations
cells = sim.net.createCells() # instantiate network cells based on defined populations
conns = sim.net.connectCells() # create connections between cells based on params
stims = sim.net.addStims() # add external stimulation to cells (IClamps etc)
rxd = sim.net.addRxD() # add reaction-diffusion (RxD)
simData = sim.setupRecording() # setup variables to record for each cell (spikes, V traces, etc)
sim.exportNeuroML2(reference,connections,stimulations,format) # export cells and connectivity to NeuroML 2 format
if output: return (pops, cells, conns, stims, rxd, simData) | python | def createExportNeuroML2 (netParams=None, simConfig=None, reference=None, connections=True, stimulations=True, output=False, format='xml'):
''' Sequence of commands to create and export network to NeuroML2 '''
from .. import sim
import __main__ as top
if not netParams: netParams = top.netParams
if not simConfig: simConfig = top.simConfig
sim.initialize(netParams, simConfig) # create network object and set cfg and net params
pops = sim.net.createPops() # instantiate network populations
cells = sim.net.createCells() # instantiate network cells based on defined populations
conns = sim.net.connectCells() # create connections between cells based on params
stims = sim.net.addStims() # add external stimulation to cells (IClamps etc)
rxd = sim.net.addRxD() # add reaction-diffusion (RxD)
simData = sim.setupRecording() # setup variables to record for each cell (spikes, V traces, etc)
sim.exportNeuroML2(reference,connections,stimulations,format) # export cells and connectivity to NeuroML 2 format
if output: return (pops, cells, conns, stims, rxd, simData) | [
"def",
"createExportNeuroML2",
"(",
"netParams",
"=",
"None",
",",
"simConfig",
"=",
"None",
",",
"reference",
"=",
"None",
",",
"connections",
"=",
"True",
",",
"stimulations",
"=",
"True",
",",
"output",
"=",
"False",
",",
"format",
"=",
"'xml'",
")",
":",
"from",
".",
".",
"import",
"sim",
"import",
"__main__",
"as",
"top",
"if",
"not",
"netParams",
":",
"netParams",
"=",
"top",
".",
"netParams",
"if",
"not",
"simConfig",
":",
"simConfig",
"=",
"top",
".",
"simConfig",
"sim",
".",
"initialize",
"(",
"netParams",
",",
"simConfig",
")",
"# create network object and set cfg and net params",
"pops",
"=",
"sim",
".",
"net",
".",
"createPops",
"(",
")",
"# instantiate network populations",
"cells",
"=",
"sim",
".",
"net",
".",
"createCells",
"(",
")",
"# instantiate network cells based on defined populations",
"conns",
"=",
"sim",
".",
"net",
".",
"connectCells",
"(",
")",
"# create connections between cells based on params",
"stims",
"=",
"sim",
".",
"net",
".",
"addStims",
"(",
")",
"# add external stimulation to cells (IClamps etc)",
"rxd",
"=",
"sim",
".",
"net",
".",
"addRxD",
"(",
")",
"# add reaction-diffusion (RxD)",
"simData",
"=",
"sim",
".",
"setupRecording",
"(",
")",
"# setup variables to record for each cell (spikes, V traces, etc)",
"sim",
".",
"exportNeuroML2",
"(",
"reference",
",",
"connections",
",",
"stimulations",
",",
"format",
")",
"# export cells and connectivity to NeuroML 2 format",
"if",
"output",
":",
"return",
"(",
"pops",
",",
"cells",
",",
"conns",
",",
"stims",
",",
"rxd",
",",
"simData",
")"
] | Sequence of commands to create and export network to NeuroML2 | [
"Sequence",
"of",
"commands",
"to",
"create",
"and",
"export",
"network",
"to",
"NeuroML2"
] | edb67b5098b2e7923d55010ded59ad1bf75c0f18 | https://github.com/Neurosim-lab/netpyne/blob/edb67b5098b2e7923d55010ded59ad1bf75c0f18/netpyne/sim/wrappers.py#L164-L180 |
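In the same spirit as the create() sketch above (netParams and simConfig assumed to be defined there), a plausible call to the NeuroML2 export wrapper; the reference name is a placeholder.

from netpyne import sim

sim.createExportNeuroML2(netParams, simConfig, reference='demoNet', format='xml')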
4,759 | Neurosim-lab/netpyne | netpyne/analysis/utils.py | exception | def exception(function):
"""
A decorator that wraps the passed in function and prints exception should one occur
"""
@functools.wraps(function)
def wrapper(*args, **kwargs):
try:
return function(*args, **kwargs)
except Exception as e:
# print
err = "There was an exception in %s():"%(function.__name__)
print(("%s \n %s \n%s"%(err,e,sys.exc_info())))
return -1
return wrapper | python | def exception(function):
"""
A decorator that wraps the passed in function and prints exception should one occur
"""
@functools.wraps(function)
def wrapper(*args, **kwargs):
try:
return function(*args, **kwargs)
except Exception as e:
# print
err = "There was an exception in %s():"%(function.__name__)
print(("%s \n %s \n%s"%(err,e,sys.exc_info())))
return -1
return wrapper | [
"def",
"exception",
"(",
"function",
")",
":",
"@",
"functools",
".",
"wraps",
"(",
"function",
")",
"def",
"wrapper",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"try",
":",
"return",
"function",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"except",
"Exception",
"as",
"e",
":",
"# print ",
"err",
"=",
"\"There was an exception in %s():\"",
"%",
"(",
"function",
".",
"__name__",
")",
"print",
"(",
"(",
"\"%s \\n %s \\n%s\"",
"%",
"(",
"err",
",",
"e",
",",
"sys",
".",
"exc_info",
"(",
")",
")",
")",
")",
"return",
"-",
"1",
"return",
"wrapper"
] | A decorator that wraps the passed in function and prints exception should one occur | [
"A",
"decorator",
"that",
"wraps",
"the",
"passed",
"in",
"function",
"and",
"prints",
"exception",
"should",
"one",
"occur"
] | edb67b5098b2e7923d55010ded59ad1bf75c0f18 | https://github.com/Neurosim-lab/netpyne/blob/edb67b5098b2e7923d55010ded59ad1bf75c0f18/netpyne/analysis/utils.py#L54-L68 |
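A minimal sketch of how the exception decorator is applied; the decorated function is invented for illustration and the import path is taken from the record.

from netpyne.analysis.utils import exception  # assumed import path

@exception
def demo_divide(a, b):
    return a / b

demo_divide(1, 0)   # prints "There was an exception in demo_divide(): ..." and returns -1 instead of raising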
4,760 | Neurosim-lab/netpyne | netpyne/analysis/utils.py | getSpktSpkid | def getSpktSpkid(cellGids=[], timeRange=None, allCells=False):
'''return spike ids and times; with allCells=True just need to identify slice of time so can omit cellGids'''
from .. import sim
import pandas as pd
try: # Pandas 0.24 and later
from pandas import _lib as pandaslib
except: # Pandas 0.23 and earlier
from pandas import lib as pandaslib
df = pd.DataFrame(pandaslib.to_object_array([sim.allSimData['spkt'], sim.allSimData['spkid']]).transpose(), columns=['spkt', 'spkid'])
#df = pd.DataFrame(pd.lib.to_object_array([sim.allSimData['spkt'], sim.allSimData['spkid']]).transpose(), columns=['spkt', 'spkid'])
if timeRange:
min, max = [int(df['spkt'].searchsorted(timeRange[i])) for i in range(2)] # binary search faster than query
else: # timeRange None or empty list means all times
min, max = 0, len(df)
if len(cellGids)==0 or allCells: # get all by either using flag or giving empty list -- can get rid of the flag
sel = df[min:max]
else:
sel = df[min:max].query('spkid in @cellGids')
return sel, sel['spkt'].tolist(), sel['spkid'].tolist() | python | def getSpktSpkid(cellGids=[], timeRange=None, allCells=False):
'''return spike ids and times; with allCells=True just need to identify slice of time so can omit cellGids'''
from .. import sim
import pandas as pd
try: # Pandas 0.24 and later
from pandas import _lib as pandaslib
except: # Pandas 0.23 and earlier
from pandas import lib as pandaslib
df = pd.DataFrame(pandaslib.to_object_array([sim.allSimData['spkt'], sim.allSimData['spkid']]).transpose(), columns=['spkt', 'spkid'])
#df = pd.DataFrame(pd.lib.to_object_array([sim.allSimData['spkt'], sim.allSimData['spkid']]).transpose(), columns=['spkt', 'spkid'])
if timeRange:
min, max = [int(df['spkt'].searchsorted(timeRange[i])) for i in range(2)] # binary search faster than query
else: # timeRange None or empty list means all times
min, max = 0, len(df)
if len(cellGids)==0 or allCells: # get all by either using flag or giving empty list -- can get rid of the flag
sel = df[min:max]
else:
sel = df[min:max].query('spkid in @cellGids')
return sel, sel['spkt'].tolist(), sel['spkid'].tolist() | [
"def",
"getSpktSpkid",
"(",
"cellGids",
"=",
"[",
"]",
",",
"timeRange",
"=",
"None",
",",
"allCells",
"=",
"False",
")",
":",
"from",
".",
".",
"import",
"sim",
"import",
"pandas",
"as",
"pd",
"try",
":",
"# Pandas 0.24 and later",
"from",
"pandas",
"import",
"_lib",
"as",
"pandaslib",
"except",
":",
"# Pandas 0.23 and earlier",
"from",
"pandas",
"import",
"lib",
"as",
"pandaslib",
"df",
"=",
"pd",
".",
"DataFrame",
"(",
"pandaslib",
".",
"to_object_array",
"(",
"[",
"sim",
".",
"allSimData",
"[",
"'spkt'",
"]",
",",
"sim",
".",
"allSimData",
"[",
"'spkid'",
"]",
"]",
")",
".",
"transpose",
"(",
")",
",",
"columns",
"=",
"[",
"'spkt'",
",",
"'spkid'",
"]",
")",
"#df = pd.DataFrame(pd.lib.to_object_array([sim.allSimData['spkt'], sim.allSimData['spkid']]).transpose(), columns=['spkt', 'spkid'])",
"if",
"timeRange",
":",
"min",
",",
"max",
"=",
"[",
"int",
"(",
"df",
"[",
"'spkt'",
"]",
".",
"searchsorted",
"(",
"timeRange",
"[",
"i",
"]",
")",
")",
"for",
"i",
"in",
"range",
"(",
"2",
")",
"]",
"# binary search faster than query",
"else",
":",
"# timeRange None or empty list means all times",
"min",
",",
"max",
"=",
"0",
",",
"len",
"(",
"df",
")",
"if",
"len",
"(",
"cellGids",
")",
"==",
"0",
"or",
"allCells",
":",
"# get all by either using flag or giving empty list -- can get rid of the flag",
"sel",
"=",
"df",
"[",
"min",
":",
"max",
"]",
"else",
":",
"sel",
"=",
"df",
"[",
"min",
":",
"max",
"]",
".",
"query",
"(",
"'spkid in @cellGids'",
")",
"return",
"sel",
",",
"sel",
"[",
"'spkt'",
"]",
".",
"tolist",
"(",
")",
",",
"sel",
"[",
"'spkid'",
"]",
".",
"tolist",
"(",
")"
] | return spike ids and times; with allCells=True just need to identify slice of time so can omit cellGids | [
"return",
"spike",
"ids",
"and",
"times",
";",
"with",
"allCells",
"=",
"True",
"just",
"need",
"to",
"identify",
"slice",
"of",
"time",
"so",
"can",
"omit",
"cellGids"
] | edb67b5098b2e7923d55010ded59ad1bf75c0f18 | https://github.com/Neurosim-lab/netpyne/blob/edb67b5098b2e7923d55010ded59ad1bf75c0f18/netpyne/analysis/utils.py#L321-L341 |
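A usage sketch for getSpktSpkid, assuming a simulation has already been run so that sim.allSimData contains 'spkt' and 'spkid'; the GIDs and time window are placeholders.

from netpyne.analysis.utils import getSpktSpkid  # assumed import path

sel, spkts, spkids = getSpktSpkid(cellGids=[0, 1, 2], timeRange=[100, 500])   # spikes of three cells in 100-500 ms
sel_all, spkts_all, spkids_all = getSpktSpkid(timeRange=None, allCells=True)  # every cell, all recorded times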
4,761 | Neurosim-lab/netpyne | netpyne/support/recxelectrode.py | RecXElectrode.calcTransferResistance | def calcTransferResistance(self, gid, seg_coords):
"""Precompute mapping from segment to electrode locations"""
sigma = 0.3 # mS/mm
# Value used in NEURON extracellular recording example ("extracellular_stim_and_rec")
# rho = 35.4 # ohm cm, squid axon cytoplasm = 2.8249e-2 S/cm = 0.028 S/cm = 0.0028 S/mm = 2.8 mS/mm
# rho_um = 35.4 * 0.01 = 35.4 / 1e6 * 1e4 = 0.354 Mohm um ~= 3 uS / um = 3000 uS / mm = 3 mS /mm
# equivalent sigma value (~3) is 10x larger than Allen (0.3)
# if the same sigma value is used, results are consistent
r05 = (seg_coords['p0'] + seg_coords['p1'])/2
dl = seg_coords['p1'] - seg_coords['p0']
nseg = r05.shape[1]
tr = np.zeros((self.nsites,nseg))
# tr_NEURON = np.zeros((self.nsites,nseg)) # used to compare with NEURON extracellular example
for j in range(self.nsites): # calculate mapping for each site on the electrode
rel = np.expand_dims(self.pos[:, j], axis=1) # coordinates of a j-th site on the electrode
rel_05 = rel - r05 # distance between electrode and segment centers
r2 = np.einsum('ij,ij->j', rel_05, rel_05) # compute dot product column-wise, the resulting array has as many columns as original
rlldl = np.einsum('ij,ij->j', rel_05, dl) # compute dot product column-wise, the resulting array has as many columns as original
dlmag = np.linalg.norm(dl, axis=0) # length of each segment
rll = abs(rlldl/dlmag) # component of r parallel to the segment axis it must be always positive
rT2 = r2 - rll**2 # square of perpendicular component
up = rll + dlmag/2
low = rll - dlmag/2
num = up + np.sqrt(up**2 + rT2)
den = low + np.sqrt(low**2 + rT2)
tr[j, :] = np.log(num/den)/dlmag # units of (1/um) use with imemb_ (total seg current)
# Consistent with NEURON extracellular recording example
# r = np.sqrt(rel_05[0,:]**2 + rel_05[1,:]**2 + rel_05[2,:]**2)
# tr_NEURON[j, :] = (rho / 4 / math.pi)*(1/r)*0.01
tr *= 1/(4*math.pi*sigma) # units: 1/um / (mS/mm) = mm/um / mS = 1e3 * kOhm = MOhm
self.transferResistances[gid] = tr | python | def calcTransferResistance(self, gid, seg_coords):
"""Precompute mapping from segment to electrode locations"""
sigma = 0.3 # mS/mm
# Value used in NEURON extracellular recording example ("extracellular_stim_and_rec")
# rho = 35.4 # ohm cm, squid axon cytoplasm = 2.8249e-2 S/cm = 0.028 S/cm = 0.0028 S/mm = 2.8 mS/mm
# rho_um = 35.4 * 0.01 = 35.4 / 1e6 * 1e4 = 0.354 Mohm um ~= 3 uS / um = 3000 uS / mm = 3 mS /mm
# equivalent sigma value (~3) is 10x larger than Allen (0.3)
# if use same sigma value, results are consistent
r05 = (seg_coords['p0'] + seg_coords['p1'])/2
dl = seg_coords['p1'] - seg_coords['p0']
nseg = r05.shape[1]
tr = np.zeros((self.nsites,nseg))
# tr_NEURON = np.zeros((self.nsites,nseg)) # used to compare with NEURON extracellular example
for j in range(self.nsites): # calculate mapping for each site on the electrode
rel = np.expand_dims(self.pos[:, j], axis=1) # coordinates of a j-th site on the electrode
rel_05 = rel - r05 # distance between electrode and segment centers
r2 = np.einsum('ij,ij->j', rel_05, rel_05) # compute dot product column-wise, the resulting array has as many columns as original
rlldl = np.einsum('ij,ij->j', rel_05, dl) # compute dot product column-wise, the resulting array has as many columns as original
dlmag = np.linalg.norm(dl, axis=0) # length of each segment
rll = abs(rlldl/dlmag) # component of r parallel to the segment axis it must be always positive
rT2 = r2 - rll**2 # square of perpendicular component
up = rll + dlmag/2
low = rll - dlmag/2
num = up + np.sqrt(up**2 + rT2)
den = low + np.sqrt(low**2 + rT2)
tr[j, :] = np.log(num/den)/dlmag # units of (1/um) use with imemb_ (total seg current)
# Consistent with NEURON extracellular recording example
# r = np.sqrt(rel_05[0,:]**2 + rel_05[1,:]**2 + rel_05[2,:]**2)
# tr_NEURON[j, :] = (rho / 4 / math.pi)*(1/r)*0.01
tr *= 1/(4*math.pi*sigma) # units: 1/um / (mS/mm) = mm/um / mS = 1e3 * kOhm = MOhm
self.transferResistances[gid] = tr | [
"def",
"calcTransferResistance",
"(",
"self",
",",
"gid",
",",
"seg_coords",
")",
":",
"sigma",
"=",
"0.3",
"# mS/mm ",
"# Value used in NEURON extracellular recording example (\"extracellular_stim_and_rec\")",
"# rho = 35.4 # ohm cm, squid axon cytoplasm = 2.8249e-2 S/cm = 0.028 S/cm = 0.0028 S/mm = 2.8 mS/mm ",
"# rho_um = 35.4 * 0.01 = 35.4 / 1e6 * 1e4 = 0.354 Mohm um ~= 3 uS / um = 3000 uS / mm = 3 mS /mm",
"# equivalent sigma value (~3) is 10x larger than Allen (0.3) ",
"# if use same sigma value, results are consistent",
"r05",
"=",
"(",
"seg_coords",
"[",
"'p0'",
"]",
"+",
"seg_coords",
"[",
"'p1'",
"]",
")",
"/",
"2",
"dl",
"=",
"seg_coords",
"[",
"'p1'",
"]",
"-",
"seg_coords",
"[",
"'p0'",
"]",
"nseg",
"=",
"r05",
".",
"shape",
"[",
"1",
"]",
"tr",
"=",
"np",
".",
"zeros",
"(",
"(",
"self",
".",
"nsites",
",",
"nseg",
")",
")",
"# tr_NEURON = np.zeros((self.nsites,nseg)) # used to compare with NEURON extracellular example",
"for",
"j",
"in",
"range",
"(",
"self",
".",
"nsites",
")",
":",
"# calculate mapping for each site on the electrode",
"rel",
"=",
"np",
".",
"expand_dims",
"(",
"self",
".",
"pos",
"[",
":",
",",
"j",
"]",
",",
"axis",
"=",
"1",
")",
"# coordinates of a j-th site on the electrode",
"rel_05",
"=",
"rel",
"-",
"r05",
"# distance between electrode and segment centers",
"r2",
"=",
"np",
".",
"einsum",
"(",
"'ij,ij->j'",
",",
"rel_05",
",",
"rel_05",
")",
"# compute dot product column-wise, the resulting array has as many columns as original",
"rlldl",
"=",
"np",
".",
"einsum",
"(",
"'ij,ij->j'",
",",
"rel_05",
",",
"dl",
")",
"# compute dot product column-wise, the resulting array has as many columns as original",
"dlmag",
"=",
"np",
".",
"linalg",
".",
"norm",
"(",
"dl",
",",
"axis",
"=",
"0",
")",
"# length of each segment",
"rll",
"=",
"abs",
"(",
"rlldl",
"/",
"dlmag",
")",
"# component of r parallel to the segment axis it must be always positive",
"rT2",
"=",
"r2",
"-",
"rll",
"**",
"2",
"# square of perpendicular component",
"up",
"=",
"rll",
"+",
"dlmag",
"/",
"2",
"low",
"=",
"rll",
"-",
"dlmag",
"/",
"2",
"num",
"=",
"up",
"+",
"np",
".",
"sqrt",
"(",
"up",
"**",
"2",
"+",
"rT2",
")",
"den",
"=",
"low",
"+",
"np",
".",
"sqrt",
"(",
"low",
"**",
"2",
"+",
"rT2",
")",
"tr",
"[",
"j",
",",
":",
"]",
"=",
"np",
".",
"log",
"(",
"num",
"/",
"den",
")",
"/",
"dlmag",
"# units of (1/um) use with imemb_ (total seg current)",
"# Consistent with NEURON extracellular recording example",
"# r = np.sqrt(rel_05[0,:]**2 + rel_05[1,:]**2 + rel_05[2,:]**2)",
"# tr_NEURON[j, :] = (rho / 4 / math.pi)*(1/r)*0.01",
"tr",
"*=",
"1",
"/",
"(",
"4",
"*",
"math",
".",
"pi",
"*",
"sigma",
")",
"# units: 1/um / (mS/mm) = mm/um / mS = 1e3 * kOhm = MOhm",
"self",
".",
"transferResistances",
"[",
"gid",
"]",
"=",
"tr"
] | Precompute mapping from segment to electrode locations | [
"Precompute",
"mapping",
"from",
"segment",
"to",
"electrode",
"locations"
] | edb67b5098b2e7923d55010ded59ad1bf75c0f18 | https://github.com/Neurosim-lab/netpyne/blob/edb67b5098b2e7923d55010ded59ad1bf75c0f18/netpyne/support/recxelectrode.py#L67-L105 |
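The method above applies a line-source approximation per segment. Below is a standalone NumPy sketch of the same arithmetic for a single electrode site and a single segment; the conductivity and coordinates are illustrative values, not taken from the record.

```python
# Standalone sketch of the line-source transfer-resistance formula used above.
# sigma and the coordinates are made-up illustrative numbers.
import math
import numpy as np

sigma = 0.3                              # extracellular conductivity, mS/mm
p0 = np.array([0.0, 0.0, 0.0])           # segment start (um)
p1 = np.array([0.0, 10.0, 0.0])          # segment end (um)
site = np.array([50.0, 5.0, 0.0])        # electrode site (um)

r05 = (p0 + p1) / 2                      # segment midpoint
dl = p1 - p0
rel_05 = site - r05                      # site position relative to the midpoint
dlmag = np.linalg.norm(dl)               # segment length
rll = abs(np.dot(rel_05, dl)) / dlmag    # component parallel to the segment axis
rT2 = np.dot(rel_05, rel_05) - rll ** 2  # squared perpendicular component
up, low = rll + dlmag / 2, rll - dlmag / 2

tr = math.log((up + math.sqrt(up ** 2 + rT2)) /
              (low + math.sqrt(low ** 2 + rT2))) / dlmag
tr *= 1 / (4 * math.pi * sigma)          # transfer resistance, MOhm
print(tr)
```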
4,762 | Neurosim-lab/netpyne | netpyne/conversion/excel.py | importConnFromExcel | def importConnFromExcel (fileName, sheetName):
''' Import connectivity rules from Excel sheet'''
import openpyxl as xl
# set columns
colPreTags = 0 # 'A'
colPostTags = 1 # 'B'
colConnFunc = 2 # 'C'
colSyn = 3 # 'D'
colProb = 5 # 'F'
colWeight = 6 # 'G'
colAnnot = 8 # 'I'
outFileName = fileName[:-5]+'_'+sheetName+'.py' # set output file name
connText = """## Generated using importConnFromExcel() function in params/utils.py \n\nnetParams['connParams'] = [] \n\n"""
# open excel file and sheet
wb = xl.load_workbook(fileName)
sheet = wb.get_sheet_by_name(sheetName)
numRows = sheet.get_highest_row()
with open(outFileName, 'w') as f:
f.write(connText) # write starting text
for row in range(1,numRows+1):
if sheet.cell(row=row, column=colProb).value: # if not empty row
print('Creating conn rule for row ' + str(row))
# read row values
pre = sheet.cell(row=row, column=colPreTags).value
post = sheet.cell(row=row, column=colPostTags).value
func = sheet.cell(row=row, column=colConnFunc).value
syn = sheet.cell(row=row, column=colSyn).value
prob = sheet.cell(row=row, column=colProb).value
weight = sheet.cell(row=row, column=colWeight).value
# write preTags
line = "netParams['connParams'].append({'preConds': {"
for i,cond in enumerate(pre.split(';')): # split into different conditions
if i>0: line = line + ", "
cond2 = cond.split('=') # split into key and value
line = line + "'" + cond2[0].replace(' ','') + "': " + cond2[1].replace(' ','') # generate line
line = line + "}" # end of preTags
# write postTags
line = line + ",\n'postConds': {"
for i,cond in enumerate(post.split(';')): # split into different conditions
if i>0: line = line + ", "
cond2 = cond.split('=') # split into key and value
line = line + "'" + cond2[0].replace(' ','') + "': " + cond2[1].replace(' ','') # generate line
line = line + "}" # end of postTags
line = line + ",\n'connFunc': '" + func + "'" # write connFunc
line = line + ",\n'synMech': '" + syn + "'" # write synReceptor
line = line + ",\n'probability': " + str(prob) # write prob
line = line + ",\n'weight': " + str(weight) # write prob
line = line + "})" # add closing brackets
line = line + '\n\n' # new line after each conn rule
f.write(line) | python | def importConnFromExcel (fileName, sheetName):
''' Import connectivity rules from Excel sheet'''
import openpyxl as xl
# set columns
colPreTags = 0 # 'A'
colPostTags = 1 # 'B'
colConnFunc = 2 # 'C'
colSyn = 3 # 'D'
colProb = 5 # 'F'
colWeight = 6 # 'G'
colAnnot = 8 # 'I'
outFileName = fileName[:-5]+'_'+sheetName+'.py' # set output file name
connText = """## Generated using importConnFromExcel() function in params/utils.py \n\nnetParams['connParams'] = [] \n\n"""
# open excel file and sheet
wb = xl.load_workbook(fileName)
sheet = wb.get_sheet_by_name(sheetName)
numRows = sheet.get_highest_row()
with open(outFileName, 'w') as f:
f.write(connText) # write starting text
for row in range(1,numRows+1):
if sheet.cell(row=row, column=colProb).value: # if not empty row
print('Creating conn rule for row ' + str(row))
# read row values
pre = sheet.cell(row=row, column=colPreTags).value
post = sheet.cell(row=row, column=colPostTags).value
func = sheet.cell(row=row, column=colConnFunc).value
syn = sheet.cell(row=row, column=colSyn).value
prob = sheet.cell(row=row, column=colProb).value
weight = sheet.cell(row=row, column=colWeight).value
# write preTags
line = "netParams['connParams'].append({'preConds': {"
for i,cond in enumerate(pre.split(';')): # split into different conditions
if i>0: line = line + ", "
cond2 = cond.split('=') # split into key and value
line = line + "'" + cond2[0].replace(' ','') + "': " + cond2[1].replace(' ','') # generate line
line = line + "}" # end of preTags
# write postTags
line = line + ",\n'postConds': {"
for i,cond in enumerate(post.split(';')): # split into different conditions
if i>0: line = line + ", "
cond2 = cond.split('=') # split into key and value
line = line + "'" + cond2[0].replace(' ','') + "': " + cond2[1].replace(' ','') # generate line
line = line + "}" # end of postTags
line = line + ",\n'connFunc': '" + func + "'" # write connFunc
line = line + ",\n'synMech': '" + syn + "'" # write synReceptor
line = line + ",\n'probability': " + str(prob) # write prob
line = line + ",\n'weight': " + str(weight) # write prob
line = line + "})" # add closing brackets
line = line + '\n\n' # new line after each conn rule
f.write(line) | [
"def",
"importConnFromExcel",
"(",
"fileName",
",",
"sheetName",
")",
":",
"import",
"openpyxl",
"as",
"xl",
"# set columns",
"colPreTags",
"=",
"0",
"# 'A'",
"colPostTags",
"=",
"1",
"# 'B'",
"colConnFunc",
"=",
"2",
"# 'C'",
"colSyn",
"=",
"3",
"# 'D'",
"colProb",
"=",
"5",
"# 'F'",
"colWeight",
"=",
"6",
"# 'G'",
"colAnnot",
"=",
"8",
"# 'I' ",
"outFileName",
"=",
"fileName",
"[",
":",
"-",
"5",
"]",
"+",
"'_'",
"+",
"sheetName",
"+",
"'.py'",
"# set output file name",
"connText",
"=",
"\"\"\"## Generated using importConnFromExcel() function in params/utils.py \\n\\nnetParams['connParams'] = [] \\n\\n\"\"\"",
"# open excel file and sheet",
"wb",
"=",
"xl",
".",
"load_workbook",
"(",
"fileName",
")",
"sheet",
"=",
"wb",
".",
"get_sheet_by_name",
"(",
"sheetName",
")",
"numRows",
"=",
"sheet",
".",
"get_highest_row",
"(",
")",
"with",
"open",
"(",
"outFileName",
",",
"'w'",
")",
"as",
"f",
":",
"f",
".",
"write",
"(",
"connText",
")",
"# write starting text",
"for",
"row",
"in",
"range",
"(",
"1",
",",
"numRows",
"+",
"1",
")",
":",
"if",
"sheet",
".",
"cell",
"(",
"row",
"=",
"row",
",",
"column",
"=",
"colProb",
")",
".",
"value",
":",
"# if not empty row",
"print",
"(",
"'Creating conn rule for row '",
"+",
"str",
"(",
"row",
")",
")",
"# read row values",
"pre",
"=",
"sheet",
".",
"cell",
"(",
"row",
"=",
"row",
",",
"column",
"=",
"colPreTags",
")",
".",
"value",
"post",
"=",
"sheet",
".",
"cell",
"(",
"row",
"=",
"row",
",",
"column",
"=",
"colPostTags",
")",
".",
"value",
"func",
"=",
"sheet",
".",
"cell",
"(",
"row",
"=",
"row",
",",
"column",
"=",
"colConnFunc",
")",
".",
"value",
"syn",
"=",
"sheet",
".",
"cell",
"(",
"row",
"=",
"row",
",",
"column",
"=",
"colSyn",
")",
".",
"value",
"prob",
"=",
"sheet",
".",
"cell",
"(",
"row",
"=",
"row",
",",
"column",
"=",
"colProb",
")",
".",
"value",
"weight",
"=",
"sheet",
".",
"cell",
"(",
"row",
"=",
"row",
",",
"column",
"=",
"colWeight",
")",
".",
"value",
"# write preTags",
"line",
"=",
"\"netParams['connParams'].append({'preConds': {\"",
"for",
"i",
",",
"cond",
"in",
"enumerate",
"(",
"pre",
".",
"split",
"(",
"';'",
")",
")",
":",
"# split into different conditions",
"if",
"i",
">",
"0",
":",
"line",
"=",
"line",
"+",
"\", \"",
"cond2",
"=",
"cond",
".",
"split",
"(",
"'='",
")",
"# split into key and value",
"line",
"=",
"line",
"+",
"\"'\"",
"+",
"cond2",
"[",
"0",
"]",
".",
"replace",
"(",
"' '",
",",
"''",
")",
"+",
"\"': \"",
"+",
"cond2",
"[",
"1",
"]",
".",
"replace",
"(",
"' '",
",",
"''",
")",
"# generate line",
"line",
"=",
"line",
"+",
"\"}\"",
"# end of preTags ",
"# write postTags",
"line",
"=",
"line",
"+",
"\",\\n'postConds': {\"",
"for",
"i",
",",
"cond",
"in",
"enumerate",
"(",
"post",
".",
"split",
"(",
"';'",
")",
")",
":",
"# split into different conditions",
"if",
"i",
">",
"0",
":",
"line",
"=",
"line",
"+",
"\", \"",
"cond2",
"=",
"cond",
".",
"split",
"(",
"'='",
")",
"# split into key and value",
"line",
"=",
"line",
"+",
"\"'\"",
"+",
"cond2",
"[",
"0",
"]",
".",
"replace",
"(",
"' '",
",",
"''",
")",
"+",
"\"': \"",
"+",
"cond2",
"[",
"1",
"]",
".",
"replace",
"(",
"' '",
",",
"''",
")",
"# generate line",
"line",
"=",
"line",
"+",
"\"}\"",
"# end of postTags ",
"line",
"=",
"line",
"+",
"\",\\n'connFunc': '\"",
"+",
"func",
"+",
"\"'\"",
"# write connFunc",
"line",
"=",
"line",
"+",
"\",\\n'synMech': '\"",
"+",
"syn",
"+",
"\"'\"",
"# write synReceptor",
"line",
"=",
"line",
"+",
"\",\\n'probability': \"",
"+",
"str",
"(",
"prob",
")",
"# write prob",
"line",
"=",
"line",
"+",
"\",\\n'weight': \"",
"+",
"str",
"(",
"weight",
")",
"# write prob",
"line",
"=",
"line",
"+",
"\"})\"",
"# add closing brackets",
"line",
"=",
"line",
"+",
"'\\n\\n'",
"# new line after each conn rule",
"f",
".",
"write",
"(",
"line",
")"
] | Import connectivity rules from Excel sheet | [
"Import",
"connectivity",
"rules",
"from",
"Excel",
"sheet"
] | edb67b5098b2e7923d55010ded59ad1bf75c0f18 | https://github.com/Neurosim-lab/netpyne/blob/edb67b5098b2e7923d55010ded59ad1bf75c0f18/netpyne/conversion/excel.py#L19-L75 |
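A hypothetical call to the converter above; the workbook and sheet names are placeholders, and the module path is assumed from the record's `netpyne/conversion/excel.py` location. Note the code relies on the older openpyxl worksheet API (`get_sheet_by_name`, `get_highest_row`).

```python
# Hypothetical workbook/sheet names; writes 'myConns_L5rules.py' next to the input file.
from netpyne.conversion.excel import importConnFromExcel

importConnFromExcel('myConns.xlsx', 'L5rules')
```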
4,763 | zerwes/hiyapyco | hiyapyco/odyldo.py | safe_dump | def safe_dump(data, stream=None, **kwds):
"""implementation of safe dumper using Ordered Dict Yaml Dumper"""
return yaml.dump(data, stream=stream, Dumper=ODYD, **kwds) | python | def safe_dump(data, stream=None, **kwds):
"""implementation of safe dumper using Ordered Dict Yaml Dumper"""
return yaml.dump(data, stream=stream, Dumper=ODYD, **kwds) | [
"def",
"safe_dump",
"(",
"data",
",",
"stream",
"=",
"None",
",",
"*",
"*",
"kwds",
")",
":",
"return",
"yaml",
".",
"dump",
"(",
"data",
",",
"stream",
"=",
"stream",
",",
"Dumper",
"=",
"ODYD",
",",
"*",
"*",
"kwds",
")"
] | implementation of safe dumper using Ordered Dict Yaml Dumper | [
"implementation",
"of",
"safe",
"dumper",
"using",
"Ordered",
"Dict",
"Yaml",
"Dumper"
] | b0b42724cc13b1412f5bb5d92fd4c637d6615edb | https://github.com/zerwes/hiyapyco/blob/b0b42724cc13b1412f5bb5d92fd4c637d6615edb/hiyapyco/odyldo.py#L76-L78 |
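A short sketch of the ordered dumper above: passing an `OrderedDict` through `ODYD` keeps key order in the emitted YAML, which plain `yaml.safe_dump` does not guarantee on older PyYAML versions. The document content is made up.

```python
# Illustrative document; safe_dump passes **kwds straight through to yaml.dump.
from collections import OrderedDict
import hiyapyco.odyldo as odyldo

doc = OrderedDict([('name', 'web'), ('replicas', 3), ('image', 'nginx:1.25')])
print(odyldo.safe_dump(doc, default_flow_style=False))
```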
4,764 | zerwes/hiyapyco | hiyapyco/__init__.py | dump | def dump(data, **kwds):
"""dump the data as YAML"""
if _usedefaultyamlloader:
return yaml.safe_dump(data, **kwds)
else:
return odyldo.safe_dump(data, **kwds) | python | def dump(data, **kwds):
"""dump the data as YAML"""
if _usedefaultyamlloader:
return yaml.safe_dump(data, **kwds)
else:
return odyldo.safe_dump(data, **kwds) | [
"def",
"dump",
"(",
"data",
",",
"*",
"*",
"kwds",
")",
":",
"if",
"_usedefaultyamlloader",
":",
"return",
"yaml",
".",
"safe_dump",
"(",
"data",
",",
"*",
"*",
"kwds",
")",
"else",
":",
"return",
"odyldo",
".",
"safe_dump",
"(",
"data",
",",
"*",
"*",
"kwds",
")"
] | dump the data as YAML | [
"dump",
"the",
"data",
"as",
"YAML"
] | b0b42724cc13b1412f5bb5d92fd4c637d6615edb | https://github.com/zerwes/hiyapyco/blob/b0b42724cc13b1412f5bb5d92fd4c637d6615edb/hiyapyco/__init__.py#L413-L418 |
4,765 | andycasey/ads | ads/search.py | Article.bibtex | def bibtex(self):
"""Return a BiBTeX entry for the current article."""
warnings.warn("bibtex should be queried with ads.ExportQuery(); You will "
"hit API ratelimits very quickly otherwise.", UserWarning)
return ExportQuery(bibcodes=self.bibcode, format="bibtex").execute() | python | def bibtex(self):
"""Return a BiBTeX entry for the current article."""
warnings.warn("bibtex should be queried with ads.ExportQuery(); You will "
"hit API ratelimits very quickly otherwise.", UserWarning)
return ExportQuery(bibcodes=self.bibcode, format="bibtex").execute() | [
"def",
"bibtex",
"(",
"self",
")",
":",
"warnings",
".",
"warn",
"(",
"\"bibtex should be queried with ads.ExportQuery(); You will \"",
"\"hit API ratelimits very quickly otherwise.\"",
",",
"UserWarning",
")",
"return",
"ExportQuery",
"(",
"bibcodes",
"=",
"self",
".",
"bibcode",
",",
"format",
"=",
"\"bibtex\"",
")",
".",
"execute",
"(",
")"
] | Return a BiBTeX entry for the current article. | [
"Return",
"a",
"BiBTeX",
"entry",
"for",
"the",
"current",
"article",
"."
] | 928415e202db80658cd8532fa4c3a00d0296b5c5 | https://github.com/andycasey/ads/blob/928415e202db80658cd8532fa4c3a00d0296b5c5/ads/search.py#L292-L296 |
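The warning above points at the batched export service; a sketch of that call is below. The bibcodes are placeholders and a valid ADS API token is assumed to be configured.

```python
# Batched BibTeX export, as the property's warning recommends.
import ads

bibcodes = ['2015RaSc...50..916A', '2018AJ....156..123T']   # placeholder bibcodes
bibtex = ads.ExportQuery(bibcodes=bibcodes, format='bibtex').execute()
print(bibtex)
```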
4,766 | andycasey/ads | examples/monthly-institute-publications/stromlo.py | get_pdf | def get_pdf(article, debug=False):
"""
Download an article PDF from arXiv.
:param article:
The ADS article to retrieve.
:type article:
:class:`ads.search.Article`
:returns:
The binary content of the requested PDF.
"""
print('Retrieving {0}'.format(article))
identifier = [_ for _ in article.identifier if 'arXiv' in _]
if identifier:
url = 'http://arXiv.org/pdf/{0}.{1}'.format(identifier[0][9:13],
''.join(_ for _ in identifier[0][14:] if _.isdigit()))
else:
# No arXiv version. Ask ADS to redirect us to the journal article.
params = {
'bibcode': article.bibcode,
'link_type': 'ARTICLE',
'db_key': 'AST'
}
url = requests.get('http://adsabs.harvard.edu/cgi-bin/nph-data_query',
params=params).url
q = requests.get(url)
if not q.ok:
print('Error retrieving {0}: {1} for {2}'.format(
article, q.status_code, url))
if debug: q.raise_for_status()
else: return None
# Check if the journal has given back forbidden HTML.
if q.content.endswith('</html>'):
print('Error retrieving {0}: 200 (access denied?) for {1}'.format(
article, url))
return None
return q.content | python | def get_pdf(article, debug=False):
"""
Download an article PDF from arXiv.
:param article:
The ADS article to retrieve.
:type article:
:class:`ads.search.Article`
:returns:
The binary content of the requested PDF.
"""
print('Retrieving {0}'.format(article))
identifier = [_ for _ in article.identifier if 'arXiv' in _]
if identifier:
url = 'http://arXiv.org/pdf/{0}.{1}'.format(identifier[0][9:13],
''.join(_ for _ in identifier[0][14:] if _.isdigit()))
else:
# No arXiv version. Ask ADS to redirect us to the journal article.
params = {
'bibcode': article.bibcode,
'link_type': 'ARTICLE',
'db_key': 'AST'
}
url = requests.get('http://adsabs.harvard.edu/cgi-bin/nph-data_query',
params=params).url
q = requests.get(url)
if not q.ok:
print('Error retrieving {0}: {1} for {2}'.format(
article, q.status_code, url))
if debug: q.raise_for_status()
else: return None
# Check if the journal has given back forbidden HTML.
if q.content.endswith('</html>'):
print('Error retrieving {0}: 200 (access denied?) for {1}'.format(
article, url))
return None
return q.content | [
"def",
"get_pdf",
"(",
"article",
",",
"debug",
"=",
"False",
")",
":",
"print",
"(",
"'Retrieving {0}'",
".",
"format",
"(",
"article",
")",
")",
"identifier",
"=",
"[",
"_",
"for",
"_",
"in",
"article",
".",
"identifier",
"if",
"'arXiv'",
"in",
"_",
"]",
"if",
"identifier",
":",
"url",
"=",
"'http://arXiv.org/pdf/{0}.{1}'",
".",
"format",
"(",
"identifier",
"[",
"0",
"]",
"[",
"9",
":",
"13",
"]",
",",
"''",
".",
"join",
"(",
"_",
"for",
"_",
"in",
"identifier",
"[",
"0",
"]",
"[",
"14",
":",
"]",
"if",
"_",
".",
"isdigit",
"(",
")",
")",
")",
"else",
":",
"# No arXiv version. Ask ADS to redirect us to the journal article.",
"params",
"=",
"{",
"'bibcode'",
":",
"article",
".",
"bibcode",
",",
"'link_type'",
":",
"'ARTICLE'",
",",
"'db_key'",
":",
"'AST'",
"}",
"url",
"=",
"requests",
".",
"get",
"(",
"'http://adsabs.harvard.edu/cgi-bin/nph-data_query'",
",",
"params",
"=",
"params",
")",
".",
"url",
"q",
"=",
"requests",
".",
"get",
"(",
"url",
")",
"if",
"not",
"q",
".",
"ok",
":",
"print",
"(",
"'Error retrieving {0}: {1} for {2}'",
".",
"format",
"(",
"article",
",",
"q",
".",
"status_code",
",",
"url",
")",
")",
"if",
"debug",
":",
"q",
".",
"raise_for_status",
"(",
")",
"else",
":",
"return",
"None",
"# Check if the journal has given back forbidden HTML.",
"if",
"q",
".",
"content",
".",
"endswith",
"(",
"'</html>'",
")",
":",
"print",
"(",
"'Error retrieving {0}: 200 (access denied?) for {1}'",
".",
"format",
"(",
"article",
",",
"url",
")",
")",
"return",
"None",
"return",
"q",
".",
"content"
] | Download an article PDF from arXiv.
:param article:
The ADS article to retrieve.
:type article:
:class:`ads.search.Article`
:returns:
The binary content of the requested PDF. | [
"Download",
"an",
"article",
"PDF",
"from",
"arXiv",
"."
] | 928415e202db80658cd8532fa4c3a00d0296b5c5 | https://github.com/andycasey/ads/blob/928415e202db80658cd8532fa4c3a00d0296b5c5/examples/monthly-institute-publications/stromlo.py#L22-L64 |
4,767 | andycasey/ads | examples/monthly-institute-publications/stromlo.py | summarise_pdfs | def summarise_pdfs(pdfs):
"""
Collate the first page from each of the PDFs provided into a single PDF.
:param pdfs:
The contents of several PDF files.
:type pdfs:
list of str
:returns:
The contents of a single PDF, which can be written directly to disk.
"""
# Ignore None.
print('Summarising {0} articles ({1} had errors)'.format(
len(pdfs), pdfs.count(None)))
pdfs = [_ for _ in pdfs if _ is not None]
summary = PdfFileWriter()
for pdf in pdfs:
summary.addPage(PdfFileReader(StringIO(pdf)).getPage(0))
return summary | python | def summarise_pdfs(pdfs):
"""
Collate the first page from each of the PDFs provided into a single PDF.
:param pdfs:
The contents of several PDF files.
:type pdfs:
list of str
:returns:
The contents of a single PDF, which can be written directly to disk.
"""
# Ignore None.
print('Summarising {0} articles ({1} had errors)'.format(
len(pdfs), pdfs.count(None)))
pdfs = [_ for _ in pdfs if _ is not None]
summary = PdfFileWriter()
for pdf in pdfs:
summary.addPage(PdfFileReader(StringIO(pdf)).getPage(0))
return summary | [
"def",
"summarise_pdfs",
"(",
"pdfs",
")",
":",
"# Ignore None.",
"print",
"(",
"'Summarising {0} articles ({1} had errors)'",
".",
"format",
"(",
"len",
"(",
"pdfs",
")",
",",
"pdfs",
".",
"count",
"(",
"None",
")",
")",
")",
"pdfs",
"=",
"[",
"_",
"for",
"_",
"in",
"pdfs",
"if",
"_",
"is",
"not",
"None",
"]",
"summary",
"=",
"PdfFileWriter",
"(",
")",
"for",
"pdf",
"in",
"pdfs",
":",
"summary",
".",
"addPage",
"(",
"PdfFileReader",
"(",
"StringIO",
"(",
"pdf",
")",
")",
".",
"getPage",
"(",
"0",
")",
")",
"return",
"summary"
] | Collate the first page from each of the PDFs provided into a single PDF.
:param pdfs:
The contents of several PDF files.
:type pdfs:
list of str
:returns:
The contents of a single PDF, which can be written directly to disk. | [
"Collate",
"the",
"first",
"page",
"from",
"each",
"of",
"the",
"PDFs",
"provided",
"into",
"a",
"single",
"PDF",
"."
] | 928415e202db80658cd8532fa4c3a00d0296b5c5 | https://github.com/andycasey/ads/blob/928415e202db80658cd8532fa4c3a00d0296b5c5/examples/monthly-institute-publications/stromlo.py#L67-L89 |
4,768 | andycasey/ads | ads/metrics.py | MetricsQuery.execute | def execute(self):
"""
Execute the http request to the metrics service
"""
self.response = MetricsResponse.load_http_response(
self.session.post(self.HTTP_ENDPOINT, data=self.json_payload)
)
return self.response.metrics | python | def execute(self):
"""
Execute the http request to the metrics service
"""
self.response = MetricsResponse.load_http_response(
self.session.post(self.HTTP_ENDPOINT, data=self.json_payload)
)
return self.response.metrics | [
"def",
"execute",
"(",
"self",
")",
":",
"self",
".",
"response",
"=",
"MetricsResponse",
".",
"load_http_response",
"(",
"self",
".",
"session",
".",
"post",
"(",
"self",
".",
"HTTP_ENDPOINT",
",",
"data",
"=",
"self",
".",
"json_payload",
")",
")",
"return",
"self",
".",
"response",
".",
"metrics"
] | Execute the http request to the metrics service | [
"Execute",
"the",
"http",
"request",
"to",
"the",
"metrics",
"service"
] | 928415e202db80658cd8532fa4c3a00d0296b5c5 | https://github.com/andycasey/ads/blob/928415e202db80658cd8532fa4c3a00d0296b5c5/ads/metrics.py#L47-L54 |
4,769 | andycasey/ads | ads/base.py | _Singleton.get_info | def get_info(cls):
"""
Print all of the instantiated Singletons
"""
return '\n'.join(
[str(cls._instances[key]) for key in cls._instances]
) | python | def get_info(cls):
"""
Print all of the instantiated Singletons
"""
return '\n'.join(
[str(cls._instances[key]) for key in cls._instances]
) | [
"def",
"get_info",
"(",
"cls",
")",
":",
"return",
"'\\n'",
".",
"join",
"(",
"[",
"str",
"(",
"cls",
".",
"_instances",
"[",
"key",
"]",
")",
"for",
"key",
"in",
"cls",
".",
"_instances",
"]",
")"
] | Print all of the instantiated Singletons | [
"Print",
"all",
"of",
"the",
"instantiated",
"Singletons"
] | 928415e202db80658cd8532fa4c3a00d0296b5c5 | https://github.com/andycasey/ads/blob/928415e202db80658cd8532fa4c3a00d0296b5c5/ads/base.py#L25-L31 |
4,770 | andycasey/ads | ads/base.py | APIResponse.load_http_response | def load_http_response(cls, http_response):
"""
This method should return an instantiated class and set its response
to the requests.Response object.
"""
if not http_response.ok:
raise APIResponseError(http_response.text)
c = cls(http_response)
c.response = http_response
RateLimits.getRateLimits(cls.__name__).set(c.response.headers)
return c | python | def load_http_response(cls, http_response):
"""
This method should return an instantiated class and set its response
to the requests.Response object.
"""
if not http_response.ok:
raise APIResponseError(http_response.text)
c = cls(http_response)
c.response = http_response
RateLimits.getRateLimits(cls.__name__).set(c.response.headers)
return c | [
"def",
"load_http_response",
"(",
"cls",
",",
"http_response",
")",
":",
"if",
"not",
"http_response",
".",
"ok",
":",
"raise",
"APIResponseError",
"(",
"http_response",
".",
"text",
")",
"c",
"=",
"cls",
"(",
"http_response",
")",
"c",
".",
"response",
"=",
"http_response",
"RateLimits",
".",
"getRateLimits",
"(",
"cls",
".",
"__name__",
")",
".",
"set",
"(",
"c",
".",
"response",
".",
"headers",
")",
"return",
"c"
] | This method should return an instantiated class and set its response
to the requests.Response object. | [
"This",
"method",
"should",
"return",
"an",
"instantiated",
"class",
"and",
"set",
"its",
"response",
"to",
"the",
"requests",
".",
"Response",
"object",
"."
] | 928415e202db80658cd8532fa4c3a00d0296b5c5 | https://github.com/andycasey/ads/blob/928415e202db80658cd8532fa4c3a00d0296b5c5/ads/base.py#L88-L100 |
4,771 | andycasey/ads | ads/base.py | BaseQuery.token | def token(self):
"""
set the instance attribute `token` following the following logic,
stopping whenever a token is found. Raises NoTokenFound if no token
is found
- environment variables TOKEN_ENVIRON_VARS
- file containing plaintext as the contents in TOKEN_FILES
- ads.config.token
"""
if self._token is None:
for v in map(os.environ.get, TOKEN_ENVIRON_VARS):
if v is not None:
self._token = v
return self._token
for f in TOKEN_FILES:
try:
with open(f) as fp:
self._token = fp.read().strip()
return self._token
except IOError:
pass
if ads.config.token is not None:
self._token = ads.config.token
return self._token
warnings.warn("No token found", RuntimeWarning)
return self._token | python | def token(self):
"""
set the instance attribute `token` following the following logic,
stopping whenever a token is found. Raises NoTokenFound if no token
is found
- environment variables TOKEN_ENVIRON_VARS
- file containing plaintext as the contents in TOKEN_FILES
- ads.config.token
"""
if self._token is None:
for v in map(os.environ.get, TOKEN_ENVIRON_VARS):
if v is not None:
self._token = v
return self._token
for f in TOKEN_FILES:
try:
with open(f) as fp:
self._token = fp.read().strip()
return self._token
except IOError:
pass
if ads.config.token is not None:
self._token = ads.config.token
return self._token
warnings.warn("No token found", RuntimeWarning)
return self._token | [
"def",
"token",
"(",
"self",
")",
":",
"if",
"self",
".",
"_token",
"is",
"None",
":",
"for",
"v",
"in",
"map",
"(",
"os",
".",
"environ",
".",
"get",
",",
"TOKEN_ENVIRON_VARS",
")",
":",
"if",
"v",
"is",
"not",
"None",
":",
"self",
".",
"_token",
"=",
"v",
"return",
"self",
".",
"_token",
"for",
"f",
"in",
"TOKEN_FILES",
":",
"try",
":",
"with",
"open",
"(",
"f",
")",
"as",
"fp",
":",
"self",
".",
"_token",
"=",
"fp",
".",
"read",
"(",
")",
".",
"strip",
"(",
")",
"return",
"self",
".",
"_token",
"except",
"IOError",
":",
"pass",
"if",
"ads",
".",
"config",
".",
"token",
"is",
"not",
"None",
":",
"self",
".",
"_token",
"=",
"ads",
".",
"config",
".",
"token",
"return",
"self",
".",
"_token",
"warnings",
".",
"warn",
"(",
"\"No token found\"",
",",
"RuntimeWarning",
")",
"return",
"self",
".",
"_token"
] | set the instance attribute `token` following the following logic,
stopping whenever a token is found. Raises NoTokenFound if no token
is found
- environment variables TOKEN_ENVIRON_VARS
- file containing plaintext as the contents in TOKEN_FILES
- ads.config.token | [
"set",
"the",
"instance",
"attribute",
"token",
"following",
"the",
"following",
"logic",
"stopping",
"whenever",
"a",
"token",
"is",
"found",
".",
"Raises",
"NoTokenFound",
"is",
"no",
"token",
"is",
"found",
"-",
"environment",
"variables",
"TOKEN_ENVIRON_VARS",
"-",
"file",
"containing",
"plaintext",
"as",
"the",
"contents",
"in",
"TOKEN_FILES",
"-",
"ads",
".",
"config",
".",
"token"
] | 928415e202db80658cd8532fa4c3a00d0296b5c5 | https://github.com/andycasey/ads/blob/928415e202db80658cd8532fa4c3a00d0296b5c5/ads/base.py#L111-L136 |
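A sketch of the three lookup paths the property above walks, in order. The exact environment-variable names live in `TOKEN_ENVIRON_VARS` and the file paths in `TOKEN_FILES`, which are not shown in this record, so the names used below are assumptions; `ads.config.token` is taken directly from the code.

```python
import os
import ads

# 1. an environment variable listed in TOKEN_ENVIRON_VARS (name assumed here)
os.environ['ADS_DEV_KEY'] = 'my-api-token'

# 2. a plain-text token file listed in TOKEN_FILES (e.g. ~/.ads/dev_key, assumed)

# 3. set directly on the package config, which the code checks last
ads.config.token = 'my-api-token'
```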
4,772 | andycasey/ads | ads/base.py | BaseQuery.session | def session(self):
"""
http session interface, transparent proxy to requests.session
"""
if self._session is None:
self._session = requests.session()
self._session.headers.update(
{
"Authorization": "Bearer {}".format(self.token),
"User-Agent": "ads-api-client/{}".format(__version__),
"Content-Type": "application/json",
}
)
return self._session | python | def session(self):
"""
http session interface, transparent proxy to requests.session
"""
if self._session is None:
self._session = requests.session()
self._session.headers.update(
{
"Authorization": "Bearer {}".format(self.token),
"User-Agent": "ads-api-client/{}".format(__version__),
"Content-Type": "application/json",
}
)
return self._session | [
"def",
"session",
"(",
"self",
")",
":",
"if",
"self",
".",
"_session",
"is",
"None",
":",
"self",
".",
"_session",
"=",
"requests",
".",
"session",
"(",
")",
"self",
".",
"_session",
".",
"headers",
".",
"update",
"(",
"{",
"\"Authorization\"",
":",
"\"Bearer {}\"",
".",
"format",
"(",
"self",
".",
"token",
")",
",",
"\"User-Agent\"",
":",
"\"ads-api-client/{}\"",
".",
"format",
"(",
"__version__",
")",
",",
"\"Content-Type\"",
":",
"\"application/json\"",
",",
"}",
")",
"return",
"self",
".",
"_session"
] | http session interface, transparent proxy to requests.session | [
"http",
"session",
"interface",
"transparent",
"proxy",
"to",
"requests",
".",
"session"
] | 928415e202db80658cd8532fa4c3a00d0296b5c5 | https://github.com/andycasey/ads/blob/928415e202db80658cd8532fa4c3a00d0296b5c5/ads/base.py#L143-L156 |
4,773 | googledatalab/pydatalab | google/datalab/ml/_metrics.py | Metrics.from_csv | def from_csv(input_csv_pattern, headers=None, schema_file=None):
"""Create a Metrics instance from csv file pattern.
Args:
input_csv_pattern: Path to Csv file pattern (with no header). Can be local or GCS path.
headers: Csv headers.
schema_file: Path to a JSON file containing BigQuery schema. Used if "headers" is None.
Returns:
a Metrics instance.
Raises:
ValueError if both headers and schema_file are None.
"""
if headers is not None:
names = headers
elif schema_file is not None:
with _util.open_local_or_gcs(schema_file, mode='r') as f:
schema = json.load(f)
names = [x['name'] for x in schema]
else:
raise ValueError('Either headers or schema_file is needed')
metrics = Metrics(input_csv_pattern=input_csv_pattern, headers=names)
return metrics | python | def from_csv(input_csv_pattern, headers=None, schema_file=None):
"""Create a Metrics instance from csv file pattern.
Args:
input_csv_pattern: Path to Csv file pattern (with no header). Can be local or GCS path.
headers: Csv headers.
schema_file: Path to a JSON file containing BigQuery schema. Used if "headers" is None.
Returns:
a Metrics instance.
Raises:
ValueError if both headers and schema_file are None.
"""
if headers is not None:
names = headers
elif schema_file is not None:
with _util.open_local_or_gcs(schema_file, mode='r') as f:
schema = json.load(f)
names = [x['name'] for x in schema]
else:
raise ValueError('Either headers or schema_file is needed')
metrics = Metrics(input_csv_pattern=input_csv_pattern, headers=names)
return metrics | [
"def",
"from_csv",
"(",
"input_csv_pattern",
",",
"headers",
"=",
"None",
",",
"schema_file",
"=",
"None",
")",
":",
"if",
"headers",
"is",
"not",
"None",
":",
"names",
"=",
"headers",
"elif",
"schema_file",
"is",
"not",
"None",
":",
"with",
"_util",
".",
"open_local_or_gcs",
"(",
"schema_file",
",",
"mode",
"=",
"'r'",
")",
"as",
"f",
":",
"schema",
"=",
"json",
".",
"load",
"(",
"f",
")",
"names",
"=",
"[",
"x",
"[",
"'name'",
"]",
"for",
"x",
"in",
"schema",
"]",
"else",
":",
"raise",
"ValueError",
"(",
"'Either headers or schema_file is needed'",
")",
"metrics",
"=",
"Metrics",
"(",
"input_csv_pattern",
"=",
"input_csv_pattern",
",",
"headers",
"=",
"names",
")",
"return",
"metrics"
] | Create a Metrics instance from csv file pattern.
Args:
input_csv_pattern: Path to Csv file pattern (with no header). Can be local or GCS path.
headers: Csv headers.
schema_file: Path to a JSON file containing BigQuery schema. Used if "headers" is None.
Returns:
a Metrics instance.
Raises:
ValueError if both headers and schema_file are None. | [
"Create",
"a",
"Metrics",
"instance",
"from",
"csv",
"file",
"pattern",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/ml/_metrics.py#L56-L81 |
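A hypothetical call to the classmethod above; the GCS pattern and header names are placeholders, and the `google.datalab.ml.Metrics` import path is assumed from the record's module location.

```python
from google.datalab.ml import Metrics

# CSV files with no header row; column names supplied explicitly.
metrics = Metrics.from_csv('gs://my-bucket/eval/eval-*.csv',
                           headers=['key', 'target', 'predicted'])
```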
4,774 | googledatalab/pydatalab | google/datalab/ml/_metrics.py | Metrics.from_bigquery | def from_bigquery(sql):
"""Create a Metrics instance from a bigquery query or table.
Returns:
a Metrics instance.
Args:
sql: A BigQuery table name or a query.
"""
if isinstance(sql, bq.Query):
sql = sql._expanded_sql()
parts = sql.split('.')
if len(parts) == 1 or len(parts) > 3 or any(' ' in x for x in parts):
sql = '(' + sql + ')' # query, not a table name
else:
sql = '`' + sql + '`' # table name
metrics = Metrics(bigquery=sql)
return metrics | python | def from_bigquery(sql):
"""Create a Metrics instance from a bigquery query or table.
Returns:
a Metrics instance.
Args:
sql: A BigQuery table name or a query.
"""
if isinstance(sql, bq.Query):
sql = sql._expanded_sql()
parts = sql.split('.')
if len(parts) == 1 or len(parts) > 3 or any(' ' in x for x in parts):
sql = '(' + sql + ')' # query, not a table name
else:
sql = '`' + sql + '`' # table name
metrics = Metrics(bigquery=sql)
return metrics | [
"def",
"from_bigquery",
"(",
"sql",
")",
":",
"if",
"isinstance",
"(",
"sql",
",",
"bq",
".",
"Query",
")",
":",
"sql",
"=",
"sql",
".",
"_expanded_sql",
"(",
")",
"parts",
"=",
"sql",
".",
"split",
"(",
"'.'",
")",
"if",
"len",
"(",
"parts",
")",
"==",
"1",
"or",
"len",
"(",
"parts",
")",
">",
"3",
"or",
"any",
"(",
"' '",
"in",
"x",
"for",
"x",
"in",
"parts",
")",
":",
"sql",
"=",
"'('",
"+",
"sql",
"+",
"')'",
"# query, not a table name",
"else",
":",
"sql",
"=",
"'`'",
"+",
"sql",
"+",
"'`'",
"# table name",
"metrics",
"=",
"Metrics",
"(",
"bigquery",
"=",
"sql",
")",
"return",
"metrics"
] | Create a Metrics instance from a bigquery query or table.
Returns:
a Metrics instance.
Args:
sql: A BigQuery table name or a query. | [
"Create",
"a",
"Metrics",
"instance",
"from",
"a",
"bigquery",
"query",
"or",
"table",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/ml/_metrics.py#L84-L104 |
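The companion constructor above accepts either a table name or a query; a hypothetical call with a placeholder table name is shown below.

```python
from google.datalab.ml import Metrics  # import path assumed, as above

metrics = Metrics.from_bigquery('my-project.model_eval.predictions')
```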
4,775 | googledatalab/pydatalab | google/datalab/ml/_metrics.py | Metrics._get_data_from_csv_files | def _get_data_from_csv_files(self):
"""Get data from input csv files."""
all_df = []
for file_name in self._input_csv_files:
with _util.open_local_or_gcs(file_name, mode='r') as f:
all_df.append(pd.read_csv(f, names=self._headers))
df = pd.concat(all_df, ignore_index=True)
return df | python | def _get_data_from_csv_files(self):
"""Get data from input csv files."""
all_df = []
for file_name in self._input_csv_files:
with _util.open_local_or_gcs(file_name, mode='r') as f:
all_df.append(pd.read_csv(f, names=self._headers))
df = pd.concat(all_df, ignore_index=True)
return df | [
"def",
"_get_data_from_csv_files",
"(",
"self",
")",
":",
"all_df",
"=",
"[",
"]",
"for",
"file_name",
"in",
"self",
".",
"_input_csv_files",
":",
"with",
"_util",
".",
"open_local_or_gcs",
"(",
"file_name",
",",
"mode",
"=",
"'r'",
")",
"as",
"f",
":",
"all_df",
".",
"append",
"(",
"pd",
".",
"read_csv",
"(",
"f",
",",
"names",
"=",
"self",
".",
"_headers",
")",
")",
"df",
"=",
"pd",
".",
"concat",
"(",
"all_df",
",",
"ignore_index",
"=",
"True",
")",
"return",
"df"
] | Get data from input csv files. | [
"Get",
"data",
"from",
"input",
"csv",
"files",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/ml/_metrics.py#L106-L114 |
4,776 | googledatalab/pydatalab | google/datalab/ml/_metrics.py | Metrics._get_data_from_bigquery | def _get_data_from_bigquery(self, queries):
"""Get data from bigquery table or query."""
all_df = []
for query in queries:
all_df.append(query.execute().result().to_dataframe())
df = pd.concat(all_df, ignore_index=True)
return df | python | def _get_data_from_bigquery(self, queries):
"""Get data from bigquery table or query."""
all_df = []
for query in queries:
all_df.append(query.execute().result().to_dataframe())
df = pd.concat(all_df, ignore_index=True)
return df | [
"def",
"_get_data_from_bigquery",
"(",
"self",
",",
"queries",
")",
":",
"all_df",
"=",
"[",
"]",
"for",
"query",
"in",
"queries",
":",
"all_df",
".",
"append",
"(",
"query",
".",
"execute",
"(",
")",
".",
"result",
"(",
")",
".",
"to_dataframe",
"(",
")",
")",
"df",
"=",
"pd",
".",
"concat",
"(",
"all_df",
",",
"ignore_index",
"=",
"True",
")",
"return",
"df"
] | Get data from bigquery table or query. | [
"Get",
"data",
"from",
"bigquery",
"table",
"or",
"query",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/ml/_metrics.py#L116-L123 |
4,777 | googledatalab/pydatalab | google/datalab/bigquery/_udf.py | UDF._expanded_sql | def _expanded_sql(self):
"""Get the expanded BigQuery SQL string of this UDF
Returns
The expanded SQL string of this UDF
"""
if not self._sql:
self._sql = UDF._build_udf(self._name, self._code, self._return_type, self._params,
self._language, self._imports)
return self._sql | python | def _expanded_sql(self):
"""Get the expanded BigQuery SQL string of this UDF
Returns
The expanded SQL string of this UDF
"""
if not self._sql:
self._sql = UDF._build_udf(self._name, self._code, self._return_type, self._params,
self._language, self._imports)
return self._sql | [
"def",
"_expanded_sql",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"_sql",
":",
"self",
".",
"_sql",
"=",
"UDF",
".",
"_build_udf",
"(",
"self",
".",
"_name",
",",
"self",
".",
"_code",
",",
"self",
".",
"_return_type",
",",
"self",
".",
"_params",
",",
"self",
".",
"_language",
",",
"self",
".",
"_imports",
")",
"return",
"self",
".",
"_sql"
] | Get the expanded BigQuery SQL string of this UDF
Returns
The expanded SQL string of this UDF | [
"Get",
"the",
"expanded",
"BigQuery",
"SQL",
"string",
"of",
"this",
"UDF"
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/bigquery/_udf.py#L65-L74 |
4,778 | googledatalab/pydatalab | google/datalab/bigquery/_udf.py | UDF._build_udf | def _build_udf(name, code, return_type, params, language, imports):
"""Creates the UDF part of a BigQuery query using its pieces
Args:
name: the name of the javascript function
code: function body implementing the logic.
return_type: BigQuery data type of the function return. See supported data types in
the BigQuery docs
params: dictionary of parameter names and types
language: see list of supported languages in the BigQuery docs
imports: a list of GCS paths containing further support code.
"""
params = ','.join(['%s %s' % named_param for named_param in params])
imports = ','.join(['library="%s"' % i for i in imports])
if language.lower() == 'sql':
udf = 'CREATE TEMPORARY FUNCTION {name} ({params})\n' + \
'RETURNS {return_type}\n' + \
'AS (\n' + \
'{code}\n' + \
');'
else:
udf = 'CREATE TEMPORARY FUNCTION {name} ({params})\n' +\
'RETURNS {return_type}\n' + \
'LANGUAGE {language}\n' + \
'AS """\n' +\
'{code}\n' +\
'"""\n' +\
'OPTIONS (\n' +\
'{imports}\n' +\
');'
return udf.format(name=name, params=params, return_type=return_type,
language=language, code=code, imports=imports) | python | def _build_udf(name, code, return_type, params, language, imports):
"""Creates the UDF part of a BigQuery query using its pieces
Args:
name: the name of the javascript function
code: function body implementing the logic.
return_type: BigQuery data type of the function return. See supported data types in
the BigQuery docs
params: dictionary of parameter names and types
language: see list of supported languages in the BigQuery docs
imports: a list of GCS paths containing further support code.
"""
params = ','.join(['%s %s' % named_param for named_param in params])
imports = ','.join(['library="%s"' % i for i in imports])
if language.lower() == 'sql':
udf = 'CREATE TEMPORARY FUNCTION {name} ({params})\n' + \
'RETURNS {return_type}\n' + \
'AS (\n' + \
'{code}\n' + \
');'
else:
udf = 'CREATE TEMPORARY FUNCTION {name} ({params})\n' +\
'RETURNS {return_type}\n' + \
'LANGUAGE {language}\n' + \
'AS """\n' +\
'{code}\n' +\
'"""\n' +\
'OPTIONS (\n' +\
'{imports}\n' +\
');'
return udf.format(name=name, params=params, return_type=return_type,
language=language, code=code, imports=imports) | [
"def",
"_build_udf",
"(",
"name",
",",
"code",
",",
"return_type",
",",
"params",
",",
"language",
",",
"imports",
")",
":",
"params",
"=",
"','",
".",
"join",
"(",
"[",
"'%s %s'",
"%",
"named_param",
"for",
"named_param",
"in",
"params",
"]",
")",
"imports",
"=",
"','",
".",
"join",
"(",
"[",
"'library=\"%s\"'",
"%",
"i",
"for",
"i",
"in",
"imports",
"]",
")",
"if",
"language",
".",
"lower",
"(",
")",
"==",
"'sql'",
":",
"udf",
"=",
"'CREATE TEMPORARY FUNCTION {name} ({params})\\n'",
"+",
"'RETURNS {return_type}\\n'",
"+",
"'AS (\\n'",
"+",
"'{code}\\n'",
"+",
"');'",
"else",
":",
"udf",
"=",
"'CREATE TEMPORARY FUNCTION {name} ({params})\\n'",
"+",
"'RETURNS {return_type}\\n'",
"+",
"'LANGUAGE {language}\\n'",
"+",
"'AS \"\"\"\\n'",
"+",
"'{code}\\n'",
"+",
"'\"\"\"\\n'",
"+",
"'OPTIONS (\\n'",
"+",
"'{imports}\\n'",
"+",
"');'",
"return",
"udf",
".",
"format",
"(",
"name",
"=",
"name",
",",
"params",
"=",
"params",
",",
"return_type",
"=",
"return_type",
",",
"language",
"=",
"language",
",",
"code",
"=",
"code",
",",
"imports",
"=",
"imports",
")"
] | Creates the UDF part of a BigQuery query using its pieces
Args:
name: the name of the javascript function
code: function body implementing the logic.
return_type: BigQuery data type of the function return. See supported data types in
the BigQuery docs
params: dictionary of parameter names and types
language: see list of supported languages in the BigQuery docs
imports: a list of GCS paths containing further support code. | [
"Creates",
"the",
"UDF",
"part",
"of",
"a",
"BigQuery",
"query",
"using",
"its",
"pieces"
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/bigquery/_udf.py#L83-L116 |
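A standalone sketch of what the template above produces for a small JavaScript UDF; the function name, parameter pairs, and GCS library path are made up. Note the code iterates `params` as (name, type) pairs, so a list of tuples is used here.

```python
# Reproduces the non-SQL branch of the template above with illustrative inputs.
name, return_type, language, code = 'area', 'FLOAT64', 'js', 'return x * y;'
params = ','.join('%s %s' % p for p in [('x', 'FLOAT64'), ('y', 'FLOAT64')])
imports = ','.join('library="%s"' % i for i in ['gs://my-bucket/js/helpers.js'])

udf_sql = ('CREATE TEMPORARY FUNCTION {name} ({params})\n'
           'RETURNS {return_type}\n'
           'LANGUAGE {language}\n'
           'AS """\n'
           '{code}\n'
           '"""\n'
           'OPTIONS (\n'
           '{imports}\n'
           ');').format(name=name, params=params, return_type=return_type,
                        language=language, code=code, imports=imports)
print(udf_sql)
```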
4,779 | googledatalab/pydatalab | google/datalab/storage/_bucket.py | BucketMetadata.created_on | def created_on(self):
"""The created timestamp of the bucket as a datetime.datetime."""
s = self._info.get('timeCreated', None)
return dateutil.parser.parse(s) if s else None | python | def created_on(self):
"""The created timestamp of the bucket as a datetime.datetime."""
s = self._info.get('timeCreated', None)
return dateutil.parser.parse(s) if s else None | [
"def",
"created_on",
"(",
"self",
")",
":",
"s",
"=",
"self",
".",
"_info",
".",
"get",
"(",
"'timeCreated'",
",",
"None",
")",
"return",
"dateutil",
".",
"parser",
".",
"parse",
"(",
"s",
")",
"if",
"s",
"else",
"None"
] | The created timestamp of the bucket as a datetime.datetime. | [
"The",
"created",
"timestamp",
"of",
"the",
"bucket",
"as",
"a",
"datetime",
".",
"datetime",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/storage/_bucket.py#L71-L74 |
4,780 | googledatalab/pydatalab | google/datalab/storage/_bucket.py | Bucket.metadata | def metadata(self):
"""Retrieves metadata about the bucket.
Returns:
A BucketMetadata instance with information about this bucket.
Raises:
Exception if there was an error requesting the bucket's metadata.
"""
if self._info is None:
try:
self._info = self._api.buckets_get(self._name)
except Exception as e:
raise e
return BucketMetadata(self._info) if self._info else None | python | def metadata(self):
"""Retrieves metadata about the bucket.
Returns:
A BucketMetadata instance with information about this bucket.
Raises:
Exception if there was an error requesting the bucket's metadata.
"""
if self._info is None:
try:
self._info = self._api.buckets_get(self._name)
except Exception as e:
raise e
return BucketMetadata(self._info) if self._info else None | [
"def",
"metadata",
"(",
"self",
")",
":",
"if",
"self",
".",
"_info",
"is",
"None",
":",
"try",
":",
"self",
".",
"_info",
"=",
"self",
".",
"_api",
".",
"buckets_get",
"(",
"self",
".",
"_name",
")",
"except",
"Exception",
"as",
"e",
":",
"raise",
"e",
"return",
"BucketMetadata",
"(",
"self",
".",
"_info",
")",
"if",
"self",
".",
"_info",
"else",
"None"
] | Retrieves metadata about the bucket.
Returns:
A BucketMetadata instance with information about this bucket.
Raises:
Exception if there was an error requesting the bucket's metadata. | [
"Retrieves",
"metadata",
"about",
"the",
"bucket",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/storage/_bucket.py#L118-L132 |
4,781 | googledatalab/pydatalab | google/datalab/storage/_bucket.py | Bucket.object | def object(self, key):
"""Retrieves a Storage Object for the specified key in this bucket.
The object need not exist.
Args:
key: the key of the object within the bucket.
Returns:
An Object instance representing the specified key.
"""
return _object.Object(self._name, key, context=self._context) | python | def object(self, key):
"""Retrieves a Storage Object for the specified key in this bucket.
The object need not exist.
Args:
key: the key of the object within the bucket.
Returns:
An Object instance representing the specified key.
"""
return _object.Object(self._name, key, context=self._context) | [
"def",
"object",
"(",
"self",
",",
"key",
")",
":",
"return",
"_object",
".",
"Object",
"(",
"self",
".",
"_name",
",",
"key",
",",
"context",
"=",
"self",
".",
"_context",
")"
] | Retrieves a Storage Object for the specified key in this bucket.
The object need not exist.
Args:
key: the key of the object within the bucket.
Returns:
An Object instance representing the specified key. | [
"Retrieves",
"a",
"Storage",
"Object",
"for",
"the",
"specified",
"key",
"in",
"this",
"bucket",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/storage/_bucket.py#L134-L144 |
4,782 | googledatalab/pydatalab | google/datalab/storage/_bucket.py | Bucket.objects | def objects(self, prefix=None, delimiter=None):
"""Get an iterator for the objects within this bucket.
Args:
prefix: an optional prefix to match objects.
delimiter: an optional string to simulate directory-like semantics. The returned objects
will be those whose names do not contain the delimiter after the prefix. For
the remaining objects, the names will be returned truncated after the delimiter
with duplicates removed (i.e. as pseudo-directories).
Returns:
An iterable list of objects within this bucket.
"""
return _object.Objects(self._name, prefix, delimiter, context=self._context) | python | def objects(self, prefix=None, delimiter=None):
"""Get an iterator for the objects within this bucket.
Args:
prefix: an optional prefix to match objects.
delimiter: an optional string to simulate directory-like semantics. The returned objects
will be those whose names do not contain the delimiter after the prefix. For
the remaining objects, the names will be returned truncated after the delimiter
with duplicates removed (i.e. as pseudo-directories).
Returns:
An iterable list of objects within this bucket.
"""
return _object.Objects(self._name, prefix, delimiter, context=self._context) | [
"def",
"objects",
"(",
"self",
",",
"prefix",
"=",
"None",
",",
"delimiter",
"=",
"None",
")",
":",
"return",
"_object",
".",
"Objects",
"(",
"self",
".",
"_name",
",",
"prefix",
",",
"delimiter",
",",
"context",
"=",
"self",
".",
"_context",
")"
] | Get an iterator for the objects within this bucket.
Args:
prefix: an optional prefix to match objects.
delimiter: an optional string to simulate directory-like semantics. The returned objects
will be those whose names do not contain the delimiter after the prefix. For
the remaining objects, the names will be returned truncated after the delimiter
with duplicates removed (i.e. as pseudo-directories).
Returns:
An iterable list of objects within this bucket. | [
"Get",
"an",
"iterator",
"for",
"the",
"objects",
"within",
"this",
"bucket",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/storage/_bucket.py#L146-L158 |
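A hypothetical listing call using the accessors above; the bucket name and prefix are placeholders, and both the top-level `google.datalab.storage.Bucket` constructor and the `key` attribute on returned objects are assumptions drawn from the surrounding records rather than shown here.

```python
import google.datalab.storage as storage

bucket = storage.Bucket('my-bucket')                      # name is a placeholder
for obj in bucket.objects(prefix='reports/2019/', delimiter='/'):
    print(obj.key)                                        # .key assumed for Object instances
```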
4,783 | googledatalab/pydatalab | google/datalab/storage/_bucket.py | Bucket.delete | def delete(self):
"""Deletes the bucket.
Raises:
Exception if there was an error deleting the bucket.
"""
if self.exists():
try:
self._api.buckets_delete(self._name)
except Exception as e:
raise e | python | def delete(self):
"""Deletes the bucket.
Raises:
Exception if there was an error deleting the bucket.
"""
if self.exists():
try:
self._api.buckets_delete(self._name)
except Exception as e:
raise e | [
"def",
"delete",
"(",
"self",
")",
":",
"if",
"self",
".",
"exists",
"(",
")",
":",
"try",
":",
"self",
".",
"_api",
".",
"buckets_delete",
"(",
"self",
".",
"_name",
")",
"except",
"Exception",
"as",
"e",
":",
"raise",
"e"
] | Deletes the bucket.
Raises:
Exception if there was an error deleting the bucket. | [
"Deletes",
"the",
"bucket",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/storage/_bucket.py#L185-L195 |
4,784 | googledatalab/pydatalab | google/datalab/storage/_bucket.py | Buckets.contains | def contains(self, name):
"""Checks if the specified bucket exists.
Args:
name: the name of the bucket to lookup.
Returns:
True if the bucket exists; False otherwise.
Raises:
Exception if there was an error requesting information about the bucket.
"""
try:
self._api.buckets_get(name)
except google.datalab.utils.RequestException as e:
if e.status == 404:
return False
raise e
except Exception as e:
raise e
return True | python | def contains(self, name):
"""Checks if the specified bucket exists.
Args:
name: the name of the bucket to lookup.
Returns:
True if the bucket exists; False otherwise.
Raises:
Exception if there was an error requesting information about the bucket.
"""
try:
self._api.buckets_get(name)
except google.datalab.utils.RequestException as e:
if e.status == 404:
return False
raise e
except Exception as e:
raise e
return True | [
"def",
"contains",
"(",
"self",
",",
"name",
")",
":",
"try",
":",
"self",
".",
"_api",
".",
"buckets_get",
"(",
"name",
")",
"except",
"google",
".",
"datalab",
".",
"utils",
".",
"RequestException",
"as",
"e",
":",
"if",
"e",
".",
"status",
"==",
"404",
":",
"return",
"False",
"raise",
"e",
"except",
"Exception",
"as",
"e",
":",
"raise",
"e",
"return",
"True"
] | Checks if the specified bucket exists.
Args:
name: the name of the bucket to lookup.
Returns:
True if the bucket exists; False otherwise.
Raises:
Exception if there was an error requesting information about the bucket. | [
"Checks",
"if",
"the",
"specified",
"bucket",
"exists",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/storage/_bucket.py#L215-L233 |
4,785 | googledatalab/pydatalab | datalab/storage/_bucket.py | Bucket.item | def item(self, key):
"""Retrieves an Item object for the specified key in this bucket.
The item need not exist.
Args:
key: the key of the item within the bucket.
Returns:
An Item instance representing the specified key.
"""
return _item.Item(self._name, key, context=self._context) | python | def item(self, key):
"""Retrieves an Item object for the specified key in this bucket.
The item need not exist.
Args:
key: the key of the item within the bucket.
Returns:
An Item instance representing the specified key.
"""
return _item.Item(self._name, key, context=self._context) | [
"def",
"item",
"(",
"self",
",",
"key",
")",
":",
"return",
"_item",
".",
"Item",
"(",
"self",
".",
"_name",
",",
"key",
",",
"context",
"=",
"self",
".",
"_context",
")"
] | Retrieves an Item object for the specified key in this bucket.
The item need not exist.
Args:
key: the key of the item within the bucket.
Returns:
An Item instance representing the specified key. | [
"Retrieves",
"an",
"Item",
"object",
"for",
"the",
"specified",
"key",
"in",
"this",
"bucket",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/datalab/storage/_bucket.py#L134-L144 |
4,786 | googledatalab/pydatalab | datalab/storage/_bucket.py | Bucket.items | def items(self, prefix=None, delimiter=None):
"""Get an iterator for the items within this bucket.
Args:
prefix: an optional prefix to match items.
delimiter: an optional string to simulate directory-like semantics. The returned items
will be those whose names do not contain the delimiter after the prefix. For
the remaining items, the names will be returned truncated after the delimiter
with duplicates removed (i.e. as pseudo-directories).
Returns:
An iterable list of items within this bucket.
"""
return _item.Items(self._name, prefix, delimiter, context=self._context) | python | def items(self, prefix=None, delimiter=None):
"""Get an iterator for the items within this bucket.
Args:
prefix: an optional prefix to match items.
delimiter: an optional string to simulate directory-like semantics. The returned items
will be those whose names do not contain the delimiter after the prefix. For
the remaining items, the names will be returned truncated after the delimiter
with duplicates removed (i.e. as pseudo-directories).
Returns:
An iterable list of items within this bucket.
"""
return _item.Items(self._name, prefix, delimiter, context=self._context) | [
"def",
"items",
"(",
"self",
",",
"prefix",
"=",
"None",
",",
"delimiter",
"=",
"None",
")",
":",
"return",
"_item",
".",
"Items",
"(",
"self",
".",
"_name",
",",
"prefix",
",",
"delimiter",
",",
"context",
"=",
"self",
".",
"_context",
")"
] | Get an iterator for the items within this bucket.
Args:
prefix: an optional prefix to match items.
delimiter: an optional string to simulate directory-like semantics. The returned items
will be those whose names do not contain the delimiter after the prefix. For
the remaining items, the names will be returned truncated after the delimiter
with duplicates removed (i.e. as pseudo-directories).
Returns:
An iterable list of items within this bucket. | [
"Get",
"an",
"iterator",
"for",
"the",
"items",
"within",
"this",
"bucket",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/datalab/storage/_bucket.py#L146-L158 |
4,787 | googledatalab/pydatalab | datalab/storage/_bucket.py | Bucket.create | def create(self, project_id=None):
"""Creates the bucket.
Args:
project_id: the project in which to create the bucket.
Returns:
The bucket.
Raises:
Exception if there was an error creating the bucket.
"""
if not self.exists():
if project_id is None:
project_id = self._api.project_id
try:
self._info = self._api.buckets_insert(self._name, project_id=project_id)
except Exception as e:
raise e
return self | python | def create(self, project_id=None):
"""Creates the bucket.
Args:
project_id: the project in which to create the bucket.
Returns:
The bucket.
Raises:
Exception if there was an error creating the bucket.
"""
if not self.exists():
if project_id is None:
project_id = self._api.project_id
try:
self._info = self._api.buckets_insert(self._name, project_id=project_id)
except Exception as e:
raise e
return self | [
"def",
"create",
"(",
"self",
",",
"project_id",
"=",
"None",
")",
":",
"if",
"not",
"self",
".",
"exists",
"(",
")",
":",
"if",
"project_id",
"is",
"None",
":",
"project_id",
"=",
"self",
".",
"_api",
".",
"project_id",
"try",
":",
"self",
".",
"_info",
"=",
"self",
".",
"_api",
".",
"buckets_insert",
"(",
"self",
".",
"_name",
",",
"project_id",
"=",
"project_id",
")",
"except",
"Exception",
"as",
"e",
":",
"raise",
"e",
"return",
"self"
] | Creates the bucket.
Args:
project_id: the project in which to create the bucket.
Returns:
The bucket.
Raises:
Exception if there was an error creating the bucket. | [
"Creates",
"the",
"bucket",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/datalab/storage/_bucket.py#L167-L184 |
4,788 | googledatalab/pydatalab | datalab/storage/_bucket.py | Buckets.create | def create(self, name):
"""Creates a new bucket.
Args:
name: a unique name for the new bucket.
Returns:
The newly created bucket.
Raises:
Exception if there was an error creating the bucket.
"""
return Bucket(name, context=self._context).create(self._project_id) | python | def create(self, name):
"""Creates a new bucket.
Args:
name: a unique name for the new bucket.
Returns:
The newly created bucket.
Raises:
Exception if there was an error creating the bucket.
"""
return Bucket(name, context=self._context).create(self._project_id) | [
"def",
"create",
"(",
"self",
",",
"name",
")",
":",
"return",
"Bucket",
"(",
"name",
",",
"context",
"=",
"self",
".",
"_context",
")",
".",
"create",
"(",
"self",
".",
"_project_id",
")"
] | Creates a new bucket.
Args:
name: a unique name for the new bucket.
Returns:
The newly created bucket.
Raises:
Exception if there was an error creating the bucket. | [
"Creates",
"a",
"new",
"bucket",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/datalab/storage/_bucket.py#L238-L248 |
4,789 | googledatalab/pydatalab | solutionbox/structured_data/mltoolbox/regression/dnn/_regression_dnn.py | train | def train(train_dataset,
eval_dataset,
analysis_dir,
output_dir,
features,
layer_sizes,
max_steps=5000,
num_epochs=None,
train_batch_size=100,
eval_batch_size=16,
min_eval_frequency=100,
learning_rate=0.01,
epsilon=0.0005,
job_name=None,
cloud=None,
):
"""Blocking version of train_async. See documentation for train_async."""
job = train_async(
train_dataset=train_dataset,
eval_dataset=eval_dataset,
analysis_dir=analysis_dir,
output_dir=output_dir,
features=features,
layer_sizes=layer_sizes,
max_steps=max_steps,
num_epochs=num_epochs,
train_batch_size=train_batch_size,
eval_batch_size=eval_batch_size,
min_eval_frequency=min_eval_frequency,
learning_rate=learning_rate,
epsilon=epsilon,
job_name=job_name,
cloud=cloud,
)
job.wait()
print('Training: ' + str(job.state)) | python | def train(train_dataset,
eval_dataset,
analysis_dir,
output_dir,
features,
layer_sizes,
max_steps=5000,
num_epochs=None,
train_batch_size=100,
eval_batch_size=16,
min_eval_frequency=100,
learning_rate=0.01,
epsilon=0.0005,
job_name=None,
cloud=None,
):
"""Blocking version of train_async. See documentation for train_async."""
job = train_async(
train_dataset=train_dataset,
eval_dataset=eval_dataset,
analysis_dir=analysis_dir,
output_dir=output_dir,
features=features,
layer_sizes=layer_sizes,
max_steps=max_steps,
num_epochs=num_epochs,
train_batch_size=train_batch_size,
eval_batch_size=eval_batch_size,
min_eval_frequency=min_eval_frequency,
learning_rate=learning_rate,
epsilon=epsilon,
job_name=job_name,
cloud=cloud,
)
job.wait()
print('Training: ' + str(job.state)) | [
"def",
"train",
"(",
"train_dataset",
",",
"eval_dataset",
",",
"analysis_dir",
",",
"output_dir",
",",
"features",
",",
"layer_sizes",
",",
"max_steps",
"=",
"5000",
",",
"num_epochs",
"=",
"None",
",",
"train_batch_size",
"=",
"100",
",",
"eval_batch_size",
"=",
"16",
",",
"min_eval_frequency",
"=",
"100",
",",
"learning_rate",
"=",
"0.01",
",",
"epsilon",
"=",
"0.0005",
",",
"job_name",
"=",
"None",
",",
"cloud",
"=",
"None",
",",
")",
":",
"job",
"=",
"train_async",
"(",
"train_dataset",
"=",
"train_dataset",
",",
"eval_dataset",
"=",
"eval_dataset",
",",
"analysis_dir",
"=",
"analysis_dir",
",",
"output_dir",
"=",
"output_dir",
",",
"features",
"=",
"features",
",",
"layer_sizes",
"=",
"layer_sizes",
",",
"max_steps",
"=",
"max_steps",
",",
"num_epochs",
"=",
"num_epochs",
",",
"train_batch_size",
"=",
"train_batch_size",
",",
"eval_batch_size",
"=",
"eval_batch_size",
",",
"min_eval_frequency",
"=",
"min_eval_frequency",
",",
"learning_rate",
"=",
"learning_rate",
",",
"epsilon",
"=",
"epsilon",
",",
"job_name",
"=",
"job_name",
",",
"cloud",
"=",
"cloud",
",",
")",
"job",
".",
"wait",
"(",
")",
"print",
"(",
"'Training: '",
"+",
"str",
"(",
"job",
".",
"state",
")",
")"
] | Blocking version of train_async. See documentation for train_async. | [
"Blocking",
"version",
"of",
"train_async",
".",
"See",
"documentation",
"for",
"train_async",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/solutionbox/structured_data/mltoolbox/regression/dnn/_regression_dnn.py#L4-L39 |
4,790 | googledatalab/pydatalab | datalab/stackdriver/monitoring/_resource.py | ResourceDescriptors.list | def list(self, pattern='*'):
"""Returns a list of resource descriptors that match the filters.
Args:
pattern: An optional pattern to further filter the descriptors. This can
include Unix shell-style wildcards. E.g. ``"aws*"``, ``"*cluster*"``.
Returns:
A list of ResourceDescriptor objects that match the filters.
"""
if self._descriptors is None:
self._descriptors = self._client.list_resource_descriptors(
filter_string=self._filter_string)
return [resource for resource in self._descriptors
if fnmatch.fnmatch(resource.type, pattern)] | python | def list(self, pattern='*'):
"""Returns a list of resource descriptors that match the filters.
Args:
pattern: An optional pattern to further filter the descriptors. This can
include Unix shell-style wildcards. E.g. ``"aws*"``, ``"*cluster*"``.
Returns:
A list of ResourceDescriptor objects that match the filters.
"""
if self._descriptors is None:
self._descriptors = self._client.list_resource_descriptors(
filter_string=self._filter_string)
return [resource for resource in self._descriptors
if fnmatch.fnmatch(resource.type, pattern)] | [
"def",
"list",
"(",
"self",
",",
"pattern",
"=",
"'*'",
")",
":",
"if",
"self",
".",
"_descriptors",
"is",
"None",
":",
"self",
".",
"_descriptors",
"=",
"self",
".",
"_client",
".",
"list_resource_descriptors",
"(",
"filter_string",
"=",
"self",
".",
"_filter_string",
")",
"return",
"[",
"resource",
"for",
"resource",
"in",
"self",
".",
"_descriptors",
"if",
"fnmatch",
".",
"fnmatch",
"(",
"resource",
".",
"type",
",",
"pattern",
")",
"]"
] | Returns a list of resource descriptors that match the filters.
Args:
pattern: An optional pattern to further filter the descriptors. This can
include Unix shell-style wildcards. E.g. ``"aws*"``, ``"*cluster*"``.
Returns:
A list of ResourceDescriptor objects that match the filters. | [
"Returns",
"a",
"list",
"of",
"resource",
"descriptors",
"that",
"match",
"the",
"filters",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/datalab/stackdriver/monitoring/_resource.py#L43-L57 |
4,791 | googledatalab/pydatalab | google/datalab/storage/commands/_storage.py | _gcs_list_buckets | def _gcs_list_buckets(project, pattern):
""" List all Google Cloud Storage buckets that match a pattern. """
data = [{'Bucket': 'gs://' + bucket.name, 'Created': bucket.metadata.created_on}
for bucket in google.datalab.storage.Buckets(_make_context(project))
if fnmatch.fnmatch(bucket.name, pattern)]
return google.datalab.utils.commands.render_dictionary(data, ['Bucket', 'Created']) | python | def _gcs_list_buckets(project, pattern):
""" List all Google Cloud Storage buckets that match a pattern. """
data = [{'Bucket': 'gs://' + bucket.name, 'Created': bucket.metadata.created_on}
for bucket in google.datalab.storage.Buckets(_make_context(project))
if fnmatch.fnmatch(bucket.name, pattern)]
return google.datalab.utils.commands.render_dictionary(data, ['Bucket', 'Created']) | [
"def",
"_gcs_list_buckets",
"(",
"project",
",",
"pattern",
")",
":",
"data",
"=",
"[",
"{",
"'Bucket'",
":",
"'gs://'",
"+",
"bucket",
".",
"name",
",",
"'Created'",
":",
"bucket",
".",
"metadata",
".",
"created_on",
"}",
"for",
"bucket",
"in",
"google",
".",
"datalab",
".",
"storage",
".",
"Buckets",
"(",
"_make_context",
"(",
"project",
")",
")",
"if",
"fnmatch",
".",
"fnmatch",
"(",
"bucket",
".",
"name",
",",
"pattern",
")",
"]",
"return",
"google",
".",
"datalab",
".",
"utils",
".",
"commands",
".",
"render_dictionary",
"(",
"data",
",",
"[",
"'Bucket'",
",",
"'Created'",
"]",
")"
] | List all Google Cloud Storage buckets that match a pattern. | [
"List",
"all",
"Google",
"Cloud",
"Storage",
"buckets",
"that",
"match",
"a",
"pattern",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/storage/commands/_storage.py#L278-L283 |
4,792 | googledatalab/pydatalab | google/datalab/storage/commands/_storage.py | _gcs_list_keys | def _gcs_list_keys(bucket, pattern):
""" List all Google Cloud Storage keys in a specified bucket that match a pattern. """
data = [{'Name': obj.metadata.name,
'Type': obj.metadata.content_type,
'Size': obj.metadata.size,
'Updated': obj.metadata.updated_on}
for obj in _gcs_get_keys(bucket, pattern)]
return google.datalab.utils.commands.render_dictionary(data, ['Name', 'Type', 'Size', 'Updated']) | python | def _gcs_list_keys(bucket, pattern):
""" List all Google Cloud Storage keys in a specified bucket that match a pattern. """
data = [{'Name': obj.metadata.name,
'Type': obj.metadata.content_type,
'Size': obj.metadata.size,
'Updated': obj.metadata.updated_on}
for obj in _gcs_get_keys(bucket, pattern)]
return google.datalab.utils.commands.render_dictionary(data, ['Name', 'Type', 'Size', 'Updated']) | [
"def",
"_gcs_list_keys",
"(",
"bucket",
",",
"pattern",
")",
":",
"data",
"=",
"[",
"{",
"'Name'",
":",
"obj",
".",
"metadata",
".",
"name",
",",
"'Type'",
":",
"obj",
".",
"metadata",
".",
"content_type",
",",
"'Size'",
":",
"obj",
".",
"metadata",
".",
"size",
",",
"'Updated'",
":",
"obj",
".",
"metadata",
".",
"updated_on",
"}",
"for",
"obj",
"in",
"_gcs_get_keys",
"(",
"bucket",
",",
"pattern",
")",
"]",
"return",
"google",
".",
"datalab",
".",
"utils",
".",
"commands",
".",
"render_dictionary",
"(",
"data",
",",
"[",
"'Name'",
",",
"'Type'",
",",
"'Size'",
",",
"'Updated'",
"]",
")"
] | List all Google Cloud Storage keys in a specified bucket that match a pattern. | [
"List",
"all",
"Google",
"Cloud",
"Storage",
"keys",
"in",
"a",
"specified",
"bucket",
"that",
"match",
"a",
"pattern",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/storage/commands/_storage.py#L296-L303 |
4,793 | googledatalab/pydatalab | solutionbox/ml_workbench/tensorflow/transform.py | prepare_image_transforms | def prepare_image_transforms(element, image_columns):
"""Replace an images url with its jpeg bytes.
Args:
element: one input row, as a dict
image_columns: list of columns that are image paths
Return:
element, where each image file path has been replaced by a base64 image.
"""
import base64
import cStringIO
from PIL import Image
from tensorflow.python.lib.io import file_io as tf_file_io
from apache_beam.metrics import Metrics
img_error_count = Metrics.counter('main', 'ImgErrorCount')
img_missing_count = Metrics.counter('main', 'ImgMissingCount')
for name in image_columns:
uri = element[name]
if not uri:
img_missing_count.inc()
continue
try:
with tf_file_io.FileIO(uri, 'r') as f:
img = Image.open(f).convert('RGB')
# A variety of different calling libraries throw different exceptions here.
# They all correspond to an unreadable file so we treat them equivalently.
# pylint: disable broad-except
except Exception as e:
logging.exception('Error processing image %s: %s', uri, str(e))
img_error_count.inc()
return
# Convert to desired format and output.
output = cStringIO.StringIO()
img.save(output, 'jpeg')
element[name] = base64.urlsafe_b64encode(output.getvalue())
return element | python | def prepare_image_transforms(element, image_columns):
"""Replace an images url with its jpeg bytes.
Args:
element: one input row, as a dict
image_columns: list of columns that are image paths
Return:
element, where each image file path has been replaced by a base64 image.
"""
import base64
import cStringIO
from PIL import Image
from tensorflow.python.lib.io import file_io as tf_file_io
from apache_beam.metrics import Metrics
img_error_count = Metrics.counter('main', 'ImgErrorCount')
img_missing_count = Metrics.counter('main', 'ImgMissingCount')
for name in image_columns:
uri = element[name]
if not uri:
img_missing_count.inc()
continue
try:
with tf_file_io.FileIO(uri, 'r') as f:
img = Image.open(f).convert('RGB')
# A variety of different calling libraries throw different exceptions here.
# They all correspond to an unreadable file so we treat them equivalently.
# pylint: disable broad-except
except Exception as e:
logging.exception('Error processing image %s: %s', uri, str(e))
img_error_count.inc()
return
# Convert to desired format and output.
output = cStringIO.StringIO()
img.save(output, 'jpeg')
element[name] = base64.urlsafe_b64encode(output.getvalue())
return element | [
"def",
"prepare_image_transforms",
"(",
"element",
",",
"image_columns",
")",
":",
"import",
"base64",
"import",
"cStringIO",
"from",
"PIL",
"import",
"Image",
"from",
"tensorflow",
".",
"python",
".",
"lib",
".",
"io",
"import",
"file_io",
"as",
"tf_file_io",
"from",
"apache_beam",
".",
"metrics",
"import",
"Metrics",
"img_error_count",
"=",
"Metrics",
".",
"counter",
"(",
"'main'",
",",
"'ImgErrorCount'",
")",
"img_missing_count",
"=",
"Metrics",
".",
"counter",
"(",
"'main'",
",",
"'ImgMissingCount'",
")",
"for",
"name",
"in",
"image_columns",
":",
"uri",
"=",
"element",
"[",
"name",
"]",
"if",
"not",
"uri",
":",
"img_missing_count",
".",
"inc",
"(",
")",
"continue",
"try",
":",
"with",
"tf_file_io",
".",
"FileIO",
"(",
"uri",
",",
"'r'",
")",
"as",
"f",
":",
"img",
"=",
"Image",
".",
"open",
"(",
"f",
")",
".",
"convert",
"(",
"'RGB'",
")",
"# A variety of different calling libraries throw different exceptions here.",
"# They all correspond to an unreadable file so we treat them equivalently.",
"# pylint: disable broad-except",
"except",
"Exception",
"as",
"e",
":",
"logging",
".",
"exception",
"(",
"'Error processing image %s: %s'",
",",
"uri",
",",
"str",
"(",
"e",
")",
")",
"img_error_count",
".",
"inc",
"(",
")",
"return",
"# Convert to desired format and output.",
"output",
"=",
"cStringIO",
".",
"StringIO",
"(",
")",
"img",
".",
"save",
"(",
"output",
",",
"'jpeg'",
")",
"element",
"[",
"name",
"]",
"=",
"base64",
".",
"urlsafe_b64encode",
"(",
"output",
".",
"getvalue",
"(",
")",
")",
"return",
"element"
] | Replace an images url with its jpeg bytes.
Args:
element: one input row, as a dict
image_columns: list of columns that are image paths
Return:
element, where each image file path has been replaced by a base64 image. | [
"Replace",
"an",
"images",
"url",
"with",
"its",
"jpeg",
"bytes",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/solutionbox/ml_workbench/tensorflow/transform.py#L197-L238 |
4,794 | googledatalab/pydatalab | solutionbox/ml_workbench/tensorflow/transform.py | decode_csv | def decode_csv(csv_string, column_names):
"""Parse a csv line into a dict.
Args:
csv_string: a csv string. May contain missing values "a,,c"
column_names: list of column names
Returns:
Dict of {column_name, value_from_csv}. If there are missing values,
value_from_csv will be ''.
"""
import csv
r = next(csv.reader([csv_string]))
if len(r) != len(column_names):
raise ValueError('csv line %s does not have %d columns' % (csv_string, len(column_names)))
return {k: v for k, v in zip(column_names, r)} | python | def decode_csv(csv_string, column_names):
"""Parse a csv line into a dict.
Args:
csv_string: a csv string. May contain missing values "a,,c"
column_names: list of column names
Returns:
Dict of {column_name, value_from_csv}. If there are missing values,
value_from_csv will be ''.
"""
import csv
r = next(csv.reader([csv_string]))
if len(r) != len(column_names):
raise ValueError('csv line %s does not have %d columns' % (csv_string, len(column_names)))
return {k: v for k, v in zip(column_names, r)} | [
"def",
"decode_csv",
"(",
"csv_string",
",",
"column_names",
")",
":",
"import",
"csv",
"r",
"=",
"next",
"(",
"csv",
".",
"reader",
"(",
"[",
"csv_string",
"]",
")",
")",
"if",
"len",
"(",
"r",
")",
"!=",
"len",
"(",
"column_names",
")",
":",
"raise",
"ValueError",
"(",
"'csv line %s does not have %d columns'",
"%",
"(",
"csv_string",
",",
"len",
"(",
"column_names",
")",
")",
")",
"return",
"{",
"k",
":",
"v",
"for",
"k",
",",
"v",
"in",
"zip",
"(",
"column_names",
",",
"r",
")",
"}"
] | Parse a csv line into a dict.
Args:
csv_string: a csv string. May contain missing values "a,,c"
column_names: list of column names
Returns:
Dict of {column_name, value_from_csv}. If there are missing values,
value_from_csv will be ''. | [
"Parse",
"a",
"csv",
"line",
"into",
"a",
"dict",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/solutionbox/ml_workbench/tensorflow/transform.py#L355-L370 |
4,795 | googledatalab/pydatalab | solutionbox/ml_workbench/tensorflow/transform.py | encode_csv | def encode_csv(data_dict, column_names):
"""Builds a csv string.
Args:
data_dict: dict of {column_name: 1 value}
column_names: list of column names
Returns:
A csv string version of data_dict
"""
import csv
import six
values = [str(data_dict[x]) for x in column_names]
str_buff = six.StringIO()
writer = csv.writer(str_buff, lineterminator='')
writer.writerow(values)
return str_buff.getvalue() | python | def encode_csv(data_dict, column_names):
"""Builds a csv string.
Args:
data_dict: dict of {column_name: 1 value}
column_names: list of column names
Returns:
A csv string version of data_dict
"""
import csv
import six
values = [str(data_dict[x]) for x in column_names]
str_buff = six.StringIO()
writer = csv.writer(str_buff, lineterminator='')
writer.writerow(values)
return str_buff.getvalue() | [
"def",
"encode_csv",
"(",
"data_dict",
",",
"column_names",
")",
":",
"import",
"csv",
"import",
"six",
"values",
"=",
"[",
"str",
"(",
"data_dict",
"[",
"x",
"]",
")",
"for",
"x",
"in",
"column_names",
"]",
"str_buff",
"=",
"six",
".",
"StringIO",
"(",
")",
"writer",
"=",
"csv",
".",
"writer",
"(",
"str_buff",
",",
"lineterminator",
"=",
"''",
")",
"writer",
".",
"writerow",
"(",
"values",
")",
"return",
"str_buff",
".",
"getvalue",
"(",
")"
] | Builds a csv string.
Args:
data_dict: dict of {column_name: 1 value}
column_names: list of column names
Returns:
A csv string version of data_dict | [
"Builds",
"a",
"csv",
"string",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/solutionbox/ml_workbench/tensorflow/transform.py#L373-L389 |
4,796 | googledatalab/pydatalab | solutionbox/ml_workbench/tensorflow/transform.py | serialize_example | def serialize_example(transformed_json_data, info_dict):
"""Makes a serialized tf.example.
Args:
transformed_json_data: dict of transformed data.
info_dict: output of feature_transforms.get_transfrormed_feature_info()
Returns:
The serialized tf.example version of transformed_json_data.
"""
import six
import tensorflow as tf
def _make_int64_list(x):
return tf.train.Feature(int64_list=tf.train.Int64List(value=x))
def _make_bytes_list(x):
return tf.train.Feature(bytes_list=tf.train.BytesList(value=x))
def _make_float_list(x):
return tf.train.Feature(float_list=tf.train.FloatList(value=x))
if sorted(six.iterkeys(transformed_json_data)) != sorted(six.iterkeys(info_dict)):
raise ValueError('Keys do not match %s, %s' % (list(six.iterkeys(transformed_json_data)),
list(six.iterkeys(info_dict))))
ex_dict = {}
for name, info in six.iteritems(info_dict):
if info['dtype'] == tf.int64:
ex_dict[name] = _make_int64_list(transformed_json_data[name])
elif info['dtype'] == tf.float32:
ex_dict[name] = _make_float_list(transformed_json_data[name])
elif info['dtype'] == tf.string:
ex_dict[name] = _make_bytes_list(transformed_json_data[name])
else:
raise ValueError('Unsupported data type %s' % info['dtype'])
ex = tf.train.Example(features=tf.train.Features(feature=ex_dict))
return ex.SerializeToString() | python | def serialize_example(transformed_json_data, info_dict):
"""Makes a serialized tf.example.
Args:
transformed_json_data: dict of transformed data.
info_dict: output of feature_transforms.get_transfrormed_feature_info()
Returns:
The serialized tf.example version of transformed_json_data.
"""
import six
import tensorflow as tf
def _make_int64_list(x):
return tf.train.Feature(int64_list=tf.train.Int64List(value=x))
def _make_bytes_list(x):
return tf.train.Feature(bytes_list=tf.train.BytesList(value=x))
def _make_float_list(x):
return tf.train.Feature(float_list=tf.train.FloatList(value=x))
if sorted(six.iterkeys(transformed_json_data)) != sorted(six.iterkeys(info_dict)):
raise ValueError('Keys do not match %s, %s' % (list(six.iterkeys(transformed_json_data)),
list(six.iterkeys(info_dict))))
ex_dict = {}
for name, info in six.iteritems(info_dict):
if info['dtype'] == tf.int64:
ex_dict[name] = _make_int64_list(transformed_json_data[name])
elif info['dtype'] == tf.float32:
ex_dict[name] = _make_float_list(transformed_json_data[name])
elif info['dtype'] == tf.string:
ex_dict[name] = _make_bytes_list(transformed_json_data[name])
else:
raise ValueError('Unsupported data type %s' % info['dtype'])
ex = tf.train.Example(features=tf.train.Features(feature=ex_dict))
return ex.SerializeToString() | [
"def",
"serialize_example",
"(",
"transformed_json_data",
",",
"info_dict",
")",
":",
"import",
"six",
"import",
"tensorflow",
"as",
"tf",
"def",
"_make_int64_list",
"(",
"x",
")",
":",
"return",
"tf",
".",
"train",
".",
"Feature",
"(",
"int64_list",
"=",
"tf",
".",
"train",
".",
"Int64List",
"(",
"value",
"=",
"x",
")",
")",
"def",
"_make_bytes_list",
"(",
"x",
")",
":",
"return",
"tf",
".",
"train",
".",
"Feature",
"(",
"bytes_list",
"=",
"tf",
".",
"train",
".",
"BytesList",
"(",
"value",
"=",
"x",
")",
")",
"def",
"_make_float_list",
"(",
"x",
")",
":",
"return",
"tf",
".",
"train",
".",
"Feature",
"(",
"float_list",
"=",
"tf",
".",
"train",
".",
"FloatList",
"(",
"value",
"=",
"x",
")",
")",
"if",
"sorted",
"(",
"six",
".",
"iterkeys",
"(",
"transformed_json_data",
")",
")",
"!=",
"sorted",
"(",
"six",
".",
"iterkeys",
"(",
"info_dict",
")",
")",
":",
"raise",
"ValueError",
"(",
"'Keys do not match %s, %s'",
"%",
"(",
"list",
"(",
"six",
".",
"iterkeys",
"(",
"transformed_json_data",
")",
")",
",",
"list",
"(",
"six",
".",
"iterkeys",
"(",
"info_dict",
")",
")",
")",
")",
"ex_dict",
"=",
"{",
"}",
"for",
"name",
",",
"info",
"in",
"six",
".",
"iteritems",
"(",
"info_dict",
")",
":",
"if",
"info",
"[",
"'dtype'",
"]",
"==",
"tf",
".",
"int64",
":",
"ex_dict",
"[",
"name",
"]",
"=",
"_make_int64_list",
"(",
"transformed_json_data",
"[",
"name",
"]",
")",
"elif",
"info",
"[",
"'dtype'",
"]",
"==",
"tf",
".",
"float32",
":",
"ex_dict",
"[",
"name",
"]",
"=",
"_make_float_list",
"(",
"transformed_json_data",
"[",
"name",
"]",
")",
"elif",
"info",
"[",
"'dtype'",
"]",
"==",
"tf",
".",
"string",
":",
"ex_dict",
"[",
"name",
"]",
"=",
"_make_bytes_list",
"(",
"transformed_json_data",
"[",
"name",
"]",
")",
"else",
":",
"raise",
"ValueError",
"(",
"'Unsupported data type %s'",
"%",
"info",
"[",
"'dtype'",
"]",
")",
"ex",
"=",
"tf",
".",
"train",
".",
"Example",
"(",
"features",
"=",
"tf",
".",
"train",
".",
"Features",
"(",
"feature",
"=",
"ex_dict",
")",
")",
"return",
"ex",
".",
"SerializeToString",
"(",
")"
] | Makes a serialized tf.example.
Args:
transformed_json_data: dict of transformed data.
info_dict: output of feature_transforms.get_transfrormed_feature_info()
Returns:
The serialized tf.example version of transformed_json_data. | [
"Makes",
"a",
"serialized",
"tf",
".",
"example",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/solutionbox/ml_workbench/tensorflow/transform.py#L392-L428 |
4,797 | googledatalab/pydatalab | solutionbox/ml_workbench/tensorflow/transform.py | preprocess | def preprocess(pipeline, args):
"""Transfrom csv data into transfromed tf.example files.
Outline:
1) read the input data (as csv or bigquery) into a dict format
2) replace image paths with base64 encoded image files
3) build a csv input string with images paths replaced with base64. This
matches the serving csv that a trained model would expect.
4) batch the csv strings
5) run the transformations
6) write the results to tf.example files and save any errors.
"""
from tensorflow.python.lib.io import file_io
from trainer import feature_transforms
schema = json.loads(file_io.read_file_to_string(
os.path.join(args.analysis, feature_transforms.SCHEMA_FILE)).decode())
features = json.loads(file_io.read_file_to_string(
os.path.join(args.analysis, feature_transforms.FEATURES_FILE)).decode())
stats = json.loads(file_io.read_file_to_string(
os.path.join(args.analysis, feature_transforms.STATS_FILE)).decode())
column_names = [col['name'] for col in schema]
if args.csv:
all_files = []
for i, file_pattern in enumerate(args.csv):
all_files.append(pipeline | ('ReadCSVFile%d' % i) >> beam.io.ReadFromText(file_pattern))
raw_data = (
all_files
| 'MergeCSVFiles' >> beam.Flatten()
| 'ParseCSVData' >> beam.Map(decode_csv, column_names))
else:
columns = ', '.join(column_names)
query = 'SELECT {columns} FROM `{table}`'.format(columns=columns,
table=args.bigquery)
raw_data = (
pipeline
| 'ReadBiqQueryData'
>> beam.io.Read(beam.io.BigQuerySource(query=query,
use_standard_sql=True)))
# Note that prepare_image_transforms does not make embeddings, it justs reads
# the image files and converts them to byte stings. TransformFeaturesDoFn()
# will make the image embeddings.
image_columns = image_transform_columns(features)
clean_csv_data = (
raw_data
| 'PreprocessTransferredLearningTransformations'
>> beam.Map(prepare_image_transforms, image_columns)
| 'BuildCSVString'
>> beam.Map(encode_csv, column_names))
if args.shuffle:
clean_csv_data = clean_csv_data | 'ShuffleData' >> shuffle()
transform_dofn = TransformFeaturesDoFn(args.analysis, features, schema, stats)
(transformed_data, errors) = (
clean_csv_data
| 'Batch Input'
>> beam.ParDo(EmitAsBatchDoFn(args.batch_size))
| 'Run TF Graph on Batches'
>> beam.ParDo(transform_dofn).with_outputs('errors', main='main'))
_ = (transformed_data
| 'SerializeExamples' >> beam.Map(serialize_example, feature_transforms.get_transformed_feature_info(features, schema))
| 'WriteExamples'
>> beam.io.WriteToTFRecord(
os.path.join(args.output, args.prefix),
file_name_suffix='.tfrecord.gz'))
_ = (errors
| 'WriteErrors'
>> beam.io.WriteToText(
os.path.join(args.output, 'errors_' + args.prefix),
file_name_suffix='.txt')) | python | def preprocess(pipeline, args):
"""Transfrom csv data into transfromed tf.example files.
Outline:
1) read the input data (as csv or bigquery) into a dict format
2) replace image paths with base64 encoded image files
3) build a csv input string with images paths replaced with base64. This
matches the serving csv that a trained model would expect.
4) batch the csv strings
5) run the transformations
6) write the results to tf.example files and save any errors.
"""
from tensorflow.python.lib.io import file_io
from trainer import feature_transforms
schema = json.loads(file_io.read_file_to_string(
os.path.join(args.analysis, feature_transforms.SCHEMA_FILE)).decode())
features = json.loads(file_io.read_file_to_string(
os.path.join(args.analysis, feature_transforms.FEATURES_FILE)).decode())
stats = json.loads(file_io.read_file_to_string(
os.path.join(args.analysis, feature_transforms.STATS_FILE)).decode())
column_names = [col['name'] for col in schema]
if args.csv:
all_files = []
for i, file_pattern in enumerate(args.csv):
all_files.append(pipeline | ('ReadCSVFile%d' % i) >> beam.io.ReadFromText(file_pattern))
raw_data = (
all_files
| 'MergeCSVFiles' >> beam.Flatten()
| 'ParseCSVData' >> beam.Map(decode_csv, column_names))
else:
columns = ', '.join(column_names)
query = 'SELECT {columns} FROM `{table}`'.format(columns=columns,
table=args.bigquery)
raw_data = (
pipeline
| 'ReadBiqQueryData'
>> beam.io.Read(beam.io.BigQuerySource(query=query,
use_standard_sql=True)))
# Note that prepare_image_transforms does not make embeddings, it justs reads
# the image files and converts them to byte stings. TransformFeaturesDoFn()
# will make the image embeddings.
image_columns = image_transform_columns(features)
clean_csv_data = (
raw_data
| 'PreprocessTransferredLearningTransformations'
>> beam.Map(prepare_image_transforms, image_columns)
| 'BuildCSVString'
>> beam.Map(encode_csv, column_names))
if args.shuffle:
clean_csv_data = clean_csv_data | 'ShuffleData' >> shuffle()
transform_dofn = TransformFeaturesDoFn(args.analysis, features, schema, stats)
(transformed_data, errors) = (
clean_csv_data
| 'Batch Input'
>> beam.ParDo(EmitAsBatchDoFn(args.batch_size))
| 'Run TF Graph on Batches'
>> beam.ParDo(transform_dofn).with_outputs('errors', main='main'))
_ = (transformed_data
| 'SerializeExamples' >> beam.Map(serialize_example, feature_transforms.get_transformed_feature_info(features, schema))
| 'WriteExamples'
>> beam.io.WriteToTFRecord(
os.path.join(args.output, args.prefix),
file_name_suffix='.tfrecord.gz'))
_ = (errors
| 'WriteErrors'
>> beam.io.WriteToText(
os.path.join(args.output, 'errors_' + args.prefix),
file_name_suffix='.txt')) | [
"def",
"preprocess",
"(",
"pipeline",
",",
"args",
")",
":",
"from",
"tensorflow",
".",
"python",
".",
"lib",
".",
"io",
"import",
"file_io",
"from",
"trainer",
"import",
"feature_transforms",
"schema",
"=",
"json",
".",
"loads",
"(",
"file_io",
".",
"read_file_to_string",
"(",
"os",
".",
"path",
".",
"join",
"(",
"args",
".",
"analysis",
",",
"feature_transforms",
".",
"SCHEMA_FILE",
")",
")",
".",
"decode",
"(",
")",
")",
"features",
"=",
"json",
".",
"loads",
"(",
"file_io",
".",
"read_file_to_string",
"(",
"os",
".",
"path",
".",
"join",
"(",
"args",
".",
"analysis",
",",
"feature_transforms",
".",
"FEATURES_FILE",
")",
")",
".",
"decode",
"(",
")",
")",
"stats",
"=",
"json",
".",
"loads",
"(",
"file_io",
".",
"read_file_to_string",
"(",
"os",
".",
"path",
".",
"join",
"(",
"args",
".",
"analysis",
",",
"feature_transforms",
".",
"STATS_FILE",
")",
")",
".",
"decode",
"(",
")",
")",
"column_names",
"=",
"[",
"col",
"[",
"'name'",
"]",
"for",
"col",
"in",
"schema",
"]",
"if",
"args",
".",
"csv",
":",
"all_files",
"=",
"[",
"]",
"for",
"i",
",",
"file_pattern",
"in",
"enumerate",
"(",
"args",
".",
"csv",
")",
":",
"all_files",
".",
"append",
"(",
"pipeline",
"|",
"(",
"'ReadCSVFile%d'",
"%",
"i",
")",
">>",
"beam",
".",
"io",
".",
"ReadFromText",
"(",
"file_pattern",
")",
")",
"raw_data",
"=",
"(",
"all_files",
"|",
"'MergeCSVFiles'",
">>",
"beam",
".",
"Flatten",
"(",
")",
"|",
"'ParseCSVData'",
">>",
"beam",
".",
"Map",
"(",
"decode_csv",
",",
"column_names",
")",
")",
"else",
":",
"columns",
"=",
"', '",
".",
"join",
"(",
"column_names",
")",
"query",
"=",
"'SELECT {columns} FROM `{table}`'",
".",
"format",
"(",
"columns",
"=",
"columns",
",",
"table",
"=",
"args",
".",
"bigquery",
")",
"raw_data",
"=",
"(",
"pipeline",
"|",
"'ReadBiqQueryData'",
">>",
"beam",
".",
"io",
".",
"Read",
"(",
"beam",
".",
"io",
".",
"BigQuerySource",
"(",
"query",
"=",
"query",
",",
"use_standard_sql",
"=",
"True",
")",
")",
")",
"# Note that prepare_image_transforms does not make embeddings, it justs reads",
"# the image files and converts them to byte stings. TransformFeaturesDoFn()",
"# will make the image embeddings.",
"image_columns",
"=",
"image_transform_columns",
"(",
"features",
")",
"clean_csv_data",
"=",
"(",
"raw_data",
"|",
"'PreprocessTransferredLearningTransformations'",
">>",
"beam",
".",
"Map",
"(",
"prepare_image_transforms",
",",
"image_columns",
")",
"|",
"'BuildCSVString'",
">>",
"beam",
".",
"Map",
"(",
"encode_csv",
",",
"column_names",
")",
")",
"if",
"args",
".",
"shuffle",
":",
"clean_csv_data",
"=",
"clean_csv_data",
"|",
"'ShuffleData'",
">>",
"shuffle",
"(",
")",
"transform_dofn",
"=",
"TransformFeaturesDoFn",
"(",
"args",
".",
"analysis",
",",
"features",
",",
"schema",
",",
"stats",
")",
"(",
"transformed_data",
",",
"errors",
")",
"=",
"(",
"clean_csv_data",
"|",
"'Batch Input'",
">>",
"beam",
".",
"ParDo",
"(",
"EmitAsBatchDoFn",
"(",
"args",
".",
"batch_size",
")",
")",
"|",
"'Run TF Graph on Batches'",
">>",
"beam",
".",
"ParDo",
"(",
"transform_dofn",
")",
".",
"with_outputs",
"(",
"'errors'",
",",
"main",
"=",
"'main'",
")",
")",
"_",
"=",
"(",
"transformed_data",
"|",
"'SerializeExamples'",
">>",
"beam",
".",
"Map",
"(",
"serialize_example",
",",
"feature_transforms",
".",
"get_transformed_feature_info",
"(",
"features",
",",
"schema",
")",
")",
"|",
"'WriteExamples'",
">>",
"beam",
".",
"io",
".",
"WriteToTFRecord",
"(",
"os",
".",
"path",
".",
"join",
"(",
"args",
".",
"output",
",",
"args",
".",
"prefix",
")",
",",
"file_name_suffix",
"=",
"'.tfrecord.gz'",
")",
")",
"_",
"=",
"(",
"errors",
"|",
"'WriteErrors'",
">>",
"beam",
".",
"io",
".",
"WriteToText",
"(",
"os",
".",
"path",
".",
"join",
"(",
"args",
".",
"output",
",",
"'errors_'",
"+",
"args",
".",
"prefix",
")",
",",
"file_name_suffix",
"=",
"'.txt'",
")",
")"
] | Transfrom csv data into transfromed tf.example files.
Outline:
1) read the input data (as csv or bigquery) into a dict format
2) replace image paths with base64 encoded image files
3) build a csv input string with images paths replaced with base64. This
matches the serving csv that a trained model would expect.
4) batch the csv strings
5) run the transformations
6) write the results to tf.example files and save any errors. | [
"Transfrom",
"csv",
"data",
"into",
"transfromed",
"tf",
".",
"example",
"files",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/solutionbox/ml_workbench/tensorflow/transform.py#L431-L506 |
4,798 | googledatalab/pydatalab | solutionbox/ml_workbench/tensorflow/transform.py | main | def main(argv=None):
"""Run Preprocessing as a Dataflow."""
args = parse_arguments(sys.argv if argv is None else argv)
temp_dir = os.path.join(args.output, 'tmp')
if args.cloud:
pipeline_name = 'DataflowRunner'
else:
pipeline_name = 'DirectRunner'
# Suppress TF warnings.
os.environ['TF_CPP_MIN_LOG_LEVEL']='3'
options = {
'job_name': args.job_name,
'temp_location': temp_dir,
'project': args.project_id,
'setup_file':
os.path.abspath(os.path.join(
os.path.dirname(__file__),
'setup.py')),
}
if args.num_workers:
options['num_workers'] = args.num_workers
if args.worker_machine_type:
options['worker_machine_type'] = args.worker_machine_type
pipeline_options = beam.pipeline.PipelineOptions(flags=[], **options)
p = beam.Pipeline(pipeline_name, options=pipeline_options)
preprocess(pipeline=p, args=args)
pipeline_result = p.run()
if not args.async:
pipeline_result.wait_until_finish()
if args.async and args.cloud:
print('View job at https://console.developers.google.com/dataflow/job/%s?project=%s' %
(pipeline_result.job_id(), args.project_id)) | python | def main(argv=None):
"""Run Preprocessing as a Dataflow."""
args = parse_arguments(sys.argv if argv is None else argv)
temp_dir = os.path.join(args.output, 'tmp')
if args.cloud:
pipeline_name = 'DataflowRunner'
else:
pipeline_name = 'DirectRunner'
# Suppress TF warnings.
os.environ['TF_CPP_MIN_LOG_LEVEL']='3'
options = {
'job_name': args.job_name,
'temp_location': temp_dir,
'project': args.project_id,
'setup_file':
os.path.abspath(os.path.join(
os.path.dirname(__file__),
'setup.py')),
}
if args.num_workers:
options['num_workers'] = args.num_workers
if args.worker_machine_type:
options['worker_machine_type'] = args.worker_machine_type
pipeline_options = beam.pipeline.PipelineOptions(flags=[], **options)
p = beam.Pipeline(pipeline_name, options=pipeline_options)
preprocess(pipeline=p, args=args)
pipeline_result = p.run()
if not args.async:
pipeline_result.wait_until_finish()
if args.async and args.cloud:
print('View job at https://console.developers.google.com/dataflow/job/%s?project=%s' %
(pipeline_result.job_id(), args.project_id)) | [
"def",
"main",
"(",
"argv",
"=",
"None",
")",
":",
"args",
"=",
"parse_arguments",
"(",
"sys",
".",
"argv",
"if",
"argv",
"is",
"None",
"else",
"argv",
")",
"temp_dir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"args",
".",
"output",
",",
"'tmp'",
")",
"if",
"args",
".",
"cloud",
":",
"pipeline_name",
"=",
"'DataflowRunner'",
"else",
":",
"pipeline_name",
"=",
"'DirectRunner'",
"# Suppress TF warnings.",
"os",
".",
"environ",
"[",
"'TF_CPP_MIN_LOG_LEVEL'",
"]",
"=",
"'3'",
"options",
"=",
"{",
"'job_name'",
":",
"args",
".",
"job_name",
",",
"'temp_location'",
":",
"temp_dir",
",",
"'project'",
":",
"args",
".",
"project_id",
",",
"'setup_file'",
":",
"os",
".",
"path",
".",
"abspath",
"(",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"__file__",
")",
",",
"'setup.py'",
")",
")",
",",
"}",
"if",
"args",
".",
"num_workers",
":",
"options",
"[",
"'num_workers'",
"]",
"=",
"args",
".",
"num_workers",
"if",
"args",
".",
"worker_machine_type",
":",
"options",
"[",
"'worker_machine_type'",
"]",
"=",
"args",
".",
"worker_machine_type",
"pipeline_options",
"=",
"beam",
".",
"pipeline",
".",
"PipelineOptions",
"(",
"flags",
"=",
"[",
"]",
",",
"*",
"*",
"options",
")",
"p",
"=",
"beam",
".",
"Pipeline",
"(",
"pipeline_name",
",",
"options",
"=",
"pipeline_options",
")",
"preprocess",
"(",
"pipeline",
"=",
"p",
",",
"args",
"=",
"args",
")",
"pipeline_result",
"=",
"p",
".",
"run",
"(",
")",
"if",
"not",
"args",
".",
"async",
":",
"pipeline_result",
".",
"wait_until_finish",
"(",
")",
"if",
"args",
".",
"async",
"and",
"args",
".",
"cloud",
":",
"print",
"(",
"'View job at https://console.developers.google.com/dataflow/job/%s?project=%s'",
"%",
"(",
"pipeline_result",
".",
"job_id",
"(",
")",
",",
"args",
".",
"project_id",
")",
")"
] | Run Preprocessing as a Dataflow. | [
"Run",
"Preprocessing",
"as",
"a",
"Dataflow",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/solutionbox/ml_workbench/tensorflow/transform.py#L509-L545 |
4,799 | googledatalab/pydatalab | solutionbox/ml_workbench/tensorflow/transform.py | TransformFeaturesDoFn.start_bundle | def start_bundle(self, element=None):
"""Build the transfromation graph once."""
import tensorflow as tf
from trainer import feature_transforms
g = tf.Graph()
session = tf.Session(graph=g)
# Build the transformation graph
with g.as_default():
transformed_features, _, placeholders = (
feature_transforms.build_csv_serving_tensors_for_transform_step(
analysis_path=self._analysis_output_dir,
features=self._features,
schema=self._schema,
stats=self._stats,
keep_target=True))
session.run(tf.tables_initializer())
self._session = session
self._transformed_features = transformed_features
self._input_placeholder_tensor = placeholders['csv_example'] | python | def start_bundle(self, element=None):
"""Build the transfromation graph once."""
import tensorflow as tf
from trainer import feature_transforms
g = tf.Graph()
session = tf.Session(graph=g)
# Build the transformation graph
with g.as_default():
transformed_features, _, placeholders = (
feature_transforms.build_csv_serving_tensors_for_transform_step(
analysis_path=self._analysis_output_dir,
features=self._features,
schema=self._schema,
stats=self._stats,
keep_target=True))
session.run(tf.tables_initializer())
self._session = session
self._transformed_features = transformed_features
self._input_placeholder_tensor = placeholders['csv_example'] | [
"def",
"start_bundle",
"(",
"self",
",",
"element",
"=",
"None",
")",
":",
"import",
"tensorflow",
"as",
"tf",
"from",
"trainer",
"import",
"feature_transforms",
"g",
"=",
"tf",
".",
"Graph",
"(",
")",
"session",
"=",
"tf",
".",
"Session",
"(",
"graph",
"=",
"g",
")",
"# Build the transformation graph",
"with",
"g",
".",
"as_default",
"(",
")",
":",
"transformed_features",
",",
"_",
",",
"placeholders",
"=",
"(",
"feature_transforms",
".",
"build_csv_serving_tensors_for_transform_step",
"(",
"analysis_path",
"=",
"self",
".",
"_analysis_output_dir",
",",
"features",
"=",
"self",
".",
"_features",
",",
"schema",
"=",
"self",
".",
"_schema",
",",
"stats",
"=",
"self",
".",
"_stats",
",",
"keep_target",
"=",
"True",
")",
")",
"session",
".",
"run",
"(",
"tf",
".",
"tables_initializer",
"(",
")",
")",
"self",
".",
"_session",
"=",
"session",
"self",
".",
"_transformed_features",
"=",
"transformed_features",
"self",
".",
"_input_placeholder_tensor",
"=",
"placeholders",
"[",
"'csv_example'",
"]"
] | Build the transfromation graph once. | [
"Build",
"the",
"transfromation",
"graph",
"once",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/solutionbox/ml_workbench/tensorflow/transform.py#L278-L299 |