identifier (stringlengths 1-155) | parameters (stringlengths 2-6.09k) | docstring (stringlengths 11-63.4k) | docstring_summary (stringlengths 0-63.4k) | function (stringlengths 29-99.8k) | function_tokens (sequence) | start_point (sequence) | end_point (sequence) | language (stringclasses: 1 value) | docstring_language (stringlengths 2-7) | docstring_language_predictions (stringlengths 18-23) | is_langid_reliable (stringclasses: 2 values) |
---|---|---|---|---|---|---|---|---|---|---|---|
ReconfigStats.__init__ | (self, logger: logging.Logger,
max_incr_between_checks=100, max_time_between_checks=600,
max_config_between_timers=10, max_time_between_timers=120
) |
Initialize this ReconfigStats.
:param max_incr_between_checks: Maximum number of outstanding incrementals before a sanity check
:param max_time_between_checks: Maximum number of seconds between sanity checks
:param max_config_between_timers: Maximum number of configurations before logging timers
:param max_time_between_timers: Maximum number of seconds between logging timers
|
Initialize this ReconfigStats. | def __init__(self, logger: logging.Logger,
max_incr_between_checks=100, max_time_between_checks=600,
max_config_between_timers=10, max_time_between_timers=120
) -> None:
"""
Initialize this ReconfigStats.
:param max_incr_between_checks: Maximum number of outstanding incrementals before a sanity check
:param max_time_between_checks: Maximum number of seconds between sanity checks
:param max_config_between_timers: Maximum number of configurations before logging timers
:param max_time_between_timers: Maximum number of seconds between logging timers
"""
# Save config elements.
self.logger = logger
self.max_incr_between_checks = max_incr_between_checks
self.max_time_between_checks = max_time_between_checks
self.max_config_between_timers = max_config_between_timers
self.max_time_between_timers = max_time_between_timers
# self.reconfigures tracks the last few reconfigures, both for business
# logic around the last reconfigure and for logging.
self.reconfigures: List[Tuple[str, PerfCounter]] = []
# self.counts tracks how many of each kind of reconfiguration have
# happened, for metrics.
self.counts = {
"incremental": 0,
"complete": 0
}
# In many cases, the previous complete reconfigure will have fallen out
# of self.reconfigures, so we remember its timestamp separately.
self.last_complete: Optional[PerfCounter] = None
# Likewise, remember the time of the last sanity check...
self.last_check: Optional[PerfCounter] = None
# ...and the time of the last timer logs.
self.last_timer_log: Optional[PerfCounter] = None
# self.incrementals_outstanding is the number of incrementals since the
# last complete. Once too many incrementals pile up, we do a sanity check.
self.incrementals_outstanding = 0
# self.configs_outstanding is the number of configurations (either kind)
# since we last logged the timers. Once too many configurations pile up,
# we log the timers.
self.configs_outstanding = 0
# self.checks is how many sanity checks we've done. self.errors is how many
# of them failed.
self.checks = 0
self.errors = 0 | [
"def",
"__init__",
"(",
"self",
",",
"logger",
":",
"logging",
".",
"Logger",
",",
"max_incr_between_checks",
"=",
"100",
",",
"max_time_between_checks",
"=",
"600",
",",
"max_config_between_timers",
"=",
"10",
",",
"max_time_between_timers",
"=",
"120",
")",
"->",
"None",
":",
"# Save config elements.",
"self",
".",
"logger",
"=",
"logger",
"self",
".",
"max_incr_between_checks",
"=",
"max_incr_between_checks",
"self",
".",
"max_time_between_checks",
"=",
"max_time_between_checks",
"self",
".",
"max_config_between_timers",
"=",
"max_config_between_timers",
"self",
".",
"max_time_between_timers",
"=",
"max_time_between_timers",
"# self.reconfigures tracks the last few reconfigures, both for business",
"# logic around the last reconfigure and for logging.",
"self",
".",
"reconfigures",
":",
"List",
"[",
"Tuple",
"[",
"str",
",",
"PerfCounter",
"]",
"]",
"=",
"[",
"]",
"# self.counts tracks how many of each kind of reconfiguration have ",
"# happened, for metrics.",
"self",
".",
"counts",
"=",
"{",
"\"incremental\"",
":",
"0",
",",
"\"complete\"",
":",
"0",
"}",
"# In many cases, the previous complete reconfigure will have fallen out",
"# of self.reconfigures, so we remember its timestamp separately.",
"self",
".",
"last_complete",
":",
"Optional",
"[",
"PerfCounter",
"]",
"=",
"None",
"# Likewise, remember the time of the last sanity check...",
"self",
".",
"last_check",
":",
"Optional",
"[",
"PerfCounter",
"]",
"=",
"None",
"# ...and the time of the last timer logs.",
"self",
".",
"last_timer_log",
":",
"Optional",
"[",
"PerfCounter",
"]",
"=",
"None",
"# self.incrementals_outstanding is the number of incrementals since the",
"# last complete. Once too many incrementals pile up, we do a sanity check.",
"self",
".",
"incrementals_outstanding",
"=",
"0",
"# self.configs_outstanding is the number of configurations (either kind)",
"# since we last logged the timers. Once too many configurations pile up,",
"# we log the timers.",
"self",
".",
"configs_outstanding",
"=",
"0",
"# self.checks is how many sanity checks we've done. self.errors is how many",
"# of them failed.",
"self",
".",
"checks",
"=",
"0",
"self",
".",
"errors",
"=",
"0"
] | [
31,
4
] | [
84,
23
] | python | en | ['en', 'error', 'th'] | False |
ReconfigStats.mark | (self, what: str, when: Optional[PerfCounter]=None) |
Mark that a reconfigure has occurred. The 'what' parameter is one of
"complete" for a complete reconfigure, "incremental" for an incremental,
or "diag" to indicate that we're not really reconfiguring, we just generated
the diagnostics so may need to log timers.
:param what: "complete", "incremental", or "diag".
:param when: The time at which this occurred. Can be None, meaning "now".
|
Mark that a reconfigure has occurred. The 'what' parameter is one of
"complete" for a complete reconfigure, "incremental" for an incremental,
or "diag" to indicate that we're not really reconfiguring, we just generated
the diagnostics so may need to log timers. | def mark(self, what: str, when: Optional[PerfCounter]=None) -> None:
"""
Mark that a reconfigure has occurred. The 'what' parameter is one of
"complete" for a complete reconfigure, "incremental" for an incremental,
or "diag" to indicate that we're not really reconfiguring, we just generated
the diagnostics so may need to log timers.
:param what: "complete", "incremental", or "diag".
:param when: The time at which this occurred. Can be None, meaning "now".
"""
if not when:
when = time.perf_counter()
if (what == 'incremental') and not self.last_complete:
# You can't have an incremental without a complete to start.
# If this is the first reconfigure, it's a complete reconfigure.
what = 'complete'
# Should we update all the counters?
update_counters = True
if what == 'complete':
# For a complete reconfigure, we need to clear all the outstanding
# incrementals, and also remember when it happened.
self.incrementals_outstanding = 0
self.last_complete = when
# A complete reconfigure also resets the last check time, because
# we consider the complete reconfigure to be a sanity check, basically.
# Note that it does _not_ reset any timer-logging stuff.
self.last_check = when
self.logger.debug(f"MARK COMPLETE @ {when}")
elif what == 'incremental':
# For an incremental reconfigure, we need to remember that we have
# one more incremental outstanding.
self.incrementals_outstanding += 1
self.logger.debug(f"MARK INCREMENTAL @ {when}")
elif what == 'diag':
# Don't update all the counters for a diagnostic update.
update_counters = False
else:
raise RuntimeError(f"ReconfigStats: unknown reconfigure type {what}")
# If we should update the counters...
if update_counters:
# ...then update the counts and our reconfigures list.
self.counts[what] += 1
self.reconfigures.append((what, when))
if len(self.reconfigures) > 10:
self.reconfigures.pop(0)
# In all cases, update the number of outstanding configurations. This will
# trigger timer logging for diagnostics updates.
self.configs_outstanding += 1 | [
"def",
"mark",
"(",
"self",
",",
"what",
":",
"str",
",",
"when",
":",
"Optional",
"[",
"PerfCounter",
"]",
"=",
"None",
")",
"->",
"None",
":",
"if",
"not",
"when",
":",
"when",
"=",
"time",
".",
"perf_counter",
"(",
")",
"if",
"(",
"what",
"==",
"'incremental'",
")",
"and",
"not",
"self",
".",
"last_complete",
":",
"# You can't have an incremental without a complete to start.",
"# If this is the first reconfigure, it's a complete reconfigure.",
"what",
"=",
"'complete'",
"# Should we update all the counters?",
"update_counters",
"=",
"True",
"if",
"what",
"==",
"'complete'",
":",
"# For a complete reconfigure, we need to clear all the outstanding ",
"# incrementals, and also remember when it happened.",
"self",
".",
"incrementals_outstanding",
"=",
"0",
"self",
".",
"last_complete",
"=",
"when",
"# A complete reconfigure also resets the last check time, because",
"# we consider the complete reconfigure to be a sanity check, basically.",
"# Note that it does _not_ reset any timer-logging stuff.",
"self",
".",
"last_check",
"=",
"when",
"self",
".",
"logger",
".",
"debug",
"(",
"f\"MARK COMPLETE @ {when}\"",
")",
"elif",
"what",
"==",
"'incremental'",
":",
"# For an incremental reconfigure, we need to remember that we have",
"# one more incremental outstanding.",
"self",
".",
"incrementals_outstanding",
"+=",
"1",
"self",
".",
"logger",
".",
"debug",
"(",
"f\"MARK INCREMENTAL @ {when}\"",
")",
"elif",
"what",
"==",
"'diag'",
":",
"# Don't update all the counters for a diagnostic update.",
"update_counters",
"=",
"False",
"else",
":",
"raise",
"RuntimeError",
"(",
"f\"ReconfigStats: unknown reconfigure type {what}\"",
")",
"# If we should update the counters...",
"if",
"update_counters",
":",
"# ...then update the counts and our reconfigures list.",
"self",
".",
"counts",
"[",
"what",
"]",
"+=",
"1",
"self",
".",
"reconfigures",
".",
"append",
"(",
"(",
"what",
",",
"when",
")",
")",
"if",
"len",
"(",
"self",
".",
"reconfigures",
")",
">",
"10",
":",
"self",
".",
"reconfigures",
".",
"pop",
"(",
"0",
")",
"# In all cases, update the number of outstanding configurations. This will",
"# trigger timer logging for diagnostics updates.",
"self",
".",
"configs_outstanding",
"+=",
"1"
] | [
86,
4
] | [
143,
37
] | python | en | ['en', 'error', 'th'] | False |
ReconfigStats.needs_check | (self, when: Optional[PerfCounter]=None) |
Determine if we need to do a complete reconfigure to doublecheck our
incrementals. The logic here is that we need a check every 100 incrementals
or every 10 minutes, whichever comes first.
:param when: Override the effective time of the check. Primarily useful for testing.
:return: True if a check is needed, False if not
|
Determine if we need to do a complete reconfigure to doublecheck our
incrementals. The logic here is that we need a check every 100 incrementals
or every 10 minutes, whichever comes first. | def needs_check(self, when: Optional[PerfCounter]=None) -> bool:
"""
Determine if we need to do a complete reconfigure to doublecheck our
incrementals. The logic here is that we need a check every 100 incrementals
or every 10 minutes, whichever comes first.
:param when: Override the effective time of the check. Primarily useful for testing.
:return: True if a check is needed, False if not
"""
if not when:
when = time.perf_counter()
if len(self.reconfigures) == 0:
# No reconfigures, so no need to check.
# self.logger.debug(f"NEEDS_CHECK @ {when}: no reconfigures, skip")
return False
# Grab information about our last reconfiguration.
what, _ = self.reconfigures[-1]
if what == 'complete':
# Last reconfiguration was a complete reconfiguration, so
# no need to check.
# self.logger.debug(f"NEEDS_CHECK @ {when}: last was complete, skip")
return False
if self.incrementals_outstanding == 0:
# If we have a bunch of incrementals, then we do a check, we can land
# here with no outstanding incrementals, in which case it's pointless to
# do a check.
# self.logger.debug(f"NEEDS_CHECK @ {when}: outstanding 0, skip")
return False
# OK, the last one was an incremental, which implies that we must have some
# outstanding incrementals. Have we hit our maximum between checks?
if self.incrementals_outstanding >= self.max_incr_between_checks:
# Yup, time to check.
# self.logger.debug(f"NEEDS_CHECK @ {when}: outstanding {self.incrementals_outstanding}, check")
return True
# self.logger.debug(f"NEEDS_CHECK @ {when}: outstanding {self.incrementals_outstanding}")
# We're good for outstanding incrementals. How about the max time between checks?
# (We must have a last check time - which may be the time of the last complete
# reconfigure, of course - to go on at this point.)
assert(self.last_check is not None)
delta = when - self.last_check
if delta > self.max_time_between_checks:
# Yup, it's been long enough.
# self.logger.debug(f"NEEDS_CHECK @ {when}: delta {delta}, check")
return True
# self.logger.debug(f"NEEDS_CHECK @ {when}: delta {delta}, skip")
return False | [
"def",
"needs_check",
"(",
"self",
",",
"when",
":",
"Optional",
"[",
"PerfCounter",
"]",
"=",
"None",
")",
"->",
"bool",
":",
"if",
"not",
"when",
":",
"when",
"=",
"time",
".",
"perf_counter",
"(",
")",
"if",
"len",
"(",
"self",
".",
"reconfigures",
")",
"==",
"0",
":",
"# No reconfigures, so no need to check.",
"# self.logger.debug(f\"NEEDS_CHECK @ {when}: no reconfigures, skip\")",
"return",
"False",
"# Grab information about our last reconfiguration.",
"what",
",",
"_",
"=",
"self",
".",
"reconfigures",
"[",
"-",
"1",
"]",
"if",
"what",
"==",
"'complete'",
":",
"# Last reconfiguration was a complete reconfiguration, so",
"# no need to check.",
"# self.logger.debug(f\"NEEDS_CHECK @ {when}: last was complete, skip\")",
"return",
"False",
"if",
"self",
".",
"incrementals_outstanding",
"==",
"0",
":",
"# If we have a bunch of incrementals, then we do a check, we can land",
"# here with no outstanding incrementals, in which case it's pointless to",
"# do a check.",
"# self.logger.debug(f\"NEEDS_CHECK @ {when}: outstanding 0, skip\")",
"return",
"False",
"# OK, the last one was an incremental, which implies that we must have some",
"# outstanding incrementals. Have we hit our maximum between checks?",
"if",
"self",
".",
"incrementals_outstanding",
">=",
"self",
".",
"max_incr_between_checks",
":",
"# Yup, time to check.",
"# self.logger.debug(f\"NEEDS_CHECK @ {when}: outstanding {self.incrementals_outstanding}, check\")",
"return",
"True",
"# self.logger.debug(f\"NEEDS_CHECK @ {when}: outstanding {self.incrementals_outstanding}\")",
"# We're good for outstanding incrementals. How about the max time between checks?",
"# (We must have a last check time - which may be the time of the last complete",
"# reconfigure, of course - to go on at this point.)",
"assert",
"(",
"self",
".",
"last_check",
"is",
"not",
"None",
")",
"delta",
"=",
"when",
"-",
"self",
".",
"last_check",
"if",
"delta",
">",
"self",
".",
"max_time_between_checks",
":",
"# Yup, it's been long enough.",
"# self.logger.debug(f\"NEEDS_CHECK @ {when}: delta {delta}, check\")",
"return",
"True",
"# self.logger.debug(f\"NEEDS_CHECK @ {when}: delta {delta}, skip\")",
"return",
"False"
] | [
145,
4
] | [
201,
20
] | python | en | ['en', 'error', 'th'] | False |
ReconfigStats.needs_timers | (self, when: Optional[PerfCounter]=None) |
Determine if we need to log the timers or not. The logic here is that
we need to log every max_config_between_timers configurations or every
max_time_between_timers seconds, whichever comes first.
:param when: Override the effective time of the check. Primarily useful for testing.
:return: True if we need to log timers, False if not
|
Determine if we need to log the timers or not. The logic here is that
we need to log every max_config_between_timers configurations or every
max_time_between_timers seconds, whichever comes first. | def needs_timers(self, when: Optional[PerfCounter]=None) -> bool:
"""
Determine if we need to log the timers or not. The logic here is that
we need to log every max_config_between_timers configurations or every
max_time_between_timers seconds, whichever comes first.
:param when: Override the effective time of the check. Primarily useful for testing.
:return: True if we need to log timers, False if not
"""
if not when:
when = time.perf_counter()
if len(self.reconfigures) == 0:
# No reconfigures, so no need to check.
# self.logger.debug(f"NEEDS_TIMERS @ {when}: no reconfigures, skip")
return False
# If we have no configurations outstanding, we're done.
if self.configs_outstanding == 0:
# self.logger.debug(f"NEEDS_TIMERS @ {when}: outstanding 0, skip")
return False
# Have we hit our maximum number of outstanding configurations?
if self.configs_outstanding >= self.max_config_between_timers:
# Yup, time to log.
# self.logger.debug(f"NEEDS_TIMERS @ {when}: outstanding {self.configs_outstanding}, check")
return True
# self.logger.debug(f"NEEDS_TIMERS @ {when}: outstanding {self.configs_outstanding}")
# We're good for outstanding incrementals. How about the max time between timers?
# Note that we may _never_ have logged timers before -- if that's the case, use
# the time of our last complete reconfigure, which must always be set, as a
# baseline.
assert(self.last_complete is not None)
baseline = self.last_timer_log or self.last_complete
delta = when - baseline
if delta > self.max_time_between_timers:
# Yup, it's been long enough.
# self.logger.debug(f"NEEDS_TIMERS @ {when}: delta {delta}, check")
return True
# self.logger.debug(f"NEEDS_TIMERS @ {when}: delta {delta}, skip")
return False | [
"def",
"needs_timers",
"(",
"self",
",",
"when",
":",
"Optional",
"[",
"PerfCounter",
"]",
"=",
"None",
")",
"->",
"bool",
":",
"if",
"not",
"when",
":",
"when",
"=",
"time",
".",
"perf_counter",
"(",
")",
"if",
"len",
"(",
"self",
".",
"reconfigures",
")",
"==",
"0",
":",
"# No reconfigures, so no need to check.",
"# self.logger.debug(f\"NEEDS_TIMERS @ {when}: no reconfigures, skip\")",
"return",
"False",
"# If we have no configurations outstanding, we're done.",
"if",
"self",
".",
"configs_outstanding",
"==",
"0",
":",
"# self.logger.debug(f\"NEEDS_TIMERS @ {when}: outstanding 0, skip\")",
"return",
"False",
"# Have we hit our maximum number of outstanding configurations?",
"if",
"self",
".",
"configs_outstanding",
">=",
"self",
".",
"max_config_between_timers",
":",
"# Yup, time to log.",
"# self.logger.debug(f\"NEEDS_TIMERS @ {when}: outstanding {self.configs_outstanding}, check\")",
"return",
"True",
"# self.logger.debug(f\"NEEDS_TIMERS @ {when}: outstanding {self.configs_outstanding}\")",
"# We're good for outstanding incrementals. How about the max time between timers?",
"# Note that we may _never_ have logged timers before -- if that's the case, use ",
"# the time of our last complete reconfigure, which must always be set, as a ",
"# baseline.",
"assert",
"(",
"self",
".",
"last_complete",
"is",
"not",
"None",
")",
"baseline",
"=",
"self",
".",
"last_timer_log",
"or",
"self",
".",
"last_complete",
"delta",
"=",
"when",
"-",
"baseline",
"if",
"delta",
">",
"self",
".",
"max_time_between_timers",
":",
"# Yup, it's been long enough.",
"# self.logger.debug(f\"NEEDS_TIMERS @ {when}: delta {delta}, check\")",
"return",
"True",
"# self.logger.debug(f\"NEEDS_TIMERS @ {when}: delta {delta}, skip\")",
"return",
"False"
] | [
203,
4
] | [
250,
20
] | python | en | ['en', 'error', 'th'] | False |
ReconfigStats.mark_checked | (self, result: bool, when: Optional[PerfCounter]=None) |
Mark that we have done a check, and note the results. This resets our
outstanding incrementals to 0, and also resets our last check time.
:param result: True if the check was good, False if not
:param when: Override the effective time. Primarily useful for testing.
|
Mark that we have done a check, and note the results. This resets our
outstanding incrementals to 0, and also resets our last check time. | def mark_checked(self, result: bool, when: Optional[PerfCounter]=None) -> None:
"""
Mark that we have done a check, and note the results. This resets our
outstanding incrementals to 0, and also resets our last check time.
:param result: True if the check was good, False if not
:param when: Override the effective time. Primarily useful for testing.
"""
self.logger.debug(f"MARK_CHECKED @ {when}: {result}")
self.incrementals_outstanding = 0
self.checks += 1
if not result:
self.errors += 1
self.last_check = when or time.perf_counter() | [
"def",
"mark_checked",
"(",
"self",
",",
"result",
":",
"bool",
",",
"when",
":",
"Optional",
"[",
"PerfCounter",
"]",
"=",
"None",
")",
"->",
"None",
":",
"self",
".",
"logger",
".",
"debug",
"(",
"f\"MARK_CHECKED @ {when}: {result}\"",
")",
"self",
".",
"incrementals_outstanding",
"=",
"0",
"self",
".",
"checks",
"+=",
"1",
"if",
"not",
"result",
":",
"self",
".",
"errors",
"+=",
"1",
"self",
".",
"last_check",
"=",
"when",
"or",
"time",
".",
"perf_counter",
"(",
")"
] | [
252,
4
] | [
269,
53
] | python | en | ['en', 'error', 'th'] | False |
ReconfigStats.mark_timers_logged | (self, when: Optional[PerfCounter]=None) |
Mark that we have logged timers. This resets our outstanding configurations
to 0, and also resets our last timer log time.
:param when: Override the effective time. Primarily useful for testing.
|
Mark that we have logged timers. This resets our outstanding configurations
to 0, and also resets our last timer log time. | def mark_timers_logged(self, when: Optional[PerfCounter]=None) -> None:
"""
Mark that we have logged timers. This resets our outstanding configurations
to 0, and also resets our last timer log time.
:param when: Override the effective time. Primarily useful for testing.
"""
self.logger.debug(f"MARK_TIMERS @ {when}")
self.configs_outstanding = 0
self.last_timer_log = when or time.perf_counter() | [
"def",
"mark_timers_logged",
"(",
"self",
",",
"when",
":",
"Optional",
"[",
"PerfCounter",
"]",
"=",
"None",
")",
"->",
"None",
":",
"self",
".",
"logger",
".",
"debug",
"(",
"f\"MARK_TIMERS @ {when}\"",
")",
"self",
".",
"configs_outstanding",
"=",
"0",
"self",
".",
"last_timer_log",
"=",
"when",
"or",
"time",
".",
"perf_counter",
"(",
")"
] | [
271,
4
] | [
282,
57
] | python | en | ['en', 'error', 'th'] | False |
xarray_values_in | (data, values, data_vars=None) |
Returns a mask for an xarray Dataset or DataArray, with `True` wherever the value is in values.
Parameters
----------
data: xarray.Dataset or xarray.DataArray
The data to check for value matches.
values: list-like
The values to check for.
data_vars: list-like
The names of the data variables to check.
Returns
-------
mask: np.ndarray
A NumPy array shaped like ``data``. The mask can be used to mask ``data``.
That is, ``data.where(mask)`` is an intended use.
|
Returns a mask for an xarray Dataset or DataArray, with `True` wherever the value is in values. | def xarray_values_in(data, values, data_vars=None):
"""
Returns a mask for an xarray Dataset or DataArray, with `True` wherever the value is in values.
Parameters
----------
data: xarray.Dataset or xarray.DataArray
The data to check for value matches.
values: list-like
The values to check for.
data_vars: list-like
The names of the data variables to check.
Returns
-------
mask: np.ndarray
A NumPy array shaped like ``data``. The mask can be used to mask ``data``.
That is, ``data.where(mask)`` is an intended use.
"""
data_vars_to_check = data_vars if data_vars is not None else list(data.data_vars.keys())
if isinstance(data, xr.Dataset):
mask = np.full_like(data[data_vars_to_check[0]].values, False, dtype=np.bool)
for data_arr in data[data_vars_to_check].values():
for value in values:
mask = mask | (data_arr.values == value)
elif isinstance(data, xr.DataArray):
mask = np.full_like(data, False, dtype=np.bool)
for value in values:
mask = mask | (data.values == value)
return mask | [
"def",
"xarray_values_in",
"(",
"data",
",",
"values",
",",
"data_vars",
"=",
"None",
")",
":",
"data_vars_to_check",
"=",
"data_vars",
"if",
"data_vars",
"is",
"not",
"None",
"else",
"list",
"(",
"data",
".",
"data_vars",
".",
"keys",
"(",
")",
")",
"if",
"isinstance",
"(",
"data",
",",
"xr",
".",
"Dataset",
")",
":",
"mask",
"=",
"np",
".",
"full_like",
"(",
"data",
"[",
"data_vars_to_check",
"[",
"0",
"]",
"]",
".",
"values",
",",
"False",
",",
"dtype",
"=",
"np",
".",
"bool",
")",
"for",
"data_arr",
"in",
"data",
"[",
"data_vars_to_check",
"]",
".",
"values",
"(",
")",
":",
"for",
"value",
"in",
"values",
":",
"mask",
"=",
"mask",
"|",
"(",
"data_arr",
".",
"values",
"==",
"value",
")",
"elif",
"isinstance",
"(",
"data",
",",
"xr",
".",
"DataArray",
")",
":",
"mask",
"=",
"np",
".",
"full_like",
"(",
"data",
",",
"False",
",",
"dtype",
"=",
"np",
".",
"bool",
")",
"for",
"value",
"in",
"values",
":",
"mask",
"=",
"mask",
"|",
"(",
"data",
".",
"values",
"==",
"value",
")",
"return",
"mask"
] | [
8,
0
] | [
37,
15
] | python | en | ['en', 'error', 'th'] | False |
create_2D_mosaic_clean_mask | (clean_mask) |
The clean mask of a mosaic should be determined by the compositing function (e.g. mean
mosaic, median mosaic, etc.). This is simply supposed to be a decent approximation of a
clean mask for a mosaic that has no time dimension.
Parameters
----------
clean_mask: np.ndarray
The 3D clean mask used to construct the mosaic.
Returns
-------
mosaic_clean_mask: np.ndarray
A 2D clean mask for a mosaic.
|
The clean mask of a mosaic should be determined by the compositing function (e.g. mean
mosaic, median mosaic, etc.). This is simply supposed to be a decent approximation of a
clean mask for a mosaic that has no time dimension.
Parameters
----------
clean_mask: np.ndarray
The 3D clean mask used to construct the mosaic.
Returns
-------
mosaic_clean_mask: np.ndarray
A 2D clean mask for a mosaic.
| def create_2D_mosaic_clean_mask(clean_mask):
"""
The clean mask of a mosaic should be determined by the compositing function (e.g. mean
mosaic, median mosaic, etc.). This is simply supposed to be a decent approximation of a
clean mask for a mosaic that has no time dimension.
Parameters
----------
clean_mask: np.ndarray
The 3D clean mask used to construct the mosaic.
Returns
-------
mosaic_clean_mask: np.ndarray
A 2D clean mask for a mosaic.
"""
mosaic_clean_mask = clean_mask[0]
# Take the logical OR of clean masks through time.
for i in range(1, clean_mask.shape[0]):
mosaic_clean_mask = np.logical_or(mosaic_clean_mask, clean_mask[i])
return mosaic_clean_mask | [
"def",
"create_2D_mosaic_clean_mask",
"(",
"clean_mask",
")",
":",
"mosaic_clean_mask",
"=",
"clean_mask",
"[",
"0",
"]",
"# Take the logical OR of clean masks through time.",
"for",
"i",
"in",
"range",
"(",
"1",
",",
"clean_mask",
".",
"shape",
"[",
"0",
"]",
")",
":",
"mosaic_clean_mask",
"=",
"np",
".",
"logical_or",
"(",
"mosaic_clean_mask",
",",
"clean_mask",
"[",
"i",
"]",
")",
"return",
"mosaic_clean_mask"
] | [
43,
0
] | [
63,
28
] | python | en | ['en', 'error', 'th'] | False |
create_circular_mask | (h, w, center=None, radius=None) |
Creates a NumPy array mask with a circle.
Credit goes to https://stackoverflow.com/a/44874588/5449970.
Parameters
----------
h, w: int
The height and width of the data to mask, respectively.
center: 2-tuple of int
The center of the circle, specified as a 2-tuple of the x and y indices.
By default, the center will be the center of the image.
radius: numeric
The radius of the circle.
Be default, the radius will be the smallest distance between
the center and the image walls.
Returns
-------
mask: np.ndarray
A boolean 2D NumPy array.
|
Creates a NumPy array mask with a circle.
Credit goes to https://stackoverflow.com/a/44874588/5449970. | def create_circular_mask(h, w, center=None, radius=None):
"""
Creates a NumPy array mask with a circle.
Credit goes to https://stackoverflow.com/a/44874588/5449970.
Parameters
----------
h, w: int
The height and width of the data to mask, respectively.
center: 2-tuple of int
The center of the circle, specified as a 2-tuple of the x and y indices.
By default, the center will be the center of the image.
radius: numeric
The radius of the circle.
By default, the radius will be the smallest distance between
the center and the image walls.
Returns
-------
mask: np.ndarray
A boolean 2D NumPy array.
"""
if center is None: # use the middle of the image
center = [int(w/2), int(h/2)]
if radius is None: # use the smallest distance between the center and image walls
radius = min(center[0], center[1], w-center[0], h-center[1])
Y, X = np.ogrid[:h, :w]
dist_from_center = np.sqrt((X - center[0])**2 + (Y-center[1])**2)
mask = dist_from_center <= radius
return mask | [
"def",
"create_circular_mask",
"(",
"h",
",",
"w",
",",
"center",
"=",
"None",
",",
"radius",
"=",
"None",
")",
":",
"if",
"center",
"is",
"None",
":",
"# use the middle of the image",
"center",
"=",
"[",
"int",
"(",
"w",
"/",
"2",
")",
",",
"int",
"(",
"h",
"/",
"2",
")",
"]",
"if",
"radius",
"is",
"None",
":",
"# use the smallest distance between the center and image walls",
"radius",
"=",
"min",
"(",
"center",
"[",
"0",
"]",
",",
"center",
"[",
"1",
"]",
",",
"w",
"-",
"center",
"[",
"0",
"]",
",",
"h",
"-",
"center",
"[",
"1",
"]",
")",
"Y",
",",
"X",
"=",
"np",
".",
"ogrid",
"[",
":",
"h",
",",
":",
"w",
"]",
"dist_from_center",
"=",
"np",
".",
"sqrt",
"(",
"(",
"X",
"-",
"center",
"[",
"0",
"]",
")",
"**",
"2",
"+",
"(",
"Y",
"-",
"center",
"[",
"1",
"]",
")",
"**",
"2",
")",
"mask",
"=",
"dist_from_center",
"<=",
"radius",
"return",
"mask"
] | [
65,
0
] | [
96,
15
] | python | en | ['en', 'error', 'th'] | False |
landsat_clean_mask_invalid | (dataset) |
Masks out invalid data according to the LANDSAT
surface reflectance specifications. See this document:
https://landsat.usgs.gov/sites/default/files/documents/ledaps_product_guide.pdf pages 19-20.
Parameters
----------
dataset: xarray.Dataset
An `xarray.Dataset` containing bands such as 'red', 'green', or 'blue'.
Returns
-------
invalid_mask: xarray.DataArray
An `xarray.DataArray` with the same number and order of coordinates as in `dataset`.
The `True` values specify what pixels are valid.
|
Masks out invalid data according to the LANDSAT
surface reflectance specifications. See this document:
https://landsat.usgs.gov/sites/default/files/documents/ledaps_product_guide.pdf pages 19-20. | def landsat_clean_mask_invalid(dataset):
"""
Masks out invalid data according to the LANDSAT
surface reflectance specifications. See this document:
https://landsat.usgs.gov/sites/default/files/documents/ledaps_product_guide.pdf pages 19-20.
Parameters
----------
dataset: xarray.Dataset
An `xarray.Dataset` containing bands such as 'red', 'green', or 'blue'.
Returns
-------
invalid_mask: xarray.DataArray
An `xarray.DataArray` with the same number and order of coordinates as in `dataset`.
The `True` values specify what pixels are valid.
"""
invalid_mask = None
data_arr_names = [arr_name for arr_name in list(dataset.data_vars)
if arr_name not in ['pixel_qa', 'radsat_qa', 'cloud_qa']]
# Only keep data where all bands are in the valid range.
for i, data_arr_name in enumerate(data_arr_names):
invalid_mask_arr = (0 < dataset[data_arr_name]) & (dataset[data_arr_name] < 10000)
invalid_mask = invalid_mask_arr if i == 0 else (invalid_mask & invalid_mask_arr)
return invalid_mask | [
"def",
"landsat_clean_mask_invalid",
"(",
"dataset",
")",
":",
"invalid_mask",
"=",
"None",
"data_arr_names",
"=",
"[",
"arr_name",
"for",
"arr_name",
"in",
"list",
"(",
"dataset",
".",
"data_vars",
")",
"if",
"arr_name",
"not",
"in",
"[",
"'pixel_qa'",
",",
"'radsat_qa'",
",",
"'cloud_qa'",
"]",
"]",
"# Only keep data where all bands are in the valid range.",
"for",
"i",
",",
"data_arr_name",
"in",
"enumerate",
"(",
"data_arr_names",
")",
":",
"invalid_mask_arr",
"=",
"(",
"0",
"<",
"dataset",
"[",
"data_arr_name",
"]",
")",
"&",
"(",
"dataset",
"[",
"data_arr_name",
"]",
"<",
"10000",
")",
"invalid_mask",
"=",
"invalid_mask_arr",
"if",
"i",
"==",
"0",
"else",
"(",
"invalid_mask",
"&",
"invalid_mask_arr",
")",
"return",
"invalid_mask"
] | [
102,
0
] | [
126,
23
] | python | en | ['en', 'error', 'th'] | False |
landsat_qa_clean_mask | (dataset, platform, cover_types=['clear', 'water']) |
Returns a clean_mask for `dataset` that masks out various types of terrain cover using the
Landsat pixel_qa band. Note that Landsat masks specify what to keep, not what to remove.
This means that using `cover_types=['clear', 'water']` should keep only clear land and water.
See "pixel_qa band" here: https://landsat.usgs.gov/landsat-surface-reflectance-quality-assessment
and Section 7 here: https://landsat.usgs.gov/sites/default/files/documents/lasrc_product_guide.pdf.
Parameters
----------
dataset: xarray.Dataset
An xarray (usually produced by `datacube.load()`) that contains a `pixel_qa` data
variable.
platform: str
A string denoting the platform to be used. Can be "LANDSAT_5", "LANDSAT_7", or
"LANDSAT_8".
cover_types: list
A list of the cover types to include. Adding a cover type allows it to remain in the masked data.
Cover types for all Landsat platforms include:
['fill', 'clear', 'water', 'shadow', 'snow', 'cloud', 'low_conf_cl', 'med_conf_cl', 'high_conf_cl'].
'fill' removes "no_data" values, which indicates an absence of data. This value is -9999 for Landsat platforms.
Generally, don't use 'fill'.
'clear' allows only clear terrain. 'water' allows only water. 'shadow' allows only cloud shadows.
'snow' allows only snow. 'cloud' allows only clouds, but note that it often only selects cloud boundaries.
'low_conf_cl', 'med_conf_cl', and 'high_conf_cl' denote low, medium, and high confidence in cloud coverage.
'low_conf_cl' is useful on its own for only removing clouds, however, 'clear' is usually better suited for this.
'med_conf_cl' is useful in combination with 'low_conf_cl' to allow slightly heavier cloud coverage.
Note that 'med_conf_cl' and 'cloud' are very similar.
'high_conf_cl' is useful in combination with both 'low_conf_cl' and 'med_conf_cl'.
For Landsat 8, there are more cover types: ['low_conf_cir', 'high_conf_cir', 'terrain_occ'].
'low_conf_cir' and 'high_conf_cir' denote low and high confidence in cirrus clouds.
'terrain_occ' allows only occluded terrain.
Returns
-------
clean_mask: xarray.DataArray
An xarray DataArray with the same number and order of coordinates as in `dataset`.
|
Returns a clean_mask for `dataset` that masks out various types of terrain cover using the
Landsat pixel_qa band. Note that Landsat masks specify what to keep, not what to remove.
This means that using `cover_types=['clear', 'water']` should keep only clear land and water. | def landsat_qa_clean_mask(dataset, platform, cover_types=['clear', 'water']):
"""
Returns a clean_mask for `dataset` that masks out various types of terrain cover using the
Landsat pixel_qa band. Note that Landsat masks specify what to keep, not what to remove.
This means that using `cover_types=['clear', 'water']` should keep only clear land and water.
See "pixel_qa band" here: https://landsat.usgs.gov/landsat-surface-reflectance-quality-assessment
and Section 7 here: https://landsat.usgs.gov/sites/default/files/documents/lasrc_product_guide.pdf.
Parameters
----------
dataset: xarray.Dataset
An xarray (usually produced by `datacube.load()`) that contains a `pixel_qa` data
variable.
platform: str
A string denoting the platform to be used. Can be "LANDSAT_5", "LANDSAT_7", or
"LANDSAT_8".
cover_types: list
A list of the cover types to include. Adding a cover type allows it to remain in the masked data.
Cover types for all Landsat platforms include:
['fill', 'clear', 'water', 'shadow', 'snow', 'cloud', 'low_conf_cl', 'med_conf_cl', 'high_conf_cl'].
'fill' removes "no_data" values, which indicates an absence of data. This value is -9999 for Landsat platforms.
Generally, don't use 'fill'.
'clear' allows only clear terrain. 'water' allows only water. 'shadow' allows only cloud shadows.
'snow' allows only snow. 'cloud' allows only clouds, but note that it often only selects cloud boundaries.
'low_conf_cl', 'med_conf_cl', and 'high_conf_cl' denote low, medium, and high confidence in cloud coverage.
'low_conf_cl' is useful on its own for only removing clouds, however, 'clear' is usually better suited for this.
'med_conf_cl' is useful in combination with 'low_conf_cl' to allow slightly heavier cloud coverage.
Note that 'med_conf_cl' and 'cloud' are very similar.
'high_conf_cl' is useful in combination with both 'low_conf_cl' and 'med_conf_cl'.
For Landsat 8, there are more cover types: ['low_conf_cir', 'high_conf_cir', 'terrain_occ'].
'low_conf_cir' and 'high_conf_cir' denote low and high confidence in cirrus clouds.
'terrain_occ' allows only occluded terrain.
Returns
-------
clean_mask: xarray.DataArray
An xarray DataArray with the same number and order of coordinates as in `dataset`.
"""
processing_options = {
"LANDSAT_5": ls5_unpack_qa,
"LANDSAT_7": ls7_unpack_qa,
"LANDSAT_8": ls8_unpack_qa
}
clean_mask = None
# Keep all specified cover types (e.g. 'clear', 'water'), so logically or the separate masks.
for i, cover_type in enumerate(cover_types):
cover_type_clean_mask = processing_options[platform](dataset.pixel_qa, cover_type)
clean_mask = cover_type_clean_mask if i == 0 else xr_or(clean_mask, cover_type_clean_mask)
return clean_mask | [
"def",
"landsat_qa_clean_mask",
"(",
"dataset",
",",
"platform",
",",
"cover_types",
"=",
"[",
"'clear'",
",",
"'water'",
"]",
")",
":",
"processing_options",
"=",
"{",
"\"LANDSAT_5\"",
":",
"ls5_unpack_qa",
",",
"\"LANDSAT_7\"",
":",
"ls7_unpack_qa",
",",
"\"LANDSAT_8\"",
":",
"ls8_unpack_qa",
"}",
"clean_mask",
"=",
"None",
"# Keep all specified cover types (e.g. 'clear', 'water'), so logically or the separate masks.",
"for",
"i",
",",
"cover_type",
"in",
"enumerate",
"(",
"cover_types",
")",
":",
"cover_type_clean_mask",
"=",
"processing_options",
"[",
"platform",
"]",
"(",
"dataset",
".",
"pixel_qa",
",",
"cover_type",
")",
"clean_mask",
"=",
"cover_type_clean_mask",
"if",
"i",
"==",
"0",
"else",
"xr_or",
"(",
"clean_mask",
",",
"cover_type_clean_mask",
")",
"return",
"clean_mask"
] | [
129,
0
] | [
181,
21
] | python | en | ['en', 'error', 'th'] | False |
sentinel2_fmask_clean_mask | (dataset, cover_types=['valid', 'water']) |
Returns a clean_mask for `dataset` that masks out various types of terrain cover using the
Sentinel 2 fmask band. Note that clean masks specify what to keep, not what to remove.
This means that using `cover_types=['valid', 'water']` should keep only clear land and water.
See "Classification Mask Generation" here:
https://earth.esa.int/web/sentinel/technical-guides/sentinel-2-msi/level-2a/algorithm
Parameters
----------
dataset: xarray.Dataset
An xarray (usually produced by `datacube.load()`) that contains a `fmask` data
variable.
cover_types: list
A list of the cover types to include. Adding a cover type allows it to remain in the masked data.
Cover types for all Landsat platforms include:
['null', 'valid', 'cloud', 'cloud_shadow', 'snow', 'water'].
'null' removes null values, which indicates an absence of data.
'valid' allows clear views that are not cloud shadow, snow, or water.
'cloud' allows clouds.
'cloud_shadow' allows only cloud shadows.
'snow' allows only snow.
'water' allows only water.
Here is a table of fmask values and their significances:
Value Description
0 Null
1 Valid
2 Cloud
3 Cloud shadow
4 Snow
5 water
Returns
-------
clean_mask: xarray.DataArray of boolean
A boolean `xarray.DataArray` denoting which elements in `dataset` to keep -
with the same number and order of coordinates as in `dataset`.
|
Returns a clean_mask for `dataset` that masks out various types of terrain cover using the
Sentinel 2 fmask band. Note that clean masks specify what to keep, not what to remove.
This means that using `cover_types=['valid', 'water']` should keep only clear land and water. | def sentinel2_fmask_clean_mask(dataset, cover_types=['valid', 'water']):
"""
Returns a clean_mask for `dataset` that masks out various types of terrain cover using the
Sentinel 2 fmask band. Note that clean masks specify what to keep, not what to remove.
This means that using `cover_types=['valid', 'water']` should keep only clear land and water.
See "Classification Mask Generation" here:
https://earth.esa.int/web/sentinel/technical-guides/sentinel-2-msi/level-2a/algorithm
Parameters
----------
dataset: xarray.Dataset
An xarray (usually produced by `datacube.load()`) that contains a `fmask` data
variable.
cover_types: list
A list of the cover types to include. Adding a cover type allows it to remain in the masked data.
Cover types for all Landsat platforms include:
['null', 'valid', 'cloud', 'cloud_shadow', 'snow', 'water'].
'null' removes null values, which indicates an absence of data.
'valid' allows clear views that are not cloud shadow, snow, or water.
'cloud' allows clouds.
'cloud_shadow' allows only cloud shadows.
'snow' allows only snow.
'water' allows only water.
Here is a table of fmask values and their significances:
Value Description
0 Null
1 Valid
2 Cloud
3 Cloud shadow
4 Snow
5 water
Returns
-------
clean_mask: xarray.DataArray of boolean
A boolean `xarray.DataArray` denoting which elements in `dataset` to keep -
with the same number and order of coordinates as in `dataset`.
"""
fmask_table = {'null': 0, 'valid': 1, 'cloud': 2, 'cloud_shadow': 3, 'snow': 4, 'water': 5}
fmask_values_to_keep = [fmask_table[cover_type] for cover_type in cover_types]
clean_mask = xarray_values_in(dataset.fmask, fmask_values_to_keep)
return clean_mask | [
"def",
"sentinel2_fmask_clean_mask",
"(",
"dataset",
",",
"cover_types",
"=",
"[",
"'valid'",
",",
"'water'",
"]",
")",
":",
"fmask_table",
"=",
"{",
"'null'",
":",
"0",
",",
"'valid'",
":",
"1",
",",
"'cloud'",
":",
"2",
",",
"'cloud_shadow'",
":",
"3",
",",
"'snow'",
":",
"4",
",",
"'water'",
":",
"5",
"}",
"fmask_values_to_keep",
"=",
"[",
"fmask_table",
"[",
"cover_type",
"]",
"for",
"cover_type",
"in",
"cover_types",
"]",
"clean_mask",
"=",
"xarray_values_in",
"(",
"dataset",
".",
"fmask",
",",
"fmask_values_to_keep",
")",
"return",
"clean_mask"
] | [
187,
0
] | [
231,
21
] | python | en | ['en', 'error', 'th'] | False |
index_entry_t.__init__ | (self, filesigs, configsig) |
:param filesigs: a list of tuples( `fileid`, `sig`)...
:param configsig: the signature of the configuration object.
|
:param filesigs: a list of tuples( `fileid`, `sig`)...
:param configsig: the signature of the configuration object.
| def __init__(self, filesigs, configsig):
"""
:param filesigs: a list of tuples( `fileid`, `sig`)...
:param configsig: the signature of the configuration object.
"""
self.filesigs = filesigs
self.configsig = configsig | [
"def",
"__init__",
"(",
"self",
",",
"filesigs",
",",
"configsig",
")",
":",
"self",
".",
"filesigs",
"=",
"filesigs",
"self",
".",
"configsig",
"=",
"configsig"
] | [
43,
4
] | [
50,
34
] | python | en | ['en', 'error', 'th'] | False |
directory_cache_t.__init__ | (
self, dir="cache", directory="cache",
compression=False, sha1_sigs=True) |
:param dir: cache directory path, it is created, if it does not exist
:param compression: if `True`, the cache files will be compressed
using `gzip`
:param sha1_sigs: `sha1_sigs` determines whether file modifications are
checked by computing a `sha1` digest or by checking
the modification date
|
:param dir: cache directory path, it is created, if it does not exist | def __init__(
self, dir="cache", directory="cache",
compression=False, sha1_sigs=True):
"""
:param dir: cache directory path, it is created, if it does not exist
:param compression: if `True`, the cache files will be compressed
using `gzip`
:param sha1_sigs: `sha1_sigs` determines whether file modifications are
checked by computing a `sha1` digest or by checking
the modification date
"""
if dir != "cache":
# Somebody explicitly set a different value for dir
warnings.warn(
"The dir argument is deprecated.\n" +
"Please use the directory argument instead.",
DeprecationWarning)
# Deprecated since 1.9.0, will be removed in 2.0.0
directory = dir
declarations_cache.cache_base_t.__init__(self)
# Cache directory
self.__dir = os.path.abspath(directory)
# Flag that determines whether the cache files will be compressed
self.__compression = compression
# Flag that determines whether the signature is a sha1 digest or
# the modification time
# (this flag is passed to the filename_repository_t class)
self.__sha1_sigs = sha1_sigs
# Filename repository
self.__filename_rep = filename_repository_t(self.__sha1_sigs)
# Index dictionary (Key is the value returned by _create_cache_key()
# (which is based on the header file name) and value is an
# index_entry_t object)
self.__index = {}
# Flag that indicates whether the index was modified
self.__modified_flag = False
# Check if dir refers to an existing file...
if os.path.isfile(self.__dir):
raise ValueError((
"Cannot use %s as cache directory. There is already a file " +
"with that name.") % self.__dir)
# Load the cache or create the cache directory...
if os.path.isdir(self.__dir):
self._load()
else:
# Create the cache directory...
os.mkdir(self.__dir) | [
"def",
"__init__",
"(",
"self",
",",
"dir",
"=",
"\"cache\"",
",",
"directory",
"=",
"\"cache\"",
",",
"compression",
"=",
"False",
",",
"sha1_sigs",
"=",
"True",
")",
":",
"if",
"dir",
"!=",
"\"cache\"",
":",
"# Somebody explicitly set a different value for dir",
"warnings",
".",
"warn",
"(",
"\"The dir argument is deprecated.\\n\"",
"+",
"\"Please use the directory argument instead.\"",
",",
"DeprecationWarning",
")",
"# Deprecated since 1.9.0, will be removed in 2.0.0",
"directory",
"=",
"dir",
"declarations_cache",
".",
"cache_base_t",
".",
"__init__",
"(",
"self",
")",
"# Cache directory",
"self",
".",
"__dir",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"directory",
")",
"# Flag that determines whether the cache files will be compressed",
"self",
".",
"__compression",
"=",
"compression",
"# Flag that determines whether the signature is a sha1 digest or",
"# the modification time",
"# (this flag is passed to the filename_repository_t class)",
"self",
".",
"__sha1_sigs",
"=",
"sha1_sigs",
"# Filename repository",
"self",
".",
"__filename_rep",
"=",
"filename_repository_t",
"(",
"self",
".",
"__sha1_sigs",
")",
"# Index dictionary (Key is the value returned by _create_cache_key()",
"# (which is based on the header file name) and value is an",
"# index_entry_t object)",
"self",
".",
"__index",
"=",
"{",
"}",
"# Flag that indicates whether the index was modified",
"self",
".",
"__modified_flag",
"=",
"False",
"# Check if dir refers to an existing file...",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"self",
".",
"__dir",
")",
":",
"raise",
"ValueError",
"(",
"(",
"\"Cannot use %s as cache directory. There is already a file \"",
"+",
"\"with that name.\"",
")",
"%",
"self",
".",
"__dir",
")",
"# Load the cache or create the cache directory...",
"if",
"os",
".",
"path",
".",
"isdir",
"(",
"self",
".",
"__dir",
")",
":",
"self",
".",
"_load",
"(",
")",
"else",
":",
"# Create the cache directory...",
"os",
".",
"mkdir",
"(",
"self",
".",
"__dir",
")"
] | [
73,
4
] | [
131,
32
] | python | en | ['en', 'error', 'th'] | False |
directory_cache_t.flush | (self) | Save the index table to disk. | Save the index table to disk. | def flush(self):
"""Save the index table to disk."""
self._save() | [
"def",
"flush",
"(",
"self",
")",
":",
"self",
".",
"_save",
"(",
")"
] | [
133,
4
] | [
135,
20
] | python | en | ['en', 'en', 'en'] | True |
directory_cache_t.update | (self, source_file, configuration, declarations, included_files) | Replace a cache entry by a new value.
:param source_file: a C++ source file name.
:type source_file: str
:param configuration: configuration object.
:type configuration: :class:`xml_generator_configuration_t`
:param declarations: declarations contained in the `source_file`
:type declarations: picklable object
:param included_files: included files
:type included_files: list of str
| Replace a cache entry by a new value. | def update(self, source_file, configuration, declarations, included_files):
"""Replace a cache entry by a new value.
:param source_file: a C++ source file name.
:type source_file: str
:param configuration: configuration object.
:type configuration: :class:`xml_generator_configuration_t`
:param declarations: declarations contained in the `source_file`
:type declarations: picklable object
:param included_files: included files
:type included_files: list of str
"""
# Normlize all paths...
source_file = os.path.normpath(source_file)
included_files = [os.path.normpath(p) for p in included_files]
# Create the list of dependent files. This is the included_files list
# + the source file. Duplicate names are removed.
dependent_files = {}
for name in [source_file] + included_files:
dependent_files[name] = 1
key = self._create_cache_key(source_file)
# Remove an existing entry (if there is one)
# After calling this method, it is guaranteed that __index[key]
# does not exist anymore.
self._remove_entry(source_file, key)
# Create a new entry...
# Create the sigs of all dependent files...
filesigs = []
for filename in list(dependent_files.keys()):
id_, sig = self.__filename_rep.acquire_filename(filename)
filesigs.append((id_, sig))
configsig = self._create_config_signature(configuration)
entry = index_entry_t(filesigs, configsig)
self.__index[key] = entry
self.__modified_flag = True
# Write the declarations into the cache file...
cachefilename = self._create_cache_filename(source_file)
self._write_file(cachefilename, declarations) | [
"def",
"update",
"(",
"self",
",",
"source_file",
",",
"configuration",
",",
"declarations",
",",
"included_files",
")",
":",
"# Normlize all paths...",
"source_file",
"=",
"os",
".",
"path",
".",
"normpath",
"(",
"source_file",
")",
"included_files",
"=",
"[",
"os",
".",
"path",
".",
"normpath",
"(",
"p",
")",
"for",
"p",
"in",
"included_files",
"]",
"# Create the list of dependent files. This is the included_files list",
"# + the source file. Duplicate names are removed.",
"dependent_files",
"=",
"{",
"}",
"for",
"name",
"in",
"[",
"source_file",
"]",
"+",
"included_files",
":",
"dependent_files",
"[",
"name",
"]",
"=",
"1",
"key",
"=",
"self",
".",
"_create_cache_key",
"(",
"source_file",
")",
"# Remove an existing entry (if there is one)",
"# After calling this method, it is guaranteed that __index[key]",
"# does not exist anymore.",
"self",
".",
"_remove_entry",
"(",
"source_file",
",",
"key",
")",
"# Create a new entry...",
"# Create the sigs of all dependent files...",
"filesigs",
"=",
"[",
"]",
"for",
"filename",
"in",
"list",
"(",
"dependent_files",
".",
"keys",
"(",
")",
")",
":",
"id_",
",",
"sig",
"=",
"self",
".",
"__filename_rep",
".",
"acquire_filename",
"(",
"filename",
")",
"filesigs",
".",
"append",
"(",
"(",
"id_",
",",
"sig",
")",
")",
"configsig",
"=",
"self",
".",
"_create_config_signature",
"(",
"configuration",
")",
"entry",
"=",
"index_entry_t",
"(",
"filesigs",
",",
"configsig",
")",
"self",
".",
"__index",
"[",
"key",
"]",
"=",
"entry",
"self",
".",
"__modified_flag",
"=",
"True",
"# Write the declarations into the cache file...",
"cachefilename",
"=",
"self",
".",
"_create_cache_filename",
"(",
"source_file",
")",
"self",
".",
"_write_file",
"(",
"cachefilename",
",",
"declarations",
")"
] | [
137,
4
] | [
181,
53
] | python | en | ['en', 'en', 'en'] | True |
directory_cache_t.cached_value | (self, source_file, configuration) | Return the cached declarations or None.
:param source_file: Header file name
:type source_file: str
:param configuration: Configuration object
:type configuration: :class:`parser.xml_generator_configuration_t`
:rtype: Cached declarations or None
| Return the cached declarations or None. | def cached_value(self, source_file, configuration):
"""Return the cached declarations or None.
:param source_file: Header file name
:type source_file: str
:param configuration: Configuration object
:type configuration: :class:`parser.xml_generator_configuration_t`
:rtype: Cached declarations or None
"""
# Check if the cache contains an entry for source_file
key = self._create_cache_key(source_file)
entry = self.__index.get(key)
if entry is None:
# print "CACHE: %s: Not cached"%source_file
return None
# Check if the entry is still valid. It is not valid if:
# - the source_file has been updated
# - the configuration object has changed (i.e. the header is parsed
# by gccxml with different settings which may influence the
# declarations)
# - the included files have been updated
# (this list is part of the cache entry as it cannot be known
# by the caller when cached_value() is called. It was instead
# passed to update())
# Check if the config is different...
configsig = self._create_config_signature(configuration)
if configsig != entry.configsig:
# print "CACHE: %s: Config mismatch"%source_file
return None
# Check if any of the dependent files has been modified...
for id_, sig in entry.filesigs:
if self.__filename_rep.is_file_modified(id_, sig):
# print "CACHE: %s: Entry not up to date"%source_file
return None
# Load and return the cached declarations
cachefilename = self._create_cache_filename(source_file)
decls = self._read_file(cachefilename)
# print "CACHE: Using cached decls for",source_file
return decls | [
"def",
"cached_value",
"(",
"self",
",",
"source_file",
",",
"configuration",
")",
":",
"# Check if the cache contains an entry for source_file",
"key",
"=",
"self",
".",
"_create_cache_key",
"(",
"source_file",
")",
"entry",
"=",
"self",
".",
"__index",
".",
"get",
"(",
"key",
")",
"if",
"entry",
"is",
"None",
":",
"# print \"CACHE: %s: Not cached\"%source_file",
"return",
"None",
"# Check if the entry is still valid. It is not valid if:",
"# - the source_file has been updated",
"# - the configuration object has changed (i.e. the header is parsed",
"# by gccxml with different settings which may influence the",
"# declarations)",
"# - the included files have been updated",
"# (this list is part of the cache entry as it cannot be known",
"# by the caller when cached_value() is called. It was instead",
"# passed to update())",
"# Check if the config is different...",
"configsig",
"=",
"self",
".",
"_create_config_signature",
"(",
"configuration",
")",
"if",
"configsig",
"!=",
"entry",
".",
"configsig",
":",
"# print \"CACHE: %s: Config mismatch\"%source_file",
"return",
"None",
"# Check if any of the dependent files has been modified...",
"for",
"id_",
",",
"sig",
"in",
"entry",
".",
"filesigs",
":",
"if",
"self",
".",
"__filename_rep",
".",
"is_file_modified",
"(",
"id_",
",",
"sig",
")",
":",
"# print \"CACHE: %s: Entry not up to date\"%source_file",
"return",
"None",
"# Load and return the cached declarations",
"cachefilename",
"=",
"self",
".",
"_create_cache_filename",
"(",
"source_file",
")",
"decls",
"=",
"self",
".",
"_read_file",
"(",
"cachefilename",
")",
"# print \"CACHE: Using cached decls for\",source_file",
"return",
"decls"
] | [
183,
4
] | [
227,
20
] | python | en | ['en', 'en', 'en'] | True |
directory_cache_t._load | (self) | Load the cache.
Loads the `index.dat` file, which contains the index table and the
file name repository.
This method is called by the :meth:`__init__`
| Load the cache. | def _load(self):
"""Load the cache.
Loads the `index.dat` file, which contains the index table and the
file name repository.
This method is called by the :meth:`__init__`
"""
indexfilename = os.path.join(self.__dir, "index.dat")
if os.path.exists(indexfilename):
data = self._read_file(indexfilename)
self.__index = data[0]
self.__filename_rep = data[1]
if self.__filename_rep._sha1_sigs != self.__sha1_sigs:
print((
"CACHE: Warning: sha1_sigs stored in the cache is set " +
"to %s.") % self.__filename_rep._sha1_sigs)
print("Please remove the cache to change this setting.")
self.__sha1_sigs = self.__filename_rep._sha1_sigs
else:
self.__index = {}
self.__filename_rep = filename_repository_t(self.__sha1_sigs)
self.__modified_flag = False | [
"def",
"_load",
"(",
"self",
")",
":",
"indexfilename",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"__dir",
",",
"\"index.dat\"",
")",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"indexfilename",
")",
":",
"data",
"=",
"self",
".",
"_read_file",
"(",
"indexfilename",
")",
"self",
".",
"__index",
"=",
"data",
"[",
"0",
"]",
"self",
".",
"__filename_rep",
"=",
"data",
"[",
"1",
"]",
"if",
"self",
".",
"__filename_rep",
".",
"_sha1_sigs",
"!=",
"self",
".",
"__sha1_sigs",
":",
"print",
"(",
"(",
"\"CACHE: Warning: sha1_sigs stored in the cache is set \"",
"+",
"\"to %s.\"",
")",
"%",
"self",
".",
"__filename_rep",
".",
"_sha1_sigs",
")",
"print",
"(",
"\"Please remove the cache to change this setting.\"",
")",
"self",
".",
"__sha1_sigs",
"=",
"self",
".",
"__filename_rep",
".",
"_sha1_sigs",
"else",
":",
"self",
".",
"__index",
"=",
"{",
"}",
"self",
".",
"__filename_rep",
"=",
"filename_repository_t",
"(",
"self",
".",
"__sha1_sigs",
")",
"self",
".",
"__modified_flag",
"=",
"False"
] | [229, 4] | [253, 36] | python | en | ['en', 'it', 'en'] | True |
directory_cache_t._save | (self) |
save the cache index, in case it was modified.
Saves the index table and the file name repository in the file
`index.dat`
|
save the cache index, in case it was modified. | def _save(self):
"""
save the cache index, in case it was modified.
Saves the index table and the file name repository in the file
`index.dat`
"""
if self.__modified_flag:
self.__filename_rep.update_id_counter()
indexfilename = os.path.join(self.__dir, "index.dat")
self._write_file(
indexfilename,
(self.__index,
self.__filename_rep))
self.__modified_flag = False | [
"def",
"_save",
"(",
"self",
")",
":",
"if",
"self",
".",
"__modified_flag",
":",
"self",
".",
"__filename_rep",
".",
"update_id_counter",
"(",
")",
"indexfilename",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"__dir",
",",
"\"index.dat\"",
")",
"self",
".",
"_write_file",
"(",
"indexfilename",
",",
"(",
"self",
".",
"__index",
",",
"self",
".",
"__filename_rep",
")",
")",
"self",
".",
"__modified_flag",
"=",
"False"
] | [255, 4] | [271, 40] | python | en | ['en', 'error', 'th'] | False |
directory_cache_t._read_file | (self, filename) |
read a Python object from a cache file.
Reads a pickled object from disk and returns it.
:param filename: Name of the file that should be read.
:type filename: str
:rtype: object
|
read a Python object from a cache file. | def _read_file(self, filename):
"""
read a Python object from a cache file.
Reads a pickled object from disk and returns it.
:param filename: Name of the file that should be read.
:type filename: str
:rtype: object
"""
if self.__compression:
f = gzip.GzipFile(filename, "rb")
else:
f = open(filename, "rb")
res = pickle.load(f)
f.close()
return res | [
"def",
"_read_file",
"(",
"self",
",",
"filename",
")",
":",
"if",
"self",
".",
"__compression",
":",
"f",
"=",
"gzip",
".",
"GzipFile",
"(",
"filename",
",",
"\"rb\"",
")",
"else",
":",
"f",
"=",
"open",
"(",
"filename",
",",
"\"rb\"",
")",
"res",
"=",
"pickle",
".",
"load",
"(",
"f",
")",
"f",
".",
"close",
"(",
")",
"return",
"res"
] | [273, 4] | [290, 18] | python | en | ['en', 'error', 'th'] | False |
directory_cache_t._write_file | (self, filename, data) | Write a data item into a file.
The data object is written to a file using the pickle mechanism.
:param filename: Output file name
:type filename: str
:param data: A Python object that will be pickled
| Write a data item into a file. | def _write_file(self, filename, data):
"""Write a data item into a file.
The data object is written to a file using the pickle mechanism.
:param filename: Output file name
:type filename: str
:param data: A Python object that will be pickled
"""
if self.__compression:
f = gzip.GzipFile(filename, "wb")
else:
f = open(filename, "wb")
pickle.dump(data, f, pickle.HIGHEST_PROTOCOL)
f.close() | [
"def",
"_write_file",
"(",
"self",
",",
"filename",
",",
"data",
")",
":",
"if",
"self",
".",
"__compression",
":",
"f",
"=",
"gzip",
".",
"GzipFile",
"(",
"filename",
",",
"\"wb\"",
")",
"else",
":",
"f",
"=",
"open",
"(",
"filename",
",",
"\"wb\"",
")",
"pickle",
".",
"dump",
"(",
"data",
",",
"f",
",",
"pickle",
".",
"HIGHEST_PROTOCOL",
")",
"f",
".",
"close",
"(",
")"
] | [292, 4] | [307, 17] | python | en | ['it', 'en', 'en'] | True |
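The _read_file/_write_file pair above is plain pickle serialization, optionally routed through a gzip stream. A minimal standalone sketch of the same round trip (file name and payload are made up for illustration):

    import gzip
    import pickle

    def write_obj(path, obj, compress=True):
        # Serialize obj with pickle, optionally through a gzip stream.
        opener = gzip.open if compress else open
        with opener(path, "wb") as f:
            pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)

    def read_obj(path, compress=True):
        # Read back whatever write_obj stored.
        opener = gzip.open if compress else open
        with opener(path, "rb") as f:
            return pickle.load(f)

    write_obj("example.cache", {"decls": [1, 2, 3]})
    assert read_obj("example.cache") == {"decls": [1, 2, 3]}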
directory_cache_t._remove_entry | (self, source_file, key) | Remove an entry from the cache.
source_file is the name of the header and key is its corresponding
cache key (obtained by a call to :meth:_create_cache_key ).
The entry is removed from the index table, any referenced file
name is released and the cache file is deleted.
If key references a non-existing entry, the method returns
immediately.
:param source_file: Header file name
:type source_file: str
:param key: Key value for the specified header file
:type key: hash table object
| Remove an entry from the cache. | def _remove_entry(self, source_file, key):
"""Remove an entry from the cache.
source_file is the name of the header and key is its corresponding
cache key (obtained by a call to :meth:_create_cache_key ).
The entry is removed from the index table, any referenced file
name is released and the cache file is deleted.
If key references a non-existing entry, the method returns
immediately.
:param source_file: Header file name
:type source_file: str
:param key: Key value for the specified header file
:type key: hash table object
"""
entry = self.__index.get(key)
if entry is None:
return
# Release the referenced files...
for id_, _ in entry.filesigs:
self.__filename_rep.release_filename(id_)
# Remove the cache entry...
del self.__index[key]
self.__modified_flag = True
# Delete the corresponding cache file...
cachefilename = self._create_cache_filename(source_file)
try:
os.remove(cachefilename)
except OSError as e:
print("Could not remove cache file (%s)" % e) | [
"def",
"_remove_entry",
"(",
"self",
",",
"source_file",
",",
"key",
")",
":",
"entry",
"=",
"self",
".",
"__index",
".",
"get",
"(",
"key",
")",
"if",
"entry",
"is",
"None",
":",
"return",
"# Release the referenced files...",
"for",
"id_",
",",
"_",
"in",
"entry",
".",
"filesigs",
":",
"self",
".",
"__filename_rep",
".",
"release_filename",
"(",
"id_",
")",
"# Remove the cache entry...",
"del",
"self",
".",
"__index",
"[",
"key",
"]",
"self",
".",
"__modified_flag",
"=",
"True",
"# Delete the corresponding cache file...",
"cachefilename",
"=",
"self",
".",
"_create_cache_filename",
"(",
"source_file",
")",
"try",
":",
"os",
".",
"remove",
"(",
"cachefilename",
")",
"except",
"OSError",
"as",
"e",
":",
"print",
"(",
"\"Could not remove cache file (%s)\"",
"%",
"e",
")"
] | [309, 4] | [343, 57] | python | en | ['en', 'en', 'en'] | True |
directory_cache_t._create_cache_key | (source_file) |
return the cache key for a header file.
:param source_file: Header file name
:type source_file: str
:rtype: str
|
return the cache key for a header file. | def _create_cache_key(source_file):
"""
return the cache key for a header file.
:param source_file: Header file name
:type source_file: str
:rtype: str
"""
path, name = os.path.split(source_file)
return name + str(hash(path)) | [
"def",
"_create_cache_key",
"(",
"source_file",
")",
":",
"path",
",",
"name",
"=",
"os",
".",
"path",
".",
"split",
"(",
"source_file",
")",
"return",
"name",
"+",
"str",
"(",
"hash",
"(",
"path",
")",
")"
] | [346, 4] | [355, 37] | python | en | ['en', 'error', 'th'] | False |
directory_cache_t._create_cache_filename | (self, source_file) |
return the cache file name for a header file.
:param source_file: Header file name
:type source_file: str
:rtype: str
|
return the cache file name for a header file. | def _create_cache_filename(self, source_file):
"""
return the cache file name for a header file.
:param source_file: Header file name
:type source_file: str
:rtype: str
"""
res = self._create_cache_key(source_file) + ".cache"
return os.path.join(self.__dir, res) | [
"def",
"_create_cache_filename",
"(",
"self",
",",
"source_file",
")",
":",
"res",
"=",
"self",
".",
"_create_cache_key",
"(",
"source_file",
")",
"+",
"\".cache\"",
"return",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"__dir",
",",
"res",
")"
] | [357, 4] | [366, 44] | python | en | ['en', 'error', 'th'] | False |
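Seen together, _create_cache_key and _create_cache_filename derive one cache file per header from the basename plus a hash of its directory. A rough standalone equivalent (the cache directory is a placeholder; note that Python 3 randomizes str hash() between runs unless PYTHONHASHSEED is fixed, a caveat the original code shares):

    import os

    def cache_filename(source_file, cache_dir="/tmp/decl_cache"):
        # Basename plus a hash of the directory keeps same-named headers
        # from different paths from colliding on one cache file.
        path, name = os.path.split(source_file)
        key = name + str(hash(path))
        return os.path.join(cache_dir, key + ".cache")

    print(cache_filename("/usr/include/memory.h"))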
directory_cache_t._create_config_signature | (config) |
return the signature for a config object.
The signature is computed as sha1 digest of the contents of
working_directory, include_paths, define_symbols and
undefine_symbols.
:param config: Configuration object
:type config: :class:`parser.xml_generator_configuration_t`
:rtype: str
|
return the signature for a config object. | def _create_config_signature(config):
"""
return the signature for a config object.
The signature is computed as sha1 digest of the contents of
working_directory, include_paths, define_symbols and
undefine_symbols.
:param config: Configuration object
:type config: :class:`parser.xml_generator_configuration_t`
:rtype: str
"""
m = hashlib.sha1()
m.update(config.working_directory.encode("utf-8"))
for p in config.include_paths:
m.update(p.encode("utf-8"))
for p in config.define_symbols:
m.update(p.encode("utf-8"))
for p in config.undefine_symbols:
m.update(p.encode("utf-8"))
for p in config.cflags:
m.update(p.encode("utf-8"))
return m.digest() | [
"def",
"_create_config_signature",
"(",
"config",
")",
":",
"m",
"=",
"hashlib",
".",
"sha1",
"(",
")",
"m",
".",
"update",
"(",
"config",
".",
"working_directory",
".",
"encode",
"(",
"\"utf-8\"",
")",
")",
"for",
"p",
"in",
"config",
".",
"include_paths",
":",
"m",
".",
"update",
"(",
"p",
".",
"encode",
"(",
"\"utf-8\"",
")",
")",
"for",
"p",
"in",
"config",
".",
"define_symbols",
":",
"m",
".",
"update",
"(",
"p",
".",
"encode",
"(",
"\"utf-8\"",
")",
")",
"for",
"p",
"in",
"config",
".",
"undefine_symbols",
":",
"m",
".",
"update",
"(",
"p",
".",
"encode",
"(",
"\"utf-8\"",
")",
")",
"for",
"p",
"in",
"config",
".",
"cflags",
":",
"m",
".",
"update",
"(",
"p",
".",
"encode",
"(",
"\"utf-8\"",
")",
")",
"return",
"m",
".",
"digest",
"(",
")"
] | [369, 4] | [391, 25] | python | en | ['en', 'error', 'th'] | False |
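The configuration signature is a SHA-1 digest over every setting that can change the parsed declarations; a stored digest that no longer matches a freshly computed one is what invalidates the cache entry. A reduced sketch with hypothetical, simplified config fields:

    import hashlib

    def config_signature(working_directory, include_paths, define_symbols):
        # Hash each setting that influences parsing, in a fixed order.
        m = hashlib.sha1()
        m.update(working_directory.encode("utf-8"))
        for item in list(include_paths) + list(define_symbols):
            m.update(item.encode("utf-8"))
        return m.digest()

    old = config_signature(".", ["/usr/include"], ["NDEBUG"])
    new = config_signature(".", ["/usr/include"], [])  # a define was dropped
    print(old != new)  # True -> the cached declarations would be re-parsed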
filename_entry_t.__init__ | (self, filename) | Constructor.
The reference count is initially set to 0.
| Constructor. | def __init__(self, filename):
"""Constructor.
The reference count is initially set to 0.
"""
# Filename
self.filename = filename
# Reference count
self.refcount = 0
# Cached signature value for the file.
# If sig_valid flag is False, the signature still has to be computed,
# otherwise the cached value can be used.
# These attributes must not be pickled!
self.sig_valid = False
self.signature = None | [
"def",
"__init__",
"(",
"self",
",",
"filename",
")",
":",
"# Filename",
"self",
".",
"filename",
"=",
"filename",
"# Reference count",
"self",
".",
"refcount",
"=",
"0",
"# Cached signature value for the file.",
"# If sig_valid flag is False, the signature still has to be computed,",
"# otherwise the cached value can be used.",
"# These attributes must not be pickled!",
"self",
".",
"sig_valid",
"=",
"False",
"self",
".",
"signature",
"=",
"None"
] | [403, 4] | [418, 29] | python | en | ['en', 'en', 'en'] | False |
filename_entry_t.inc_ref_count | (self) | Increase the reference count by 1. | Increase the reference count by 1. | def inc_ref_count(self):
"""Increase the reference count by 1."""
self.refcount += 1 | [
"def",
"inc_ref_count",
"(",
"self",
")",
":",
"self",
".",
"refcount",
"+=",
"1"
] | [429, 4] | [432, 26] | python | en | ['en', 'en', 'en'] | True |
filename_entry_t.dec_ref_count | (self) | Decrease the reference count by 1 and return the new count. | Decrease the reference count by 1 and return the new count. | def dec_ref_count(self):
"""Decrease the reference count by 1 and return the new count."""
self.refcount -= 1
return self.refcount | [
"def",
"dec_ref_count",
"(",
"self",
")",
":",
"self",
".",
"refcount",
"-=",
"1",
"return",
"self",
".",
"refcount"
] | [434, 4] | [438, 28] | python | en | ['en', 'en', 'en'] | True |
filename_repository_t.__init__ | (self, sha1_sigs) | Constructor.
| Constructor.
| def __init__(self, sha1_sigs):
"""Constructor.
"""
# Flag that determines whether the signature is a sha1 digest or
# the modification time
# (this flag is passed to the filename_repository_t class)
self._sha1_sigs = sha1_sigs
# ID lookup table (key: filename / value: id_)
self.__id_lut = {}
# Entry dictionary (key: id_ / value: filename_entry_t)
# This dictionary contains the actual data.
# It must always hold that each entry in __entries has a corresponding
# entry in __id_lut (i.e. the keys in __id_lut must be the names
# stored in __entries)
self.__entries = {}
# A counter for new ids
self.__next_id = 1 | [
"def",
"__init__",
"(",
"self",
",",
"sha1_sigs",
")",
":",
"# Flag that determines whether the signature is a sha1 digest or",
"# the modification time",
"# (this flag is passed to the filename_repository_t class)",
"self",
".",
"_sha1_sigs",
"=",
"sha1_sigs",
"# ID lookup table (key: filename / value: id_)",
"self",
".",
"__id_lut",
"=",
"{",
"}",
"# Entry dictionary (key: id_ / value: filename_entry_t)",
"# This dictionary contains the actual data.",
"# It must always hold that each entry in __entries has a corresponding",
"# entry in __id_lut (i.e. the keys in __id_lut must be the names",
"# stored in __entries)",
"self",
".",
"__entries",
"=",
"{",
"}",
"# A counter for new ids",
"self",
".",
"__next_id",
"=",
"1"
] | [454, 4] | [474, 26] | python | en | ['en', 'en', 'en'] | False |
filename_repository_t.acquire_filename | (self, name) | Acquire a file name and return its id and its signature.
| Acquire a file name and return its id and its signature.
| def acquire_filename(self, name):
"""Acquire a file name and return its id and its signature.
"""
id_ = self.__id_lut.get(name)
# Is this a new entry?
if id_ is None:
# then create one...
id_ = self.__next_id
self.__next_id += 1
self.__id_lut[name] = id_
entry = filename_entry_t(name)
self.__entries[id_] = entry
else:
# otherwise obtain the entry...
entry = self.__entries[id_]
entry.inc_ref_count()
return id_, self._get_signature(entry) | [
"def",
"acquire_filename",
"(",
"self",
",",
"name",
")",
":",
"id_",
"=",
"self",
".",
"__id_lut",
".",
"get",
"(",
"name",
")",
"# Is this a new entry?",
"if",
"id_",
"is",
"None",
":",
"# then create one...",
"id_",
"=",
"self",
".",
"__next_id",
"self",
".",
"__next_id",
"+=",
"1",
"self",
".",
"__id_lut",
"[",
"name",
"]",
"=",
"id_",
"entry",
"=",
"filename_entry_t",
"(",
"name",
")",
"self",
".",
"__entries",
"[",
"id_",
"]",
"=",
"entry",
"else",
":",
"# otherwise obtain the entry...",
"entry",
"=",
"self",
".",
"__entries",
"[",
"id_",
"]",
"entry",
".",
"inc_ref_count",
"(",
")",
"return",
"id_",
",",
"self",
".",
"_get_signature",
"(",
"entry",
")"
] | [476, 4] | [494, 46] | python | en | ['en', 'en', 'en'] | True |
filename_repository_t.release_filename | (self, id_) | Release a file name.
| Release a file name.
| def release_filename(self, id_):
"""Release a file name.
"""
entry = self.__entries.get(id_)
if entry is None:
raise ValueError("Invalid filename id (%d)" % id_)
# Decrease reference count and check if the entry has to be removed...
if entry.dec_ref_count() == 0:
del self.__entries[id_]
del self.__id_lut[entry.filename] | [
"def",
"release_filename",
"(",
"self",
",",
"id_",
")",
":",
"entry",
"=",
"self",
".",
"__entries",
".",
"get",
"(",
"id_",
")",
"if",
"entry",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"\"Invalid filename id (%d)\"",
"%",
"id_",
")",
"# Decrease reference count and check if the entry has to be removed...",
"if",
"entry",
".",
"dec_ref_count",
"(",
")",
"==",
"0",
":",
"del",
"self",
".",
"__entries",
"[",
"id_",
"]",
"del",
"self",
".",
"__id_lut",
"[",
"entry",
".",
"filename",
"]"
] | [496, 4] | [507, 45] | python | en | ['en', 'en', 'en'] | True |
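acquire_filename and release_filename are straightforward reference counting: the first acquire creates an entry, later acquires reuse its id, and the entry disappears once the count drops back to zero. A usage sketch that assumes the filename_repository_t (and filename_entry_t) classes shown above are in scope:

    # Assumes filename_repository_t and filename_entry_t from above are importable.
    rep = filename_repository_t(sha1_sigs=False)  # mtime-based signatures

    id1, sig1 = rep.acquire_filename("a.h")  # new entry, refcount 1
    id2, sig2 = rep.acquire_filename("a.h")  # same file -> same id, refcount 2
    assert id1 == id2

    rep.release_filename(id1)  # refcount back to 1, entry kept
    rep.release_filename(id1)  # refcount 0 -> entry and id are dropped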
filename_repository_t.is_file_modified | (self, id_, signature) | Check if the file referred to by `id_` has been modified.
| Check if the file referred to by `id_` has been modified.
| def is_file_modified(self, id_, signature):
"""Check if the file referred to by `id_` has been modified.
"""
entry = self.__entries.get(id_)
if entry is None:
raise ValueError("Invalid filename id_ (%d)" % id_)
# Is the signature already known?
if entry.sig_valid:
# use the cached signature
filesig = entry.signature
else:
# compute the signature and store it
filesig = self._get_signature(entry)
entry.signature = filesig
entry.sig_valid = True
return filesig != signature | [
"def",
"is_file_modified",
"(",
"self",
",",
"id_",
",",
"signature",
")",
":",
"entry",
"=",
"self",
".",
"__entries",
".",
"get",
"(",
"id_",
")",
"if",
"entry",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"\"Invalid filename id_ (%d)\"",
"%",
"id_",
")",
"# Is the signature already known?",
"if",
"entry",
".",
"sig_valid",
":",
"# use the cached signature",
"filesig",
"=",
"entry",
".",
"signature",
"else",
":",
"# compute the signature and store it",
"filesig",
"=",
"self",
".",
"_get_signature",
"(",
"entry",
")",
"entry",
".",
"signature",
"=",
"filesig",
"entry",
".",
"sig_valid",
"=",
"True",
"return",
"filesig",
"!=",
"signature"
] | [509, 4] | [527, 35] | python | en | ['en', 'en', 'en'] | True |
filename_repository_t.update_id_counter | (self) | Update the `id_` counter so that it doesn't grow forever.
| Update the `id_` counter so that it doesn't grow forever.
| def update_id_counter(self):
"""Update the `id_` counter so that it doesn't grow forever.
"""
if not self.__entries:
self.__next_id = 1
else:
self.__next_id = max(self.__entries.keys()) + 1 | [
"def",
"update_id_counter",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"__entries",
":",
"self",
".",
"__next_id",
"=",
"1",
"else",
":",
"self",
".",
"__next_id",
"=",
"max",
"(",
"self",
".",
"__entries",
".",
"keys",
"(",
")",
")",
"+",
"1"
] | [529, 4] | [536, 59] | python | en | ['en', 'en', 'en'] | True |
filename_repository_t._get_signature | (self, entry) | Return the signature of the file stored in entry.
| Return the signature of the file stored in entry.
| def _get_signature(self, entry):
"""Return the signature of the file stored in entry.
"""
if self._sha1_sigs:
# return sha1 digest of the file content...
if not os.path.exists(entry.filename):
return None
try:
with open(entry.filename, "r") as f:
data = f.read()
return hashlib.sha1(data.encode("utf-8")).digest()
except IOError as e:
print("Cannot determine sha1 digest:", e)
return None
else:
# return file modification date...
try:
return os.path.getmtime(entry.filename)
except OSError:
return None | [
"def",
"_get_signature",
"(",
"self",
",",
"entry",
")",
":",
"if",
"self",
".",
"_sha1_sigs",
":",
"# return sha1 digest of the file content...",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"entry",
".",
"filename",
")",
":",
"return",
"None",
"try",
":",
"with",
"open",
"(",
"entry",
".",
"filename",
",",
"\"r\"",
")",
"as",
"f",
":",
"data",
"=",
"f",
".",
"read",
"(",
")",
"return",
"hashlib",
".",
"sha1",
"(",
"data",
".",
"encode",
"(",
"\"utf-8\"",
")",
")",
".",
"digest",
"(",
")",
"except",
"IOError",
"as",
"e",
":",
"print",
"(",
"\"Cannot determine sha1 digest:\"",
",",
"e",
")",
"return",
"None",
"else",
":",
"# return file modification date...",
"try",
":",
"return",
"os",
".",
"path",
".",
"getmtime",
"(",
"entry",
".",
"filename",
")",
"except",
"OSError",
":",
"return",
"None"
] | [538, 4] | [558, 27] | python | en | ['en', 'en', 'en'] | True |
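_get_signature trades accuracy for speed: a SHA-1 of the contents survives timestamp-only changes, while mtime is cheaper but reports false positives. A byte-oriented variant of the same idea (reading in binary mode avoids the decode/re-encode step in the original):

    import hashlib
    import os

    def file_signature(path, use_sha1=True):
        # Content digest if requested, otherwise the modification time.
        if use_sha1:
            try:
                with open(path, "rb") as f:
                    return hashlib.sha1(f.read()).digest()
            except OSError:
                return None
        try:
            return os.path.getmtime(path)
        except OSError:
            return None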
filename_repository_t._dump | (self) |
Dump contents for debugging/testing.
|
Dump contents for debugging/testing.
| def _dump(self): # pragma: no cover
"""
Dump contents for debugging/testing.
"""
print(70 * "-")
print("ID lookup table:")
for name in self.__id_lut:
id_ = self.__id_lut[name]
print(" %s -> %d" % (name, id_))
print(70 * "-")
print("%-4s %-60s %s" % ("ID", "Filename", "Refcount"))
print(70 * "-")
for id_ in self.__entries:
entry = self.__entries[id_]
print("%04d %-60s %d" % (id_, entry.filename, entry.refcount)) | [
"def",
"_dump",
"(",
"self",
")",
":",
"# pragma: no cover",
"print",
"(",
"70",
"*",
"\"-\"",
")",
"print",
"(",
"\"ID lookup table:\"",
")",
"for",
"name",
"in",
"self",
".",
"__id_lut",
":",
"id_",
"=",
"self",
".",
"__id_lut",
"[",
"name",
"]",
"print",
"(",
"\" %s -> %d\"",
"%",
"(",
"name",
",",
"id_",
")",
")",
"print",
"(",
"70",
"*",
"\"-\"",
")",
"print",
"(",
"\"%-4s %-60s %s\"",
"%",
"(",
"\"ID\"",
",",
"\"Filename\"",
",",
"\"Refcount\"",
")",
")",
"print",
"(",
"70",
"*",
"\"-\"",
")",
"for",
"id_",
"in",
"self",
".",
"__entries",
":",
"entry",
"=",
"self",
".",
"__entries",
"[",
"id_",
"]",
"print",
"(",
"\"%04d %-60s %d\"",
"%",
"(",
"id_",
",",
"entry",
".",
"filename",
",",
"entry",
".",
"refcount",
")",
")"
] | [560, 4] | [576, 74] | python | en | ['en', 'error', 'th'] | False |
DatabaseStoreBackend.store_backend_id | (self) |
Create a store_backend_id if one does not exist, and return it if it exists
Ephemeral store_backend_id for database_store_backend until there is a place to store metadata
Returns:
store_backend_id which is a UUID(version=4)
|
Create a store_backend_id if one does not exist, and return it if it exists
Ephemeral store_backend_id for database_store_backend until there is a place to store metadata
Returns:
store_backend_id which is a UUID(version=4)
| def store_backend_id(self) -> str:
"""
Create a store_backend_id if one does not exist, and return it if it exists
Ephemeral store_backend_id for database_store_backend until there is a place to store metadata
Returns:
store_backend_id which is a UUID(version=4)
"""
if not self._store_backend_id:
store_id = (
self._manually_initialize_store_backend_id
if self._manually_initialize_store_backend_id
else str(uuid.uuid4())
)
self._store_backend_id = f"{self.STORE_BACKEND_ID_PREFIX}{store_id}"
return self._store_backend_id.replace(self.STORE_BACKEND_ID_PREFIX, "") | [
"def",
"store_backend_id",
"(",
"self",
")",
"->",
"str",
":",
"if",
"not",
"self",
".",
"_store_backend_id",
":",
"store_id",
"=",
"(",
"self",
".",
"_manually_initialize_store_backend_id",
"if",
"self",
".",
"_manually_initialize_store_backend_id",
"else",
"str",
"(",
"uuid",
".",
"uuid4",
"(",
")",
")",
")",
"self",
".",
"_store_backend_id",
"=",
"f\"{self.STORE_BACKEND_ID_PREFIX}{store_id}\"",
"return",
"self",
".",
"_store_backend_id",
".",
"replace",
"(",
"self",
".",
"STORE_BACKEND_ID_PREFIX",
",",
"\"\"",
")"
] | [146, 4] | [161, 79] | python | en | ['en', 'error', 'th'] | False |
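The property lazily builds a prefixed UUID4 once and strips the prefix on every read. The shape of that behaviour in isolation (the prefix string here is only illustrative; the real value is the class constant STORE_BACKEND_ID_PREFIX):

    import uuid

    PREFIX = "store_backend_id = "  # assumed/illustrative value

    class MiniBackend:
        def __init__(self):
            self._store_backend_id = None

        @property
        def store_backend_id(self):
            # Generate once, then strip the prefix on every access.
            if not self._store_backend_id:
                self._store_backend_id = f"{PREFIX}{uuid.uuid4()}"
            return self._store_backend_id.replace(PREFIX, "")

    b = MiniBackend()
    assert b.store_backend_id == b.store_backend_id  # generated once, then stable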
DatabaseStoreBackend._build_engine | (self, credentials, **kwargs) |
Using a set of given credentials, constructs an Execution Engine , connecting to a database using a URL or a
private key path.
|
Using a set of given credentials, constructs an Execution Engine , connecting to a database using a URL or a
private key path.
| def _build_engine(self, credentials, **kwargs) -> "sa.engine.Engine":
"""
Using a set of given credentials, constructs an Execution Engine , connecting to a database using a URL or a
private key path.
"""
# Update credentials with anything passed during connection time
drivername = credentials.pop("drivername")
create_engine_kwargs = kwargs
self._schema_name = credentials.pop("schema", None)
connect_args = credentials.pop("connect_args", None)
if connect_args:
create_engine_kwargs["connect_args"] = connect_args
if "private_key_path" in credentials:
options, create_engine_kwargs = self._get_sqlalchemy_key_pair_auth_url(
drivername, credentials
)
else:
options = sa.engine.url.URL(drivername, **credentials)
self.drivername = drivername
engine = sa.create_engine(options, **create_engine_kwargs)
return engine | [
"def",
"_build_engine",
"(",
"self",
",",
"credentials",
",",
"*",
"*",
"kwargs",
")",
"->",
"\"sa.engine.Engine\"",
":",
"# Update credentials with anything passed during connection time",
"drivername",
"=",
"credentials",
".",
"pop",
"(",
"\"drivername\"",
")",
"create_engine_kwargs",
"=",
"kwargs",
"self",
".",
"_schema_name",
"=",
"credentials",
".",
"pop",
"(",
"\"schema\"",
",",
"None",
")",
"connect_args",
"=",
"credentials",
".",
"pop",
"(",
"\"connect_args\"",
",",
"None",
")",
"if",
"connect_args",
":",
"create_engine_kwargs",
"[",
"\"connect_args\"",
"]",
"=",
"connect_args",
"if",
"\"private_key_path\"",
"in",
"credentials",
":",
"options",
",",
"create_engine_kwargs",
"=",
"self",
".",
"_get_sqlalchemy_key_pair_auth_url",
"(",
"drivername",
",",
"credentials",
")",
"else",
":",
"options",
"=",
"sa",
".",
"engine",
".",
"url",
".",
"URL",
"(",
"drivername",
",",
"*",
"*",
"credentials",
")",
"self",
".",
"drivername",
"=",
"drivername",
"engine",
"=",
"sa",
".",
"create_engine",
"(",
"options",
",",
"*",
"*",
"create_engine_kwargs",
")",
"return",
"engine"
] | [163, 4] | [186, 21] | python | en | ['en', 'error', 'th'] | False |
DatabaseStoreBackend._get_sqlalchemy_key_pair_auth_url | (
self, drivername: str, credentials: dict
) |
Utilizing a private key path and a passphrase in a given credentials dictionary, attempts to encode the provided
values into a private key. If passphrase is incorrect, this will fail and an exception is raised.
Args:
drivername(str) - The name of the driver class
credentials(dict) - A dictionary of database credentials used to access the database
Returns:
a tuple consisting of a url with the serialized key-pair authentication, and a dictionary of engine kwargs.
|
Utilizing a private key path and a passphrase in a given credentials dictionary, attempts to encode the provided
values into a private key. If passphrase is incorrect, this will fail and an exception is raised. | def _get_sqlalchemy_key_pair_auth_url(
self, drivername: str, credentials: dict
) -> Tuple["URL", Dict]:
"""
Utilizing a private key path and a passphrase in a given credentials dictionary, attempts to encode the provided
values into a private key. If passphrase is incorrect, this will fail and an exception is raised.
Args:
drivername(str) - The name of the driver class
credentials(dict) - A dictionary of database credentials used to access the database
Returns:
a tuple consisting of a url with the serialized key-pair authentication, and a dictionary of engine kwargs.
"""
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import serialization
private_key_path = credentials.pop("private_key_path")
private_key_passphrase = credentials.pop("private_key_passphrase")
with Path(private_key_path).expanduser().resolve().open(mode="rb") as key:
try:
p_key = serialization.load_pem_private_key(
key.read(),
password=private_key_passphrase.encode()
if private_key_passphrase
else None,
backend=default_backend(),
)
except ValueError as e:
if "incorrect password" in str(e).lower():
raise ge_exceptions.DatasourceKeyPairAuthBadPassphraseError(
datasource_name="SqlAlchemyDatasource",
message="Decryption of key failed, was the passphrase incorrect?",
) from e
else:
raise e
pkb = p_key.private_bytes(
encoding=serialization.Encoding.DER,
format=serialization.PrivateFormat.PKCS8,
encryption_algorithm=serialization.NoEncryption(),
)
credentials_driver_name = credentials.pop("drivername", None)
create_engine_kwargs = {"connect_args": {"private_key": pkb}}
return (
sa.engine.url.URL(drivername or credentials_driver_name, **credentials),
create_engine_kwargs,
) | [
"def",
"_get_sqlalchemy_key_pair_auth_url",
"(",
"self",
",",
"drivername",
":",
"str",
",",
"credentials",
":",
"dict",
")",
"->",
"Tuple",
"[",
"\"URL\"",
",",
"Dict",
"]",
":",
"from",
"cryptography",
".",
"hazmat",
".",
"backends",
"import",
"default_backend",
"from",
"cryptography",
".",
"hazmat",
".",
"primitives",
"import",
"serialization",
"private_key_path",
"=",
"credentials",
".",
"pop",
"(",
"\"private_key_path\"",
")",
"private_key_passphrase",
"=",
"credentials",
".",
"pop",
"(",
"\"private_key_passphrase\"",
")",
"with",
"Path",
"(",
"private_key_path",
")",
".",
"expanduser",
"(",
")",
".",
"resolve",
"(",
")",
".",
"open",
"(",
"mode",
"=",
"\"rb\"",
")",
"as",
"key",
":",
"try",
":",
"p_key",
"=",
"serialization",
".",
"load_pem_private_key",
"(",
"key",
".",
"read",
"(",
")",
",",
"password",
"=",
"private_key_passphrase",
".",
"encode",
"(",
")",
"if",
"private_key_passphrase",
"else",
"None",
",",
"backend",
"=",
"default_backend",
"(",
")",
",",
")",
"except",
"ValueError",
"as",
"e",
":",
"if",
"\"incorrect password\"",
"in",
"str",
"(",
"e",
")",
".",
"lower",
"(",
")",
":",
"raise",
"ge_exceptions",
".",
"DatasourceKeyPairAuthBadPassphraseError",
"(",
"datasource_name",
"=",
"\"SqlAlchemyDatasource\"",
",",
"message",
"=",
"\"Decryption of key failed, was the passphrase incorrect?\"",
",",
")",
"from",
"e",
"else",
":",
"raise",
"e",
"pkb",
"=",
"p_key",
".",
"private_bytes",
"(",
"encoding",
"=",
"serialization",
".",
"Encoding",
".",
"DER",
",",
"format",
"=",
"serialization",
".",
"PrivateFormat",
".",
"PKCS8",
",",
"encryption_algorithm",
"=",
"serialization",
".",
"NoEncryption",
"(",
")",
",",
")",
"credentials_driver_name",
"=",
"credentials",
".",
"pop",
"(",
"\"drivername\"",
",",
"None",
")",
"create_engine_kwargs",
"=",
"{",
"\"connect_args\"",
":",
"{",
"\"private_key\"",
":",
"pkb",
"}",
"}",
"return",
"(",
"sa",
".",
"engine",
".",
"url",
".",
"URL",
"(",
"drivername",
"or",
"credentials_driver_name",
",",
"*",
"*",
"credentials",
")",
",",
"create_engine_kwargs",
",",
")"
] | [188, 4] | [236, 9] | python | en | ['en', 'error', 'th'] | False |
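The key-pair branch boils down to: read a PEM private key, decrypt it with the passphrase, and re-serialize it as unencrypted PKCS#8 DER for the driver's connect_args. A trimmed sketch using the cryptography package (path and passphrase are placeholders):

    from pathlib import Path

    from cryptography.hazmat.backends import default_backend
    from cryptography.hazmat.primitives import serialization

    def load_key_der(private_key_path, passphrase=None):
        # Returns DER-encoded, unencrypted PKCS#8 bytes, the form drivers
        # such as snowflake-connector expect under connect_args["private_key"].
        pem = Path(private_key_path).expanduser().read_bytes()
        key = serialization.load_pem_private_key(
            pem,
            password=passphrase.encode() if passphrase else None,
            backend=default_backend(),
        )
        return key.private_bytes(
            encoding=serialization.Encoding.DER,
            format=serialization.PrivateFormat.PKCS8,
            encryption_algorithm=serialization.NoEncryption(),
        )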
TransformersTranslator.__init__ | (
self,
model_name_or_path: str,
tokenizer_name: Optional[str] = None,
max_seq_len: Optional[int] = None,
clean_up_tokenization_spaces: Optional[bool] = True
) | Initialize the translator with a model that fits your targeted languages. While we support all seq2seq
models from Hugging Face's model hub, we recommend using the OPUS models from Helsiniki NLP. They provide plenty
of different models, usually one model per language pair and translation direction.
They have a pretty standardized naming that should help you find the right model:
- "Helsinki-NLP/opus-mt-en-de" => translating from English to German
- "Helsinki-NLP/opus-mt-de-en" => translating from German to English
- "Helsinki-NLP/opus-mt-fr-en" => translating from French to English
- "Helsinki-NLP/opus-mt-hi-en"=> translating from Hindi to English
...
They also have a few multilingual models that support multiple languages at once.
:param model_name_or_path: Name of the seq2seq model that shall be used for translation.
Can be a remote name from Huggingface's modelhub or a local path.
:param tokenizer_name: Optional tokenizer name. If not supplied, `model_name_or_path` will also be used for the
tokenizer.
:param max_seq_len: The maximum sentence length the model accepts. (Optional)
:param clean_up_tokenization_spaces: Whether or not to clean up the tokenization spaces. (default True)
| Initialize the translator with a model that fits your targeted languages. While we support all seq2seq
models from Hugging Face's model hub, we recommend using the OPUS models from Helsiniki NLP. They provide plenty
of different models, usually one model per language pair and translation direction.
They have a pretty standardized naming that should help you find the right model:
- "Helsinki-NLP/opus-mt-en-de" => translating from English to German
- "Helsinki-NLP/opus-mt-de-en" => translating from German to English
- "Helsinki-NLP/opus-mt-fr-en" => translating from French to English
- "Helsinki-NLP/opus-mt-hi-en"=> translating from Hindi to English
... | def __init__(
self,
model_name_or_path: str,
tokenizer_name: Optional[str] = None,
max_seq_len: Optional[int] = None,
clean_up_tokenization_spaces: Optional[bool] = True
):
""" Initialize the translator with a model that fits your targeted languages. While we support all seq2seq
models from Hugging Face's model hub, we recommend using the OPUS models from Helsiniki NLP. They provide plenty
of different models, usually one model per language pair and translation direction.
They have a pretty standardized naming that should help you find the right model:
- "Helsinki-NLP/opus-mt-en-de" => translating from English to German
- "Helsinki-NLP/opus-mt-de-en" => translating from German to English
- "Helsinki-NLP/opus-mt-fr-en" => translating from French to English
- "Helsinki-NLP/opus-mt-hi-en"=> translating from Hindi to English
...
They also have a few multilingual models that support multiple languages at once.
:param model_name_or_path: Name of the seq2seq model that shall be used for translation.
Can be a remote name from Huggingface's modelhub or a local path.
:param tokenizer_name: Optional tokenizer name. If not supplied, `model_name_or_path` will also be used for the
tokenizer.
:param max_seq_len: The maximum sentence length the model accepts. (Optional)
:param clean_up_tokenization_spaces: Whether or not to clean up the tokenization spaces. (default True)
"""
self.max_seq_len = max_seq_len
self.clean_up_tokenization_spaces = clean_up_tokenization_spaces
tokenizer_name = tokenizer_name or model_name_or_path
self.tokenizer = AutoTokenizer.from_pretrained(
tokenizer_name
)
self.model = AutoModelForSeq2SeqLM.from_pretrained(model_name_or_path) | [
"def",
"__init__",
"(",
"self",
",",
"model_name_or_path",
":",
"str",
",",
"tokenizer_name",
":",
"Optional",
"[",
"str",
"]",
"=",
"None",
",",
"max_seq_len",
":",
"Optional",
"[",
"int",
"]",
"=",
"None",
",",
"clean_up_tokenization_spaces",
":",
"Optional",
"[",
"bool",
"]",
"=",
"True",
")",
":",
"self",
".",
"max_seq_len",
"=",
"max_seq_len",
"self",
".",
"clean_up_tokenization_spaces",
"=",
"clean_up_tokenization_spaces",
"tokenizer_name",
"=",
"tokenizer_name",
"or",
"model_name_or_path",
"self",
".",
"tokenizer",
"=",
"AutoTokenizer",
".",
"from_pretrained",
"(",
"tokenizer_name",
")",
"self",
".",
"model",
"=",
"AutoModelForSeq2SeqLM",
".",
"from_pretrained",
"(",
"model_name_or_path",
")"
] | [31, 4] | [64, 78] | python | en | ['en', 'en', 'en'] | True |
TransformersTranslator.translate | (
self,
query: Optional[str] = None,
documents: Optional[Union[List[Document], List[str], List[Dict[str, Any]]]] = None,
dict_key: Optional[str] = None,
**kwargs
) |
Run the actual translation. You can supply a query or a list of documents. Whatever is supplied will be translated.
|
Run the actual translation. You can supply a query or a list of documents. Whatever is supplied will be translated.
| def translate(
self,
query: Optional[str] = None,
documents: Optional[Union[List[Document], List[str], List[Dict[str, Any]]]] = None,
dict_key: Optional[str] = None,
**kwargs
) -> Union[str, List[Document], List[str], List[Dict[str, Any]]]:
"""
Run the actual translation. You can supply a query or a list of documents. Whatever is supplied will be translated.
"""
if not query and not documents:
raise AttributeError("Translator need query or documents to perform translation")
if query and documents:
raise AttributeError("Translator need either query or documents but not both")
if documents and len(documents) == 0:
logger.warning("Empty documents list is passed")
return documents
dict_key = dict_key or "text"
if isinstance(documents, list):
if isinstance(documents[0], Document):
text_for_translator = [doc.text for doc in documents] # type: ignore
elif isinstance(documents[0], str):
text_for_translator = documents # type: ignore
else:
if not isinstance(documents[0].get(dict_key, None), str): # type: ignore
raise AttributeError(f"Dictionary should have {dict_key} key and it's value should be `str` type")
text_for_translator = [doc[dict_key] for doc in documents] # type: ignore
else:
text_for_translator: List[str] = [query] # type: ignore
batch = self.tokenizer.prepare_seq2seq_batch(
src_texts=text_for_translator,
return_tensors="pt",
max_length=self.max_seq_len
)
generated_output = self.model.generate(**batch)
translated_texts = self.tokenizer.batch_decode(
generated_output,
skip_special_tokens=True,
clean_up_tokenization_spaces=self.clean_up_tokenization_spaces
)
if query:
return translated_texts[0]
elif documents:
if isinstance(documents, list) and isinstance(documents[0], str):
return [translated_text for translated_text in translated_texts]
for translated_text, doc in zip(translated_texts, documents):
if isinstance(doc, Document):
doc.text = translated_text
else:
doc[dict_key] = translated_text # type: ignore
return documents
raise AttributeError("Translator need query or documents to perform translation") | [
"def",
"translate",
"(",
"self",
",",
"query",
":",
"Optional",
"[",
"str",
"]",
"=",
"None",
",",
"documents",
":",
"Optional",
"[",
"Union",
"[",
"List",
"[",
"Document",
"]",
",",
"List",
"[",
"str",
"]",
",",
"List",
"[",
"Dict",
"[",
"str",
",",
"Any",
"]",
"]",
"]",
"]",
"=",
"None",
",",
"dict_key",
":",
"Optional",
"[",
"str",
"]",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
"->",
"Union",
"[",
"str",
",",
"List",
"[",
"Document",
"]",
",",
"List",
"[",
"str",
"]",
",",
"List",
"[",
"Dict",
"[",
"str",
",",
"Any",
"]",
"]",
"]",
":",
"if",
"not",
"query",
"and",
"not",
"documents",
":",
"raise",
"AttributeError",
"(",
"\"Translator need query or documents to perform translation\"",
")",
"if",
"query",
"and",
"documents",
":",
"raise",
"AttributeError",
"(",
"\"Translator need either query or documents but not both\"",
")",
"if",
"documents",
"and",
"len",
"(",
"documents",
")",
"==",
"0",
":",
"logger",
".",
"warning",
"(",
"\"Empty documents list is passed\"",
")",
"return",
"documents",
"dict_key",
"=",
"dict_key",
"or",
"\"text\"",
"if",
"isinstance",
"(",
"documents",
",",
"list",
")",
":",
"if",
"isinstance",
"(",
"documents",
"[",
"0",
"]",
",",
"Document",
")",
":",
"text_for_translator",
"=",
"[",
"doc",
".",
"text",
"for",
"doc",
"in",
"documents",
"]",
"# type: ignore",
"elif",
"isinstance",
"(",
"documents",
"[",
"0",
"]",
",",
"str",
")",
":",
"text_for_translator",
"=",
"documents",
"# type: ignore",
"else",
":",
"if",
"not",
"isinstance",
"(",
"documents",
"[",
"0",
"]",
".",
"get",
"(",
"dict_key",
",",
"None",
")",
",",
"str",
")",
":",
"# type: ignore",
"raise",
"AttributeError",
"(",
"f\"Dictionary should have {dict_key} key and it's value should be `str` type\"",
")",
"text_for_translator",
"=",
"[",
"doc",
"[",
"dict_key",
"]",
"for",
"doc",
"in",
"documents",
"]",
"# type: ignore",
"else",
":",
"text_for_translator",
":",
"List",
"[",
"str",
"]",
"=",
"[",
"query",
"]",
"# type: ignore",
"batch",
"=",
"self",
".",
"tokenizer",
".",
"prepare_seq2seq_batch",
"(",
"src_texts",
"=",
"text_for_translator",
",",
"return_tensors",
"=",
"\"pt\"",
",",
"max_length",
"=",
"self",
".",
"max_seq_len",
")",
"generated_output",
"=",
"self",
".",
"model",
".",
"generate",
"(",
"*",
"*",
"batch",
")",
"translated_texts",
"=",
"self",
".",
"tokenizer",
".",
"batch_decode",
"(",
"generated_output",
",",
"skip_special_tokens",
"=",
"True",
",",
"clean_up_tokenization_spaces",
"=",
"self",
".",
"clean_up_tokenization_spaces",
")",
"if",
"query",
":",
"return",
"translated_texts",
"[",
"0",
"]",
"elif",
"documents",
":",
"if",
"isinstance",
"(",
"documents",
",",
"list",
")",
"and",
"isinstance",
"(",
"documents",
"[",
"0",
"]",
",",
"str",
")",
":",
"return",
"[",
"translated_text",
"for",
"translated_text",
"in",
"translated_texts",
"]",
"for",
"translated_text",
",",
"doc",
"in",
"zip",
"(",
"translated_texts",
",",
"documents",
")",
":",
"if",
"isinstance",
"(",
"doc",
",",
"Document",
")",
":",
"doc",
".",
"text",
"=",
"translated_text",
"else",
":",
"doc",
"[",
"dict_key",
"]",
"=",
"translated_text",
"# type: ignore",
"return",
"documents",
"raise",
"AttributeError",
"(",
"\"Translator need query or documents to perform translation\"",
")"
] | [66, 4] | [126, 89] | python | en | ['en', 'error', 'th'] | False |
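Usage is just construction plus one call; the sketch below mirrors the signatures shown above. It assumes the TransformersTranslator class is importable and that the OPUS model (one of the examples from the docstring) can be downloaded:

    # Assumes TransformersTranslator as defined above is importable.
    translator = TransformersTranslator(model_name_or_path="Helsinki-NLP/opus-mt-en-de")

    # A single query returns one translated string.
    print(translator.translate(query="What is the capital of Germany?"))

    # A list of plain strings returns a list of translated strings.
    docs = ["Berlin is the capital of Germany.", "Munich is in Bavaria."]
    print(translator.translate(documents=docs))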
BaseConverter.__init__ | (self, remove_numeric_tables: bool = False, valid_languages: Optional[List[str]] = None) |
:param remove_numeric_tables: This option uses heuristics to remove numeric rows from the tables.
The tabular structures in documents might be noise for the reader model if it
does not have table parsing capability for finding answers. However, tables
may also have long strings that could possible candidate for searching answers.
The rows containing strings are thus retained in this option.
:param valid_languages: validate languages from a list of languages specified in the ISO 639-1
(https://en.wikipedia.org/wiki/ISO_639-1) format.
This option can be used to add test for encoding errors. If the extracted text is
not one of the valid languages, then it might likely be encoding error resulting
in garbled text.
|
:param remove_numeric_tables: This option uses heuristics to remove numeric rows from the tables.
The tabular structures in documents might be noise for the reader model if it
does not have table parsing capability for finding answers. However, tables
may also have long strings that could possible candidate for searching answers.
The rows containing strings are thus retained in this option.
:param valid_languages: validate languages from a list of languages specified in the ISO 639-1
(https://en.wikipedia.org/wiki/ISO_639-1) format.
This option can be used to add test for encoding errors. If the extracted text is
not one of the valid languages, then it might likely be encoding error resulting
in garbled text.
| def __init__(self, remove_numeric_tables: bool = False, valid_languages: Optional[List[str]] = None):
"""
:param remove_numeric_tables: This option uses heuristics to remove numeric rows from the tables.
The tabular structures in documents might be noise for the reader model if it
does not have table parsing capability for finding answers. However, tables
may also have long strings that could possible candidate for searching answers.
The rows containing strings are thus retained in this option.
:param valid_languages: validate languages from a list of languages specified in the ISO 639-1
(https://en.wikipedia.org/wiki/ISO_639-1) format.
This option can be used to add test for encoding errors. If the extracted text is
not one of the valid languages, then it might likely be encoding error resulting
in garbled text.
"""
self.remove_numeric_tables = remove_numeric_tables
self.valid_languages = valid_languages | [
"def",
"__init__",
"(",
"self",
",",
"remove_numeric_tables",
":",
"bool",
"=",
"False",
",",
"valid_languages",
":",
"Optional",
"[",
"List",
"[",
"str",
"]",
"]",
"=",
"None",
")",
":",
"self",
".",
"remove_numeric_tables",
"=",
"remove_numeric_tables",
"self",
".",
"valid_languages",
"=",
"valid_languages"
] | [16, 4] | [30, 46] | python | en | ['en', 'error', 'th'] | False |
BaseConverter.convert | (
self,
file_path: Path,
meta: Optional[Dict[str, str]],
remove_numeric_tables: Optional[bool] = None,
valid_languages: Optional[List[str]] = None,
) |
Convert a file to a dictionary containing the text and any associated meta data.
File converters may extract file meta like name or size. In addition to it, user
supplied meta data like author, url, external IDs can be supplied as a dictionary.
:param file_path: path of the file to convert
:param meta: dictionary of meta data key-value pairs to append in the returned document.
:param remove_numeric_tables: This option uses heuristics to remove numeric rows from the tables.
The tabular structures in documents might be noise for the reader model if it
does not have table parsing capability for finding answers. However, tables
may also have long strings that could possible candidate for searching answers.
The rows containing strings are thus retained in this option.
:param valid_languages: validate languages from a list of languages specified in the ISO 639-1
(https://en.wikipedia.org/wiki/ISO_639-1) format.
This option can be used to add test for encoding errors. If the extracted text is
not one of the valid languages, then it might likely be encoding error resulting
in garbled text.
|
Convert a file to a dictionary containing the text and any associated meta data. | def convert(
self,
file_path: Path,
meta: Optional[Dict[str, str]],
remove_numeric_tables: Optional[bool] = None,
valid_languages: Optional[List[str]] = None,
) -> Dict[str, Any]:
"""
Convert a file to a dictionary containing the text and any associated meta data.
File converters may extract file meta like name or size. In addition to it, user
supplied meta data like author, url, external IDs can be supplied as a dictionary.
:param file_path: path of the file to convert
:param meta: dictionary of meta data key-value pairs to append in the returned document.
:param remove_numeric_tables: This option uses heuristics to remove numeric rows from the tables.
The tabular structures in documents might be noise for the reader model if it
does not have table parsing capability for finding answers. However, tables
may also have long strings that could possible candidate for searching answers.
The rows containing strings are thus retained in this option.
:param valid_languages: validate languages from a list of languages specified in the ISO 639-1
(https://en.wikipedia.org/wiki/ISO_639-1) format.
This option can be used to add test for encoding errors. If the extracted text is
not one of the valid languages, then it might likely be encoding error resulting
in garbled text.
"""
pass | [
"def",
"convert",
"(",
"self",
",",
"file_path",
":",
"Path",
",",
"meta",
":",
"Optional",
"[",
"Dict",
"[",
"str",
",",
"str",
"]",
"]",
",",
"remove_numeric_tables",
":",
"Optional",
"[",
"bool",
"]",
"=",
"None",
",",
"valid_languages",
":",
"Optional",
"[",
"List",
"[",
"str",
"]",
"]",
"=",
"None",
",",
")",
"->",
"Dict",
"[",
"str",
",",
"Any",
"]",
":",
"pass"
] | [33, 4] | [59, 12] | python | en | ['en', 'error', 'th'] | False |
BaseConverter.validate_language | (self, text: str) |
Validate if the language of the text is one of valid languages.
|
Validate if the language of the text is one of valid languages.
| def validate_language(self, text: str) -> bool:
"""
Validate if the language of the text is one of valid languages.
"""
if not self.valid_languages:
return True
try:
lang = langdetect.detect(text)
except langdetect.lang_detect_exception.LangDetectException:
lang = None
if lang in self.valid_languages:
return True
else:
return False | [
"def",
"validate_language",
"(",
"self",
",",
"text",
":",
"str",
")",
"->",
"bool",
":",
"if",
"not",
"self",
".",
"valid_languages",
":",
"return",
"True",
"try",
":",
"lang",
"=",
"langdetect",
".",
"detect",
"(",
"text",
")",
"except",
"langdetect",
".",
"lang_detect_exception",
".",
"LangDetectException",
":",
"lang",
"=",
"None",
"if",
"lang",
"in",
"self",
".",
"valid_languages",
":",
"return",
"True",
"else",
":",
"return",
"False"
] | [61, 4] | [76, 24] | python | en | ['en', 'error', 'th'] | False |
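validate_language is a thin wrapper over langdetect: anything whose detected ISO 639-1 code falls outside valid_languages, or that cannot be detected at all, is rejected. The same check as a free function (detection results can vary, so the expected outputs are indicative only):

    import langdetect

    def looks_valid(text, valid_languages=("en",)):
        try:
            lang = langdetect.detect(text)
        except langdetect.lang_detect_exception.LangDetectException:
            return False  # e.g. digits or punctuation only, nothing to detect
        return lang in valid_languages

    print(looks_valid("A short English sentence."))   # expected True
    print(looks_valid("Ein kurzer deutscher Satz."))  # expected False (detected as de)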
fillna | (X, value=None, method=None, axis=None, limit=None, downcast=None) | Impute missing values.
This function fills the missing values of the input sequence with the next/
previous known value. If there are contigous NaN values, they will all be
filled with the same next/previous known value.
Args:
X (ndarray or pandas.DataFrame):
Array of input sequence.
value:
Optional. Value to use to fill holes (e.g. 0), alternately a dict/Series/DataFrame of
values specifying which value to use for each index (for a Series) or column
(for a DataFrame). Values not in the dict/Series/DataFrame will not be filled.
This value cannot be a list. Default is None.
method (str or list):
Optional. String or list of strings describing whether to use forward or backward
fill. pad / ffill: propagate last valid observation forward to next valid.
backfill / bfill: use next valid observation to fill gap. Otherwise use ``None`` to
fill with desired value. Possible values include
``[‘backfill’, ‘bfill’, ‘pad’, ‘ffill’, None]``. Default is None.
axis (int or str):
Optional. Axis along which to fill missing value. Possible values include 0 or
"index", 1 or "columns". Default is None.
limit (int):
Optional. If method is specified, this is the maximum number of consecutive NaN values
to forward/backward fill. In other words, if there is a gap with more than this number
of consecutive NaNs, it will only be partially filled. If method is not specified,
this is the maximum number of entries along the entire axis where NaNs will be filled.
Must be greater than 0 if not None. Default is None.
downcast (dict):
Optional. A dict of item->dtype of what to downcast if possible, or the string "infer"
which will try to downcast to an appropriate equal type (e.g. float64 to int64 if
possible). Default is None.
Returns:
ndarray:
Array of input sequence with imputed values.
| Impute missing values. | def fillna(X, value=None, method=None, axis=None, limit=None, downcast=None):
"""Impute missing values.
This function fills the missing values of the input sequence with the next/
previous known value. If there are contigous NaN values, they will all be
filled with the same next/previous known value.
Args:
X (ndarray or pandas.DataFrame):
Array of input sequence.
value:
Optional. Value to use to fill holes (e.g. 0), alternately a dict/Series/DataFrame of
values specifying which value to use for each index (for a Series) or column
(for a DataFrame). Values not in the dict/Series/DataFrame will not be filled.
This value cannot be a list. Default is None.
method (str or list):
Optional. String or list of strings describing whether to use forward or backward
fill. pad / ffill: propagate last valid observation forward to next valid.
backfill / bfill: use next valid observation to fill gap. Otherwise use ``None`` to
fill with desired value. Possible values include
``[‘backfill’, ‘bfill’, ‘pad’, ‘ffill’, None]``. Default is None.
axis (int or str):
Optional. Axis along which to fill missing value. Possible values include 0 or
"index", 1 or "columns". Default is None.
limit (int):
Optional. If method is specified, this is the maximum number of consecutive NaN values
to forward/backward fill. In other words, if there is a gap with more than this number
of consecutive NaNs, it will only be partially filled. If method is not specified,
this is the maximum number of entries along the entire axis where NaNs will be filled.
Must be greater than 0 if not None. Default is None.
downcast (dict):
Optional. A dict of item->dtype of what to downcast if possible, or the string "infer"
which will try to downcast to an appropriate equal type (e.g. float64 to int64 if
possible). Default is None.
Returns:
ndarray:
Array of input sequence with imputed values.
"""
if isinstance(method, str) or method is None:
method = [method]
if isinstance(X, np.ndarray):
if X.ndim == 1:
X_ = pd.Series(X)
else:
X_ = pd.DataFrame(X)
else:
X_ = X.copy()
for fill in method:
X_ = X_.fillna(value=value, method=fill, axis=axis, limit=limit, downcast=downcast)
return X_.values | [
"def",
"fillna",
"(",
"X",
",",
"value",
"=",
"None",
",",
"method",
"=",
"None",
",",
"axis",
"=",
"None",
",",
"limit",
"=",
"None",
",",
"downcast",
"=",
"None",
")",
":",
"if",
"isinstance",
"(",
"method",
",",
"str",
")",
"or",
"method",
"is",
"None",
":",
"method",
"=",
"[",
"method",
"]",
"if",
"isinstance",
"(",
"X",
",",
"np",
".",
"ndarray",
")",
":",
"if",
"X",
".",
"ndim",
"==",
"1",
":",
"X_",
"=",
"pd",
".",
"Series",
"(",
"X",
")",
"else",
":",
"X_",
"=",
"pd",
".",
"DataFrame",
"(",
"X",
")",
"else",
":",
"X_",
"=",
"X",
".",
"copy",
"(",
")",
"for",
"fill",
"in",
"method",
":",
"X_",
"=",
"X_",
".",
"fillna",
"(",
"value",
"=",
"value",
",",
"method",
"=",
"fill",
",",
"axis",
"=",
"axis",
",",
"limit",
"=",
"limit",
",",
"downcast",
"=",
"downcast",
")",
"return",
"X_",
".",
"values"
] | [4, 0] | [58, 20] | python | en | ['nl', 'et', 'en'] | False |
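Because method also accepts a list, one call can chain a forward fill with a backward fill, which covers NaNs at the very start of the sequence. A short usage sketch (assumes the fillna function above is importable):

    import numpy as np

    X = np.array([np.nan, 1.0, np.nan, np.nan, 4.0])

    print(fillna(X, method="ffill"))             # leading NaN survives a pure forward fill
    print(fillna(X, method=["ffill", "bfill"]))  # [1. 1. 1. 1. 4.]
    print(fillna(X, value=0.0))                  # [0. 1. 0. 0. 4.]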
SuiteEditNotebookRenderer.render | (
self,
suite: ExpectationSuite,
batch_request: Optional[
Union[str, Dict[str, Union[str, int, Dict[str, Any]]]]
] = None,
) |
Render a notebook dict from an expectation suite.
|
Render a notebook dict from an expectation suite.
| def render(
self,
suite: ExpectationSuite,
batch_request: Optional[
Union[str, Dict[str, Union[str, int, Dict[str, Any]]]]
] = None,
) -> nbformat.NotebookNode:
"""
Render a notebook dict from an expectation suite.
"""
if not isinstance(suite, ExpectationSuite):
raise RuntimeWarning("render must be given an ExpectationSuite.")
self._notebook = nbformat.v4.new_notebook()
suite_name: str = suite.expectation_suite_name
if (
batch_request
and isinstance(batch_request, dict)
and BatchRequest(**batch_request)
):
batch_request = standardize_batch_request_display_ordering(
batch_request=batch_request
)
else:
batch_request = None
self.add_header(suite_name=suite_name, batch_request=batch_request)
self.add_authoring_intro(batch_request=batch_request)
self.add_expectation_cells_from_suite(
expectations=suite.expectations, batch_request=batch_request
)
self.add_footer(batch_request=batch_request)
return self._notebook | [
"def",
"render",
"(",
"self",
",",
"suite",
":",
"ExpectationSuite",
",",
"batch_request",
":",
"Optional",
"[",
"Union",
"[",
"str",
",",
"Dict",
"[",
"str",
",",
"Union",
"[",
"str",
",",
"int",
",",
"Dict",
"[",
"str",
",",
"Any",
"]",
"]",
"]",
"]",
"]",
"=",
"None",
",",
")",
"->",
"nbformat",
".",
"NotebookNode",
":",
"if",
"not",
"isinstance",
"(",
"suite",
",",
"ExpectationSuite",
")",
":",
"raise",
"RuntimeWarning",
"(",
"\"render must be given an ExpectationSuite.\"",
")",
"self",
".",
"_notebook",
"=",
"nbformat",
".",
"v4",
".",
"new_notebook",
"(",
")",
"suite_name",
":",
"str",
"=",
"suite",
".",
"expectation_suite_name",
"if",
"(",
"batch_request",
"and",
"isinstance",
"(",
"batch_request",
",",
"dict",
")",
"and",
"BatchRequest",
"(",
"*",
"*",
"batch_request",
")",
")",
":",
"batch_request",
"=",
"standardize_batch_request_display_ordering",
"(",
"batch_request",
"=",
"batch_request",
")",
"else",
":",
"batch_request",
"=",
"None",
"self",
".",
"add_header",
"(",
"suite_name",
"=",
"suite_name",
",",
"batch_request",
"=",
"batch_request",
")",
"self",
".",
"add_authoring_intro",
"(",
"batch_request",
"=",
"batch_request",
")",
"self",
".",
"add_expectation_cells_from_suite",
"(",
"expectations",
"=",
"suite",
".",
"expectations",
",",
"batch_request",
"=",
"batch_request",
")",
"self",
".",
"add_footer",
"(",
"batch_request",
"=",
"batch_request",
")",
"return",
"self",
".",
"_notebook"
] | [353, 4] | [388, 29] | python | en | ['en', 'error', 'th'] | False |
SuiteEditNotebookRenderer.render_to_disk | (
self,
suite: ExpectationSuite,
notebook_file_path: str,
batch_request: Optional[
Union[str, Dict[str, Union[str, int, Dict[str, Any]]]]
] = None,
) |
Render a notebook to disk from an expectation suite.
If batch_request dictionary is passed, its properties will override any found in suite citations.
|
Render a notebook to disk from an expectation suite. | def render_to_disk(
self,
suite: ExpectationSuite,
notebook_file_path: str,
batch_request: Optional[
Union[str, Dict[str, Union[str, int, Dict[str, Any]]]]
] = None,
) -> None:
"""
Render a notebook to disk from an expectation suite.
If batch_request dictionary is passed, its properties will override any found in suite citations.
"""
self.render(
suite=suite,
batch_request=batch_request,
)
self.write_notebook_to_disk(
notebook=self._notebook, notebook_file_path=notebook_file_path
) | [
"def",
"render_to_disk",
"(",
"self",
",",
"suite",
":",
"ExpectationSuite",
",",
"notebook_file_path",
":",
"str",
",",
"batch_request",
":",
"Optional",
"[",
"Union",
"[",
"str",
",",
"Dict",
"[",
"str",
",",
"Union",
"[",
"str",
",",
"int",
",",
"Dict",
"[",
"str",
",",
"Any",
"]",
"]",
"]",
"]",
"]",
"=",
"None",
",",
")",
"->",
"None",
":",
"self",
".",
"render",
"(",
"suite",
"=",
"suite",
",",
"batch_request",
"=",
"batch_request",
",",
")",
"self",
".",
"write_notebook_to_disk",
"(",
"notebook",
"=",
"self",
".",
"_notebook",
",",
"notebook_file_path",
"=",
"notebook_file_path",
")"
] | [391, 4] | [410, 9] | python | en | ['en', 'error', 'th'] | False |
venv | (request) |
Prepares a virtual environment for nose.
:rtype : virtual_environments.VirtualEnvDescription
|
Prepares a virtual environment for nose.
:rtype : virtual_environments.VirtualEnvDescription
| def venv(request):
"""
Prepares a virtual environment for nose.
:rtype : virtual_environments.VirtualEnvDescription
"""
return virtual_environments.prepare_virtualenv([request.param]) | [
"def",
"venv",
"(",
"request",
")",
":",
"return",
"virtual_environments",
".",
"prepare_virtualenv",
"(",
"[",
"request",
".",
"param",
"]",
")"
] | [11, 0] | [16, 67] | python | en | ['en', 'error', 'th'] | False |
SQLDocumentStore.__init__ | (
self,
url: str = "sqlite://",
index: str = "document",
label_index: str = "label",
update_existing_documents: bool = False,
) |
An SQL backed DocumentStore. Currently supports SQLite, PostgreSQL and MySQL backends.
:param url: URL for SQL database as expected by SQLAlchemy. More info here: https://docs.sqlalchemy.org/en/13/core/engines.html#database-urls
:param index: The documents are scoped to an index attribute that can be used when writing, querying, or deleting documents.
This parameter sets the default value for document index.
:param label_index: The default value of index attribute for the labels.
:param update_existing_documents: Whether to update any existing documents with the same ID when adding
documents. When set as True, any document with an existing ID gets updated.
If set to False, an error is raised if the document ID of the document being
added already exists. Using this parameter could cause performance degradation
for document insertion.
|
An SQL backed DocumentStore. Currently supports SQLite, PostgreSQL and MySQL backends. | def __init__(
self,
url: str = "sqlite://",
index: str = "document",
label_index: str = "label",
update_existing_documents: bool = False,
):
"""
An SQL backed DocumentStore. Currently supports SQLite, PostgreSQL and MySQL backends.
:param url: URL for SQL database as expected by SQLAlchemy. More info here: https://docs.sqlalchemy.org/en/13/core/engines.html#database-urls
:param index: The documents are scoped to an index attribute that can be used when writing, querying, or deleting documents.
This parameter sets the default value for document index.
:param label_index: The default value of index attribute for the labels.
:param update_existing_documents: Whether to update any existing documents with the same ID when adding
documents. When set as True, any document with an existing ID gets updated.
If set to False, an error is raised if the document ID of the document being
added already exists. Using this parameter could cause performance degradation
for document insertion.
"""
engine = create_engine(url)
ORMBase.metadata.create_all(engine)
Session = sessionmaker(bind=engine)
self.session = Session()
self.index: str = index
self.label_index = label_index
self.update_existing_documents = update_existing_documents
if getattr(self, "similarity", None) is None:
self.similarity = None
self.use_windowed_query = True
if "sqlite" in url:
import sqlite3
if sqlite3.sqlite_version < "3.25":
self.use_windowed_query = False | [
"def",
"__init__",
"(",
"self",
",",
"url",
":",
"str",
"=",
"\"sqlite://\"",
",",
"index",
":",
"str",
"=",
"\"document\"",
",",
"label_index",
":",
"str",
"=",
"\"label\"",
",",
"update_existing_documents",
":",
"bool",
"=",
"False",
",",
")",
":",
"engine",
"=",
"create_engine",
"(",
"url",
")",
"ORMBase",
".",
"metadata",
".",
"create_all",
"(",
"engine",
")",
"Session",
"=",
"sessionmaker",
"(",
"bind",
"=",
"engine",
")",
"self",
".",
"session",
"=",
"Session",
"(",
")",
"self",
".",
"index",
":",
"str",
"=",
"index",
"self",
".",
"label_index",
"=",
"label_index",
"self",
".",
"update_existing_documents",
"=",
"update_existing_documents",
"if",
"getattr",
"(",
"self",
",",
"\"similarity\"",
",",
"None",
")",
"is",
"None",
":",
"self",
".",
"similarity",
"=",
"None",
"self",
".",
"use_windowed_query",
"=",
"True",
"if",
"\"sqlite\"",
"in",
"url",
":",
"import",
"sqlite3",
"if",
"sqlite3",
".",
"sqlite_version",
"<",
"\"3.25\"",
":",
"self",
".",
"use_windowed_query",
"=",
"False"
] | [
70,
4
] | [
103,
47
] | python | en | ['en', 'error', 'th'] | False |
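The constructor boils down to the standard SQLAlchemy bootstrap: build an engine from a URL, create tables from declarative metadata, and open a session. A standalone sketch of that pattern with a toy table (not the actual haystack ORM classes):

```python
# Standalone SQLAlchemy setup mirroring the __init__ above:
# engine from URL -> create tables -> session factory -> session.
from sqlalchemy import Column, String, Text, create_engine
from sqlalchemy.orm import declarative_base, sessionmaker  # SQLAlchemy 1.4+ import path

Base = declarative_base()


class DocumentRow(Base):  # toy stand-in for DocumentORM
    __tablename__ = "document"
    id = Column(String(100), primary_key=True)
    index = Column(String(100), default="document")
    text = Column(Text)


engine = create_engine("sqlite://")      # in-memory SQLite, as in the default URL
Base.metadata.create_all(engine)
Session = sessionmaker(bind=engine)
session = Session()

session.add(DocumentRow(id="doc-1", text="hello world"))
session.commit()
print(session.query(DocumentRow).count())  # -> 1
```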
SQLDocumentStore.get_document_by_id | (self, id: str, index: Optional[str] = None) | Fetch a document by specifying its text id string | Fetch a document by specifying its text id string | def get_document_by_id(self, id: str, index: Optional[str] = None) -> Optional[Document]:
"""Fetch a document by specifying its text id string"""
documents = self.get_documents_by_id([id], index)
document = documents[0] if documents else None
return document | [
"def",
"get_document_by_id",
"(",
"self",
",",
"id",
":",
"str",
",",
"index",
":",
"Optional",
"[",
"str",
"]",
"=",
"None",
")",
"->",
"Optional",
"[",
"Document",
"]",
":",
"documents",
"=",
"self",
".",
"get_documents_by_id",
"(",
"[",
"id",
"]",
",",
"index",
")",
"document",
"=",
"documents",
"[",
"0",
"]",
"if",
"documents",
"else",
"None",
"return",
"document"
] | [
105,
4
] | [
109,
23
] | python | en | ['en', 'en', 'en'] | True |
SQLDocumentStore.get_documents_by_id | (self, ids: List[str], index: Optional[str] = None, batch_size: int = 10_000) | Fetch documents by specifying a list of text id strings | Fetch documents by specifying a list of text id strings | def get_documents_by_id(self, ids: List[str], index: Optional[str] = None, batch_size: int = 10_000) -> List[Document]:
"""Fetch documents by specifying a list of text id strings"""
index = index or self.index
documents = []
for i in range(0, len(ids), batch_size):
query = self.session.query(DocumentORM).filter(
DocumentORM.id.in_(ids[i: i + batch_size]),
DocumentORM.index == index
)
for row in query.all():
documents.append(self._convert_sql_row_to_document(row))
return documents | [
"def",
"get_documents_by_id",
"(",
"self",
",",
"ids",
":",
"List",
"[",
"str",
"]",
",",
"index",
":",
"Optional",
"[",
"str",
"]",
"=",
"None",
",",
"batch_size",
":",
"int",
"=",
"10_000",
")",
"->",
"List",
"[",
"Document",
"]",
":",
"index",
"=",
"index",
"or",
"self",
".",
"index",
"documents",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"len",
"(",
"ids",
")",
",",
"batch_size",
")",
":",
"query",
"=",
"self",
".",
"session",
".",
"query",
"(",
"DocumentORM",
")",
".",
"filter",
"(",
"DocumentORM",
".",
"id",
".",
"in_",
"(",
"ids",
"[",
"i",
":",
"i",
"+",
"batch_size",
"]",
")",
",",
"DocumentORM",
".",
"index",
"==",
"index",
")",
"for",
"row",
"in",
"query",
".",
"all",
"(",
")",
":",
"documents",
".",
"append",
"(",
"self",
".",
"_convert_sql_row_to_document",
"(",
"row",
")",
")",
"return",
"documents"
] | [
111,
4
] | [
124,
24
] | python | en | ['en', 'en', 'en'] | True |
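The id lookup above batches the `IN (...)` list so very long id lists do not produce oversized statements. A generic sketch of the same slicing, assuming a `session` and mapped `model` like in the SQLAlchemy sketch earlier:

```python
# Fetch rows for a large list of ids in bounded-size IN() batches.
# The slicing logic is the point; session/model come from a setup like above.
def fetch_by_ids(session, model, ids, batch_size=10_000):
    results = []
    for i in range(0, len(ids), batch_size):
        chunk = ids[i:i + batch_size]
        results.extend(
            session.query(model).filter(model.id.in_(chunk)).all()
        )
    return results
```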
SQLDocumentStore.get_documents_by_vector_ids | (
self,
vector_ids: List[str],
index: Optional[str] = None,
batch_size: int = 10_000
) |
Fetch documents by specifying a list of text vector id strings
:param vector_ids: List of vector_id strings.
:param index: Name of the index to get the documents from. If None, the
DocumentStore's default index (self.index) will be used.
:param batch_size: When working with large number of documents, batching can help reduce memory footprint.
|
Fetch documents by specifying a list of text vector id strings | def get_documents_by_vector_ids(
self,
vector_ids: List[str],
index: Optional[str] = None,
batch_size: int = 10_000
):
"""
Fetch documents by specifying a list of text vector id strings
:param vector_ids: List of vector_id strings.
:param index: Name of the index to get the documents from. If None, the
DocumentStore's default index (self.index) will be used.
:param batch_size: When working with large number of documents, batching can help reduce memory footprint.
"""
result = self._query(
index=index,
vector_ids=vector_ids,
batch_size=batch_size
)
documents = list(result)
sorted_documents = sorted(documents, key=lambda doc: vector_ids.index(doc.meta["vector_id"]))
return sorted_documents | [
"def",
"get_documents_by_vector_ids",
"(",
"self",
",",
"vector_ids",
":",
"List",
"[",
"str",
"]",
",",
"index",
":",
"Optional",
"[",
"str",
"]",
"=",
"None",
",",
"batch_size",
":",
"int",
"=",
"10_000",
")",
":",
"result",
"=",
"self",
".",
"_query",
"(",
"index",
"=",
"index",
",",
"vector_ids",
"=",
"vector_ids",
",",
"batch_size",
"=",
"batch_size",
")",
"documents",
"=",
"list",
"(",
"result",
")",
"sorted_documents",
"=",
"sorted",
"(",
"documents",
",",
"key",
"=",
"lambda",
"doc",
":",
"vector_ids",
".",
"index",
"(",
"doc",
".",
"meta",
"[",
"\"vector_id\"",
"]",
")",
")",
"return",
"sorted_documents"
] | [
126,
4
] | [
148,
31
] | python | en | ['en', 'error', 'th'] | False |
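Worth noting: the final `sorted(...)` re-orders results to match the requested `vector_ids`, and `list.index` inside the key makes that quadratic; a precomputed position map gives the same ordering in linear time. A small illustration with dictionaries standing in for documents:

```python
# Re-order fetched records to match the order of the requested ids.
requested = ["v3", "v1", "v2"]
fetched = [{"vector_id": "v1"}, {"vector_id": "v2"}, {"vector_id": "v3"}]

# As in the method above (simple, but list.index is O(n) per item):
ordered = sorted(fetched, key=lambda d: requested.index(d["vector_id"]))

# Same result with a precomputed position map (O(n) overall):
pos = {vid: i for i, vid in enumerate(requested)}
ordered_fast = sorted(fetched, key=lambda d: pos[d["vector_id"]])

assert [d["vector_id"] for d in ordered] == requested
assert ordered == ordered_fast
```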
SQLDocumentStore.get_all_documents_generator | (
self,
index: Optional[str] = None,
filters: Optional[Dict[str, List[str]]] = None,
return_embedding: Optional[bool] = None,
batch_size: int = 10_000,
) |
Get documents from the document store. Under the hood, documents are fetched in batches from the
document store and yielded as individual documents. This method can be used to iteratively process
a large number of documents without having to load all documents in memory.
:param index: Name of the index to get the documents from. If None, the
DocumentStore's default index (self.index) will be used.
:param filters: Optional filters to narrow down the documents to return.
Example: {"name": ["some", "more"], "category": ["only_one"]}
:param return_embedding: Whether to return the document embeddings.
:param batch_size: When working with large number of documents, batching can help reduce memory footprint.
|
Get documents from the document store. Under the hood, documents are fetched in batches from the
document store and yielded as individual documents. This method can be used to iteratively process
a large number of documents without having to load all documents in memory. | def get_all_documents_generator(
self,
index: Optional[str] = None,
filters: Optional[Dict[str, List[str]]] = None,
return_embedding: Optional[bool] = None,
batch_size: int = 10_000,
) -> Generator[Document, None, None]:
"""
Get documents from the document store. Under the hood, documents are fetched in batches from the
document store and yielded as individual documents. This method can be used to iteratively process
a large number of documents without having to load all documents in memory.
:param index: Name of the index to get the documents from. If None, the
DocumentStore's default index (self.index) will be used.
:param filters: Optional filters to narrow down the documents to return.
Example: {"name": ["some", "more"], "category": ["only_one"]}
:param return_embedding: Whether to return the document embeddings.
:param batch_size: When working with large number of documents, batching can help reduce memory footprint.
"""
if return_embedding is True:
raise Exception("return_embeddings is not supported by SQLDocumentStore.")
result = self._query(
index=index,
filters=filters,
batch_size=batch_size,
)
yield from result | [
"def",
"get_all_documents_generator",
"(",
"self",
",",
"index",
":",
"Optional",
"[",
"str",
"]",
"=",
"None",
",",
"filters",
":",
"Optional",
"[",
"Dict",
"[",
"str",
",",
"List",
"[",
"str",
"]",
"]",
"]",
"=",
"None",
",",
"return_embedding",
":",
"Optional",
"[",
"bool",
"]",
"=",
"None",
",",
"batch_size",
":",
"int",
"=",
"10_000",
",",
")",
"->",
"Generator",
"[",
"Document",
",",
"None",
",",
"None",
"]",
":",
"if",
"return_embedding",
"is",
"True",
":",
"raise",
"Exception",
"(",
"\"return_embeddings is not supported by SQLDocumentStore.\"",
")",
"result",
"=",
"self",
".",
"_query",
"(",
"index",
"=",
"index",
",",
"filters",
"=",
"filters",
",",
"batch_size",
"=",
"batch_size",
",",
")",
"yield",
"from",
"result"
] | [
159,
4
] | [
186,
25
] | python | en | ['en', 'error', 'th'] | False |
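The generator keeps memory bounded by yielding documents batch by batch instead of materialising the whole index. A stripped-down sketch of that shape, with an in-memory list standing in for the database:

```python
# Generator that streams items batch by batch instead of loading everything.
from typing import Generator


def stream_in_batches(fetch_batch, batch_size: int = 10_000) -> Generator[dict, None, None]:
    """fetch_batch(offset, limit) -> list of rows; assumed to return [] when exhausted."""
    offset = 0
    while True:
        rows = fetch_batch(offset, batch_size)
        if not rows:
            return
        yield from rows
        offset += len(rows)


# Usage with an in-memory stand-in for a database query:
data = [{"id": i} for i in range(25)]
stream = stream_in_batches(lambda off, lim: data[off:off + lim], batch_size=10)
assert sum(1 for _ in stream) == 25
```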
SQLDocumentStore._query | (
self,
index: Optional[str] = None,
filters: Optional[Dict[str, List[str]]] = None,
vector_ids: Optional[List[str]] = None,
only_documents_without_embedding: bool = False,
batch_size: int = 10_000
) |
:param index: Name of the index to get the documents from. If None, the
DocumentStore's default index (self.index) will be used.
:param filters: Optional filters to narrow down the documents to return.
Example: {"name": ["some", "more"], "category": ["only_one"]}
:param vector_ids: List of vector_id strings to filter the documents by.
:param only_documents_without_embedding: return only documents without an embedding.
:param batch_size: When working with large number of documents, batching can help reduce memory footprint.
|
:param index: Name of the index to get the documents from. If None, the
DocumentStore's default index (self.index) will be used.
:param filters: Optional filters to narrow down the documents to return.
Example: {"name": ["some", "more"], "category": ["only_one"]}
:param vector_ids: List of vector_id strings to filter the documents by.
:param only_documents_without_embedding: return only documents without an embedding.
:param batch_size: When working with large number of documents, batching can help reduce memory footprint.
| def _query(
self,
index: Optional[str] = None,
filters: Optional[Dict[str, List[str]]] = None,
vector_ids: Optional[List[str]] = None,
only_documents_without_embedding: bool = False,
batch_size: int = 10_000
):
"""
:param index: Name of the index to get the documents from. If None, the
DocumentStore's default index (self.index) will be used.
:param filters: Optional filters to narrow down the documents to return.
Example: {"name": ["some", "more"], "category": ["only_one"]}
:param vector_ids: List of vector_id strings to filter the documents by.
:param only_documents_without_embedding: return only documents without an embedding.
:param batch_size: When working with large number of documents, batching can help reduce memory footprint.
"""
index = index or self.index
# Generally ORM objects kept in memory cause performance issue
# Hence using directly column name improve memory and performance.
# Refer https://stackoverflow.com/questions/23185319/why-is-loading-sqlalchemy-objects-via-the-orm-5-8x-slower-than-rows-via-a-raw-my
documents_query = self.session.query(
DocumentORM.id,
DocumentORM.text,
DocumentORM.vector_id
).filter_by(index=index)
if filters:
documents_query = documents_query.join(MetaORM)
for key, values in filters.items():
documents_query = documents_query.filter(
MetaORM.name == key,
MetaORM.value.in_(values),
DocumentORM.id == MetaORM.document_id
)
if only_documents_without_embedding:
documents_query = documents_query.filter(DocumentORM.vector_id.is_(None))
if vector_ids:
documents_query = documents_query.filter(DocumentORM.vector_id.in_(vector_ids))
documents_map = {}
if self.use_windowed_query:
documents_query = self._windowed_query(documents_query, DocumentORM.id, batch_size)
for i, row in enumerate(documents_query, start=1):
documents_map[row.id] = Document(
id=row.id,
text=row.text,
meta=None if row.vector_id is None else {"vector_id": row.vector_id}
)
if i % batch_size == 0:
documents_map = self._get_documents_meta(documents_map)
yield from documents_map.values()
documents_map = {}
if documents_map:
documents_map = self._get_documents_meta(documents_map)
yield from documents_map.values() | [
"def",
"_query",
"(",
"self",
",",
"index",
":",
"Optional",
"[",
"str",
"]",
"=",
"None",
",",
"filters",
":",
"Optional",
"[",
"Dict",
"[",
"str",
",",
"List",
"[",
"str",
"]",
"]",
"]",
"=",
"None",
",",
"vector_ids",
":",
"Optional",
"[",
"List",
"[",
"str",
"]",
"]",
"=",
"None",
",",
"only_documents_without_embedding",
":",
"bool",
"=",
"False",
",",
"batch_size",
":",
"int",
"=",
"10_000",
")",
":",
"index",
"=",
"index",
"or",
"self",
".",
"index",
"# Generally ORM objects kept in memory cause performance issue",
"# Hence using directly column name improve memory and performance.",
"# Refer https://stackoverflow.com/questions/23185319/why-is-loading-sqlalchemy-objects-via-the-orm-5-8x-slower-than-rows-via-a-raw-my",
"documents_query",
"=",
"self",
".",
"session",
".",
"query",
"(",
"DocumentORM",
".",
"id",
",",
"DocumentORM",
".",
"text",
",",
"DocumentORM",
".",
"vector_id",
")",
".",
"filter_by",
"(",
"index",
"=",
"index",
")",
"if",
"filters",
":",
"documents_query",
"=",
"documents_query",
".",
"join",
"(",
"MetaORM",
")",
"for",
"key",
",",
"values",
"in",
"filters",
".",
"items",
"(",
")",
":",
"documents_query",
"=",
"documents_query",
".",
"filter",
"(",
"MetaORM",
".",
"name",
"==",
"key",
",",
"MetaORM",
".",
"value",
".",
"in_",
"(",
"values",
")",
",",
"DocumentORM",
".",
"id",
"==",
"MetaORM",
".",
"document_id",
")",
"if",
"only_documents_without_embedding",
":",
"documents_query",
"=",
"documents_query",
".",
"filter",
"(",
"DocumentORM",
".",
"vector_id",
".",
"is_",
"(",
"None",
")",
")",
"if",
"vector_ids",
":",
"documents_query",
"=",
"documents_query",
".",
"filter",
"(",
"DocumentORM",
".",
"vector_id",
".",
"in_",
"(",
"vector_ids",
")",
")",
"documents_map",
"=",
"{",
"}",
"if",
"self",
".",
"use_windowed_query",
":",
"documents_query",
"=",
"self",
".",
"_windowed_query",
"(",
"documents_query",
",",
"DocumentORM",
".",
"id",
",",
"batch_size",
")",
"for",
"i",
",",
"row",
"in",
"enumerate",
"(",
"documents_query",
",",
"start",
"=",
"1",
")",
":",
"documents_map",
"[",
"row",
".",
"id",
"]",
"=",
"Document",
"(",
"id",
"=",
"row",
".",
"id",
",",
"text",
"=",
"row",
".",
"text",
",",
"meta",
"=",
"None",
"if",
"row",
".",
"vector_id",
"is",
"None",
"else",
"{",
"\"vector_id\"",
":",
"row",
".",
"vector_id",
"}",
")",
"if",
"i",
"%",
"batch_size",
"==",
"0",
":",
"documents_map",
"=",
"self",
".",
"_get_documents_meta",
"(",
"documents_map",
")",
"yield",
"from",
"documents_map",
".",
"values",
"(",
")",
"documents_map",
"=",
"{",
"}",
"if",
"documents_map",
":",
"documents_map",
"=",
"self",
".",
"_get_documents_meta",
"(",
"documents_map",
")",
"yield",
"from",
"documents_map",
".",
"values",
"(",
")"
] | [
188,
4
] | [
245,
45
] | python | en | ['en', 'error', 'th'] | False |
SQLDocumentStore.get_all_labels | (self, index=None, filters: Optional[dict] = None) |
Return all labels in the document store
|
Return all labels in the document store
| def get_all_labels(self, index=None, filters: Optional[dict] = None):
"""
Return all labels in the document store
"""
index = index or self.label_index
# TODO: Use batch_size
label_rows = self.session.query(LabelORM).filter_by(index=index).all()
labels = [self._convert_sql_row_to_label(row) for row in label_rows]
return labels | [
"def",
"get_all_labels",
"(",
"self",
",",
"index",
"=",
"None",
",",
"filters",
":",
"Optional",
"[",
"dict",
"]",
"=",
"None",
")",
":",
"index",
"=",
"index",
"or",
"self",
".",
"label_index",
"# TODO: Use batch_size",
"label_rows",
"=",
"self",
".",
"session",
".",
"query",
"(",
"LabelORM",
")",
".",
"filter_by",
"(",
"index",
"=",
"index",
")",
".",
"all",
"(",
")",
"labels",
"=",
"[",
"self",
".",
"_convert_sql_row_to_label",
"(",
"row",
")",
"for",
"row",
"in",
"label_rows",
"]",
"return",
"labels"
] | [
259,
4
] | [
268,
21
] | python | en | ['en', 'error', 'th'] | False |
SQLDocumentStore.write_documents | (
self, documents: Union[List[dict], List[Document]], index: Optional[str] = None, batch_size: int = 10_000
) |
Indexes documents for later queries.
:param documents: a list of Python dictionaries or a list of Haystack Document objects.
For documents as dictionaries, the format is {"text": "<the-actual-text>"}.
Optionally: Include meta data via {"text": "<the-actual-text>",
"meta":{"name": "<some-document-name>, "author": "somebody", ...}}
It can be used for filtering and is accessible in the responses of the Finder.
:param index: add an optional index attribute to documents. It can be later used for filtering. For instance,
documents for evaluation can be indexed in a separate index than the documents for search.
:param batch_size: When working with large number of documents, batching can help reduce memory footprint.
:return: None
|
Indexes documents for later queries. | def write_documents(
self, documents: Union[List[dict], List[Document]], index: Optional[str] = None, batch_size: int = 10_000
):
"""
Indexes documents for later queries.
:param documents: a list of Python dictionaries or a list of Haystack Document objects.
For documents as dictionaries, the format is {"text": "<the-actual-text>"}.
Optionally: Include meta data via {"text": "<the-actual-text>",
"meta":{"name": "<some-document-name>, "author": "somebody", ...}}
It can be used for filtering and is accessible in the responses of the Finder.
:param index: add an optional index attribute to documents. It can be later used for filtering. For instance,
documents for evaluation can be indexed in a separate index than the documents for search.
:param batch_size: When working with large number of documents, batching can help reduce memory footprint.
:return: None
"""
index = index or self.index
if len(documents) == 0:
return
# Make sure we comply to Document class format
if isinstance(documents[0], dict):
document_objects = [Document.from_dict(d) if isinstance(d, dict) else d for d in documents]
else:
document_objects = documents
for i in range(0, len(document_objects), batch_size):
for doc in document_objects[i: i + batch_size]:
meta_fields = doc.meta or {}
vector_id = meta_fields.pop("vector_id", None)
meta_orms = [MetaORM(name=key, value=value) for key, value in meta_fields.items()]
doc_orm = DocumentORM(id=doc.id, text=doc.text, vector_id=vector_id, meta=meta_orms, index=index)
if self.update_existing_documents:
# First old meta data cleaning is required
self.session.query(MetaORM).filter_by(document_id=doc.id).delete()
self.session.merge(doc_orm)
else:
self.session.add(doc_orm)
try:
self.session.commit()
except Exception as ex:
logger.error(f"Transaction rollback: {ex.__cause__}")
# Rollback is important here otherwise self.session will be in inconsistent state and next call will fail
self.session.rollback()
raise ex | [
"def",
"write_documents",
"(",
"self",
",",
"documents",
":",
"Union",
"[",
"List",
"[",
"dict",
"]",
",",
"List",
"[",
"Document",
"]",
"]",
",",
"index",
":",
"Optional",
"[",
"str",
"]",
"=",
"None",
",",
"batch_size",
":",
"int",
"=",
"10_000",
")",
":",
"index",
"=",
"index",
"or",
"self",
".",
"index",
"if",
"len",
"(",
"documents",
")",
"==",
"0",
":",
"return",
"# Make sure we comply to Document class format",
"if",
"isinstance",
"(",
"documents",
"[",
"0",
"]",
",",
"dict",
")",
":",
"document_objects",
"=",
"[",
"Document",
".",
"from_dict",
"(",
"d",
")",
"if",
"isinstance",
"(",
"d",
",",
"dict",
")",
"else",
"d",
"for",
"d",
"in",
"documents",
"]",
"else",
":",
"document_objects",
"=",
"documents",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"len",
"(",
"document_objects",
")",
",",
"batch_size",
")",
":",
"for",
"doc",
"in",
"document_objects",
"[",
"i",
":",
"i",
"+",
"batch_size",
"]",
":",
"meta_fields",
"=",
"doc",
".",
"meta",
"or",
"{",
"}",
"vector_id",
"=",
"meta_fields",
".",
"pop",
"(",
"\"vector_id\"",
",",
"None",
")",
"meta_orms",
"=",
"[",
"MetaORM",
"(",
"name",
"=",
"key",
",",
"value",
"=",
"value",
")",
"for",
"key",
",",
"value",
"in",
"meta_fields",
".",
"items",
"(",
")",
"]",
"doc_orm",
"=",
"DocumentORM",
"(",
"id",
"=",
"doc",
".",
"id",
",",
"text",
"=",
"doc",
".",
"text",
",",
"vector_id",
"=",
"vector_id",
",",
"meta",
"=",
"meta_orms",
",",
"index",
"=",
"index",
")",
"if",
"self",
".",
"update_existing_documents",
":",
"# First old meta data cleaning is required",
"self",
".",
"session",
".",
"query",
"(",
"MetaORM",
")",
".",
"filter_by",
"(",
"document_id",
"=",
"doc",
".",
"id",
")",
".",
"delete",
"(",
")",
"self",
".",
"session",
".",
"merge",
"(",
"doc_orm",
")",
"else",
":",
"self",
".",
"session",
".",
"add",
"(",
"doc_orm",
")",
"try",
":",
"self",
".",
"session",
".",
"commit",
"(",
")",
"except",
"Exception",
"as",
"ex",
":",
"logger",
".",
"error",
"(",
"f\"Transaction rollback: {ex.__cause__}\"",
")",
"# Rollback is important here otherwise self.session will be in inconsistent state and next call will fail",
"self",
".",
"session",
".",
"rollback",
"(",
")",
"raise",
"ex"
] | [
270,
4
] | [
315,
24
] | python | en | ['en', 'error', 'th'] | False |
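The write path batches inserts and wraps `commit()` in try/except with an explicit `rollback()`, since a failed commit otherwise leaves the session unusable for later calls. The same pattern in isolation:

```python
# Commit-or-rollback helper: without rollback(), a failed commit leaves the
# session in a broken state and every later call raises as well.
import logging

logger = logging.getLogger(__name__)


def add_all_and_commit(session, rows):
    for row in rows:
        session.add(row)
    try:
        session.commit()
    except Exception as ex:
        logger.error("Transaction rollback: %s", ex.__cause__)
        session.rollback()   # restore the session before re-raising
        raise
```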
SQLDocumentStore.write_labels | (self, labels, index=None) | Write annotation labels into document store. | Write annotation labels into document store. | def write_labels(self, labels, index=None):
"""Write annotation labels into document store."""
labels = [Label.from_dict(l) if isinstance(l, dict) else l for l in labels]
index = index or self.label_index
# TODO: Use batch_size
for label in labels:
label_orm = LabelORM(
document_id=label.document_id,
no_answer=label.no_answer,
origin=label.origin,
question=label.question,
is_correct_answer=label.is_correct_answer,
is_correct_document=label.is_correct_document,
answer=label.answer,
offset_start_in_doc=label.offset_start_in_doc,
model_id=label.model_id,
index=index,
)
self.session.add(label_orm)
self.session.commit() | [
"def",
"write_labels",
"(",
"self",
",",
"labels",
",",
"index",
"=",
"None",
")",
":",
"labels",
"=",
"[",
"Label",
".",
"from_dict",
"(",
"l",
")",
"if",
"isinstance",
"(",
"l",
",",
"dict",
")",
"else",
"l",
"for",
"l",
"in",
"labels",
"]",
"index",
"=",
"index",
"or",
"self",
".",
"label_index",
"# TODO: Use batch_size",
"for",
"label",
"in",
"labels",
":",
"label_orm",
"=",
"LabelORM",
"(",
"document_id",
"=",
"label",
".",
"document_id",
",",
"no_answer",
"=",
"label",
".",
"no_answer",
",",
"origin",
"=",
"label",
".",
"origin",
",",
"question",
"=",
"label",
".",
"question",
",",
"is_correct_answer",
"=",
"label",
".",
"is_correct_answer",
",",
"is_correct_document",
"=",
"label",
".",
"is_correct_document",
",",
"answer",
"=",
"label",
".",
"answer",
",",
"offset_start_in_doc",
"=",
"label",
".",
"offset_start_in_doc",
",",
"model_id",
"=",
"label",
".",
"model_id",
",",
"index",
"=",
"index",
",",
")",
"self",
".",
"session",
".",
"add",
"(",
"label_orm",
")",
"self",
".",
"session",
".",
"commit",
"(",
")"
] | [
317,
4
] | [
337,
29
] | python | en | ['en', 'en', 'en'] | True |
SQLDocumentStore.update_vector_ids | (self, vector_id_map: Dict[str, str], index: Optional[str] = None, batch_size: int = 10_000) |
Update vector_ids for given document_ids.
:param vector_id_map: dict containing mapping of document_id -> vector_id.
:param index: filter documents by the optional index attribute for documents in database.
:param batch_size: When working with large number of documents, batching can help reduce memory footprint.
|
Update vector_ids for given document_ids. | def update_vector_ids(self, vector_id_map: Dict[str, str], index: Optional[str] = None, batch_size: int = 10_000):
"""
Update vector_ids for given document_ids.
:param vector_id_map: dict containing mapping of document_id -> vector_id.
:param index: filter documents by the optional index attribute for documents in database.
:param batch_size: When working with large number of documents, batching can help reduce memory footprint.
"""
index = index or self.index
for chunk_map in self.chunked_dict(vector_id_map, size=batch_size):
self.session.query(DocumentORM).filter(
DocumentORM.id.in_(chunk_map),
DocumentORM.index == index
).update({
DocumentORM.vector_id: case(
chunk_map,
value=DocumentORM.id,
)
}, synchronize_session=False)
try:
self.session.commit()
except Exception as ex:
logger.error(f"Transaction rollback: {ex.__cause__}")
self.session.rollback()
raise ex | [
"def",
"update_vector_ids",
"(",
"self",
",",
"vector_id_map",
":",
"Dict",
"[",
"str",
",",
"str",
"]",
",",
"index",
":",
"Optional",
"[",
"str",
"]",
"=",
"None",
",",
"batch_size",
":",
"int",
"=",
"10_000",
")",
":",
"index",
"=",
"index",
"or",
"self",
".",
"index",
"for",
"chunk_map",
"in",
"self",
".",
"chunked_dict",
"(",
"vector_id_map",
",",
"size",
"=",
"batch_size",
")",
":",
"self",
".",
"session",
".",
"query",
"(",
"DocumentORM",
")",
".",
"filter",
"(",
"DocumentORM",
".",
"id",
".",
"in_",
"(",
"chunk_map",
")",
",",
"DocumentORM",
".",
"index",
"==",
"index",
")",
".",
"update",
"(",
"{",
"DocumentORM",
".",
"vector_id",
":",
"case",
"(",
"chunk_map",
",",
"value",
"=",
"DocumentORM",
".",
"id",
",",
")",
"}",
",",
"synchronize_session",
"=",
"False",
")",
"try",
":",
"self",
".",
"session",
".",
"commit",
"(",
")",
"except",
"Exception",
"as",
"ex",
":",
"logger",
".",
"error",
"(",
"f\"Transaction rollback: {ex.__cause__}\"",
")",
"self",
".",
"session",
".",
"rollback",
"(",
")",
"raise",
"ex"
] | [
339,
4
] | [
363,
24
] | python | en | ['en', 'error', 'th'] | False |
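`chunked_dict` is referenced but not shown in this row; judging from its use it splits the mapping into size-bounded sub-dicts so each bulk CASE update stays small. One plausible implementation of such a helper (the name and behaviour here are an assumption, not the project's actual code):

```python
# Hypothetical chunked_dict: yield sub-dicts of at most `size` items each.
from itertools import islice


def chunked_dict(mapping, size):
    it = iter(mapping.items())
    while True:
        chunk = dict(islice(it, size))
        if not chunk:
            return
        yield chunk


vector_id_map = {f"doc-{i}": f"vec-{i}" for i in range(7)}
assert [len(c) for c in chunked_dict(vector_id_map, 3)] == [3, 3, 1]
```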
SQLDocumentStore.reset_vector_ids | (self, index: Optional[str] = None) |
Set vector IDs for all documents as None
|
Set vector IDs for all documents as None
| def reset_vector_ids(self, index: Optional[str] = None):
"""
Set vector IDs for all documents as None
"""
index = index or self.index
self.session.query(DocumentORM).filter_by(index=index).update({DocumentORM.vector_id: null()})
self.session.commit() | [
"def",
"reset_vector_ids",
"(",
"self",
",",
"index",
":",
"Optional",
"[",
"str",
"]",
"=",
"None",
")",
":",
"index",
"=",
"index",
"or",
"self",
".",
"index",
"self",
".",
"session",
".",
"query",
"(",
"DocumentORM",
")",
".",
"filter_by",
"(",
"index",
"=",
"index",
")",
".",
"update",
"(",
"{",
"DocumentORM",
".",
"vector_id",
":",
"null",
"(",
")",
"}",
")",
"self",
".",
"session",
".",
"commit",
"(",
")"
] | [
365,
4
] | [
371,
29
] | python | en | ['en', 'error', 'th'] | False |
SQLDocumentStore.update_document_meta | (self, id: str, meta: Dict[str, str]) |
Update the metadata dictionary of a document by specifying its string id
|
Update the metadata dictionary of a document by specifying its string id
| def update_document_meta(self, id: str, meta: Dict[str, str]):
"""
Update the metadata dictionary of a document by specifying its string id
"""
self.session.query(MetaORM).filter_by(document_id=id).delete()
meta_orms = [MetaORM(name=key, value=value, document_id=id) for key, value in meta.items()]
for m in meta_orms:
self.session.add(m)
self.session.commit() | [
"def",
"update_document_meta",
"(",
"self",
",",
"id",
":",
"str",
",",
"meta",
":",
"Dict",
"[",
"str",
",",
"str",
"]",
")",
":",
"self",
".",
"session",
".",
"query",
"(",
"MetaORM",
")",
".",
"filter_by",
"(",
"document_id",
"=",
"id",
")",
".",
"delete",
"(",
")",
"meta_orms",
"=",
"[",
"MetaORM",
"(",
"name",
"=",
"key",
",",
"value",
"=",
"value",
",",
"document_id",
"=",
"id",
")",
"for",
"key",
",",
"value",
"in",
"meta",
".",
"items",
"(",
")",
"]",
"for",
"m",
"in",
"meta_orms",
":",
"self",
".",
"session",
".",
"add",
"(",
"m",
")",
"self",
".",
"session",
".",
"commit",
"(",
")"
] | [
373,
4
] | [
381,
29
] | python | en | ['en', 'error', 'th'] | False |
SQLDocumentStore.get_document_count | (self, filters: Optional[Dict[str, List[str]]] = None, index: Optional[str] = None) |
Return the number of documents in the document store.
|
Return the number of documents in the document store.
| def get_document_count(self, filters: Optional[Dict[str, List[str]]] = None, index: Optional[str] = None) -> int:
"""
Return the number of documents in the document store.
"""
index = index or self.index
query = self.session.query(DocumentORM).filter_by(index=index)
if filters:
query = query.join(MetaORM)
for key, values in filters.items():
query = query.filter(MetaORM.name == key, MetaORM.value.in_(values))
count = query.count()
return count | [
"def",
"get_document_count",
"(",
"self",
",",
"filters",
":",
"Optional",
"[",
"Dict",
"[",
"str",
",",
"List",
"[",
"str",
"]",
"]",
"]",
"=",
"None",
",",
"index",
":",
"Optional",
"[",
"str",
"]",
"=",
"None",
")",
"->",
"int",
":",
"index",
"=",
"index",
"or",
"self",
".",
"index",
"query",
"=",
"self",
".",
"session",
".",
"query",
"(",
"DocumentORM",
")",
".",
"filter_by",
"(",
"index",
"=",
"index",
")",
"if",
"filters",
":",
"query",
"=",
"query",
".",
"join",
"(",
"MetaORM",
")",
"for",
"key",
",",
"values",
"in",
"filters",
".",
"items",
"(",
")",
":",
"query",
"=",
"query",
".",
"filter",
"(",
"MetaORM",
".",
"name",
"==",
"key",
",",
"MetaORM",
".",
"value",
".",
"in_",
"(",
"values",
")",
")",
"count",
"=",
"query",
".",
"count",
"(",
")",
"return",
"count"
] | [
383,
4
] | [
396,
20
] | python | en | ['en', 'error', 'th'] | False |
SQLDocumentStore.get_label_count | (self, index: Optional[str] = None) |
Return the number of labels in the document store
|
Return the number of labels in the document store
| def get_label_count(self, index: Optional[str] = None) -> int:
"""
Return the number of labels in the document store
"""
index = index or self.index
return self.session.query(LabelORM).filter_by(index=index).count() | [
"def",
"get_label_count",
"(",
"self",
",",
"index",
":",
"Optional",
"[",
"str",
"]",
"=",
"None",
")",
"->",
"int",
":",
"index",
"=",
"index",
"or",
"self",
".",
"index",
"return",
"self",
".",
"session",
".",
"query",
"(",
"LabelORM",
")",
".",
"filter_by",
"(",
"index",
"=",
"index",
")",
".",
"count",
"(",
")"
] | [
398,
4
] | [
403,
74
] | python | en | ['en', 'error', 'th'] | False |
SQLDocumentStore.delete_all_documents | (self, index: Optional[str] = None, filters: Optional[Dict[str, List[str]]] = None) |
Delete documents in an index. All documents are deleted if no filters are passed.
:param index: Index name to delete the document from.
:param filters: Optional filters to narrow down the documents to be deleted.
:return: None
|
Delete documents in an index. All documents are deleted if no filters are passed. | def delete_all_documents(self, index: Optional[str] = None, filters: Optional[Dict[str, List[str]]] = None):
"""
Delete documents in an index. All documents are deleted if no filters are passed.
:param index: Index name to delete the document from.
:param filters: Optional filters to narrow down the documents to be deleted.
:return: None
"""
index = index or self.index
document_ids_to_delete = self.session.query(DocumentORM.id).filter_by(index=index)
if filters:
# documents_query = documents_query.join(MetaORM)
for key, values in filters.items():
document_ids_to_delete = document_ids_to_delete.filter(
MetaORM.name == key,
MetaORM.value.in_(values),
DocumentORM.id == MetaORM.document_id
)
self.session.query(DocumentORM).filter(DocumentORM.id.in_(document_ids_to_delete)).delete(synchronize_session=False)
self.session.commit() | [
"def",
"delete_all_documents",
"(",
"self",
",",
"index",
":",
"Optional",
"[",
"str",
"]",
"=",
"None",
",",
"filters",
":",
"Optional",
"[",
"Dict",
"[",
"str",
",",
"List",
"[",
"str",
"]",
"]",
"]",
"=",
"None",
")",
":",
"index",
"=",
"index",
"or",
"self",
".",
"index",
"document_ids_to_delete",
"=",
"self",
".",
"session",
".",
"query",
"(",
"DocumentORM",
".",
"id",
")",
".",
"filter_by",
"(",
"index",
"=",
"index",
")",
"if",
"filters",
":",
"# documents_query = documents_query.join(MetaORM)",
"for",
"key",
",",
"values",
"in",
"filters",
".",
"items",
"(",
")",
":",
"document_ids_to_delete",
"=",
"document_ids_to_delete",
".",
"filter",
"(",
"MetaORM",
".",
"name",
"==",
"key",
",",
"MetaORM",
".",
"value",
".",
"in_",
"(",
"values",
")",
",",
"DocumentORM",
".",
"id",
"==",
"MetaORM",
".",
"document_id",
")",
"self",
".",
"session",
".",
"query",
"(",
"DocumentORM",
")",
".",
"filter",
"(",
"DocumentORM",
".",
"id",
".",
"in_",
"(",
"document_ids_to_delete",
")",
")",
".",
"delete",
"(",
"synchronize_session",
"=",
"False",
")",
"self",
".",
"session",
".",
"commit",
"(",
")"
] | [
442,
4
] | [
463,
29
] | python | en | ['en', 'error', 'th'] | False |
SQLDocumentStore._column_windows | (self, session, column, windowsize) | Return a series of WHERE clauses against
a given column that break it into windows.
Result is an iterable of tuples, consisting of
((start, end), whereclause), where (start, end) are the ids.
The code is taken from: https://github.com/sqlalchemy/sqlalchemy/wiki/RangeQuery-and-WindowedRangeQuery
| Return a series of WHERE clauses against
a given column that break it into windows. | def _column_windows(self, session, column, windowsize):
"""Return a series of WHERE clauses against
a given column that break it into windows.
Result is an iterable of tuples, consisting of
((start, end), whereclause), where (start, end) are the ids.
The code is taken from: https://github.com/sqlalchemy/sqlalchemy/wiki/RangeQuery-and-WindowedRangeQuery
"""
def int_for_range(start_id, end_id):
if end_id:
return and_(
column >= start_id,
column < end_id
)
else:
return column >= start_id
q = session.query(
column,
func.row_number(). \
over(order_by=column). \
label('rownum')
). \
from_self(column)
if windowsize > 1:
q = q.filter(text("rownum %% %d=1" % windowsize))
intervals = [id for id, in q]
while intervals:
start = intervals.pop(0)
if intervals:
end = intervals[0]
else:
end = None
yield int_for_range(start, end) | [
"def",
"_column_windows",
"(",
"self",
",",
"session",
",",
"column",
",",
"windowsize",
")",
":",
"def",
"int_for_range",
"(",
"start_id",
",",
"end_id",
")",
":",
"if",
"end_id",
":",
"return",
"and_",
"(",
"column",
">=",
"start_id",
",",
"column",
"<",
"end_id",
")",
"else",
":",
"return",
"column",
">=",
"start_id",
"q",
"=",
"session",
".",
"query",
"(",
"column",
",",
"func",
".",
"row_number",
"(",
")",
".",
"over",
"(",
"order_by",
"=",
"column",
")",
".",
"label",
"(",
"'rownum'",
")",
")",
".",
"from_self",
"(",
"column",
")",
"if",
"windowsize",
">",
"1",
":",
"q",
"=",
"q",
".",
"filter",
"(",
"text",
"(",
"\"rownum %% %d=1\"",
"%",
"windowsize",
")",
")",
"intervals",
"=",
"[",
"id",
"for",
"id",
",",
"in",
"q",
"]",
"while",
"intervals",
":",
"start",
"=",
"intervals",
".",
"pop",
"(",
"0",
")",
"if",
"intervals",
":",
"end",
"=",
"intervals",
"[",
"0",
"]",
"else",
":",
"end",
"=",
"None",
"yield",
"int_for_range",
"(",
"start",
",",
"end",
")"
] | [
480,
4
] | [
517,
43
] | python | en | ['en', 'en', 'en'] | True |
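The windowed-query recipe turns an ordered id column into half-open ranges so each batch can be fetched with a simple range filter. The boundary arithmetic is easiest to see with a plain Python list:

```python
# Pure-Python illustration of the windowing idea: take every
# `windowsize`-th ordered id as a boundary and emit (start, end) ranges,
# where end=None means "to the end of the table".
def column_windows(ordered_ids, windowsize):
    boundaries = ordered_ids[::windowsize]
    for i, start in enumerate(boundaries):
        end = boundaries[i + 1] if i + 1 < len(boundaries) else None
        yield start, end


ids = [1, 4, 9, 16, 25, 36, 49]
# Each range covers windowsize rows (the SQL version filters
# column >= start AND column < end), except possibly the last one.
assert list(column_windows(ids, 3)) == [(1, 16), (16, 49), (49, None)]
```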
SQLDocumentStore._windowed_query | (self, q, column, windowsize) | Break a Query into windows on a given column. | Break a Query into windows on a given column. | def _windowed_query(self, q, column, windowsize):
""""Break a Query into windows on a given column."""
for whereclause in self._column_windows(
q.session,
column, windowsize):
for row in q.filter(whereclause).order_by(column):
yield row | [
"def",
"_windowed_query",
"(",
"self",
",",
"q",
",",
"column",
",",
"windowsize",
")",
":",
"for",
"whereclause",
"in",
"self",
".",
"_column_windows",
"(",
"q",
".",
"session",
",",
"column",
",",
"windowsize",
")",
":",
"for",
"row",
"in",
"q",
".",
"filter",
"(",
"whereclause",
")",
".",
"order_by",
"(",
"column",
")",
":",
"yield",
"row"
] | [
519,
4
] | [
526,
25
] | python | en | ['en', 'gl', 'en'] | True |
FlexGroupLayer.op_cat | (self, x_out, m, groups) |
Usage: Concat by input joints ratio of the first layer, always keep the ratio = N_i : 1; N_i is joint number in the ith group.
:return: Concat with other info and adjust the channel size
|
Usage: Concat by input joints ratio of the first layer, always keep the ratio = N_i : 1; N_i is joint number in the ith group.
:return: Concat with other info and adjust the channel size
| def op_cat(self, x_out, m, groups):
"""
Usage: Concat by input joints ratio of the first layer, always keep the ratio = N_i : 1; N_i is joint number in the ith group.
:return: Concat with other info and adjust the channel size
"""
cat_m = []
for i,group in enumerate(groups):
indexes = group
xs = []
for index in indexes:
xs.append(x_out[:,index[0]:index[1]])
x_cat = torch.cat(xs,dim=1)
cat_m.append(torch.cat([x_cat, m[i]], dim=1))
return torch.cat(cat_m, dim=1) | [
"def",
"op_cat",
"(",
"self",
",",
"x_out",
",",
"m",
",",
"groups",
")",
":",
"cat_m",
"=",
"[",
"]",
"for",
"i",
",",
"group",
"in",
"enumerate",
"(",
"groups",
")",
":",
"indexes",
"=",
"group",
"xs",
"=",
"[",
"]",
"for",
"index",
"in",
"indexes",
":",
"xs",
".",
"append",
"(",
"x_out",
"[",
":",
",",
"index",
"[",
"0",
"]",
":",
"index",
"[",
"1",
"]",
"]",
")",
"x_cat",
"=",
"torch",
".",
"cat",
"(",
"xs",
",",
"dim",
"=",
"1",
")",
"cat_m",
".",
"append",
"(",
"torch",
".",
"cat",
"(",
"[",
"x_cat",
",",
"m",
"[",
"i",
"]",
"]",
",",
"dim",
"=",
"1",
")",
")",
"return",
"torch",
".",
"cat",
"(",
"cat_m",
",",
"dim",
"=",
"1",
")"
] | [
164,
4
] | [
177,
38
] | python | en | ['en', 'error', 'th'] | False |
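Most of the group handling above is channel slicing plus `torch.cat`. A self-contained sketch of gathering per-group channel ranges and appending extra per-group features (shapes invented for illustration):

```python
# Gather channel ranges per group and concatenate with extra per-group features.
import torch


def concat_groups(x_out, extras, groups):
    # x_out: (B, C, T); groups: list of lists of (start, end) channel ranges;
    # extras: one tensor (B, C_i, T) per group, appended after its channels.
    pieces = []
    for group, extra in zip(groups, extras):
        chans = torch.cat([x_out[:, s:e] for s, e in group], dim=1)
        pieces.append(torch.cat([chans, extra], dim=1))
    return torch.cat(pieces, dim=1)


B, T = 2, 5
x = torch.randn(B, 8, T)
groups = [[(0, 4)], [(4, 8)]]                       # two groups of 4 channels
extras = [torch.zeros(B, 2, T), torch.zeros(B, 3, T)]
assert concat_groups(x, extras, groups).shape == (B, 8 + 2 + 3, T)
```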
FlexGroupLayer._keep_ratio | (self, inc_num, fix_seq, index, added_dim, by_ratio) |
For concat by a certain ratio, you can change [joint_dim] to give various concat ratios.
:param inc_num: input channel number of a group. type:torch.Tensor
:param fix_seq: output index sequence of 1st layer, knowing the groups number.
:return: a concatenated input channel number
|
For concat by a certain ratio, you can change [joint_dim] to give various concat ratios.
:param inc_num: input channel number of a group. type:torch.Tensor
:param fix_seq: output index sequence of 1st layer, knowing the groups number.
:return: a concatenated input channel number
| def _keep_ratio(self, inc_num, fix_seq, index, added_dim, by_ratio):
"""
For concat by a certain ratio, you can change [joint_dim] to give various concat ratios.
:param inc_num: input channel number of a group. type:torch.Tensor
:param fix_seq: output index sequence of 1st layer, knowing the groups number.
:return: a concatenated input channel number
"""
ori_size = len(fix_seq[index])
# add [x,y] dimension
if by_ratio: # Add the dimension by ratio. e.g. 20%, 40%...
out_num = int(inc_num * (ori_size + added_dim) / ori_size)
else: # Add the dimension by discrete values
out_num = added_dim + inc_num
concat_size = out_num - inc_num
return out_num, concat_size | [
"def",
"_keep_ratio",
"(",
"self",
",",
"inc_num",
",",
"fix_seq",
",",
"index",
",",
"added_dim",
",",
"by_ratio",
")",
":",
"ori_size",
"=",
"len",
"(",
"fix_seq",
"[",
"index",
"]",
")",
"# add [x,y] dimension",
"if",
"by_ratio",
":",
"# Add the dimension by ratio. e.g. 20%, 40%...",
"out_num",
"=",
"int",
"(",
"inc_num",
"*",
"(",
"ori_size",
"+",
"added_dim",
")",
"/",
"ori_size",
")",
"else",
":",
"# Add the dimension by discrete values",
"out_num",
"=",
"added_dim",
"+",
"inc_num",
"concat_size",
"=",
"out_num",
"-",
"inc_num",
"return",
"out_num",
",",
"concat_size"
] | [
179,
4
] | [
193,
35
] | python | en | ['en', 'error', 'th'] | False |
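A quick worked example of the two branches: with `by_ratio` set, growing a 4-joint group by one joint scales its channel budget by 5/4; otherwise the extra dimensions are simply added on top.

```python
# Worked example of the channel-growth arithmetic in _keep_ratio.
inc_num, ori_size, added_dim = 64, 4, 1

# by_ratio=True: keep channels proportional to group size (4 -> 5 joints).
out_num = int(inc_num * (ori_size + added_dim) / ori_size)   # 64 * 5/4 = 80
concat_size = out_num - inc_num                               # 16 extra channels

# by_ratio=False: just add the requested number of channels.
out_num_flat = added_dim + inc_num                            # 65

assert (out_num, concat_size, out_num_flat) == (80, 16, 65)
```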
FlexGroupLayer._get_partial_input | (self, x) |
Usage: Get inputs as Group representation
:param x: all 2d joints inputs, x.shape=[B, 34, T]
:param out_seq: output index sequence of each layer
:return: 1. x_self: Each group inputs; type: list
2. x_other: Out of the group values; type: list
|
Usage: Get inputs as Group representation
:param x: all 2d joints inputs, x.shape=[B, 34, T]
:param out_seq: output index sequence of each layer
:return: 1. x_self: Each group inputs; type: list
2. x_other: Out of the group values; type: list
| def _get_partial_input(self, x):
"""
Usage: Get inputs as Group representation
:param x: all 2d joints inputs, x.shape=[B, 34, T]
:param out_seq: output index sequence of each layer
:return: 1. x_self: Each group inputs; type: list
2. x_other: Out of the group values; type: list
"""
x_other = []
x_self = []
in_dim = []
v = 0
for i,group in enumerate(self.groups):
indexes = group
xs = []
for index in indexes:
xs.append(x[:,index[0]:index[1]])
x_cat = torch.cat(xs,dim=1)
x_self.append(x_cat)
x_other.append(torch.cat([x[:,0:v],x[:,(v+x_cat.shape[1]):]],dim=1))
in_dim.append(x_other[i].shape[1])
v += x_cat.shape[1]
return x_other, x | [
"def",
"_get_partial_input",
"(",
"self",
",",
"x",
")",
":",
"x_other",
"=",
"[",
"]",
"x_self",
"=",
"[",
"]",
"in_dim",
"=",
"[",
"]",
"v",
"=",
"0",
"for",
"i",
",",
"group",
"in",
"enumerate",
"(",
"self",
".",
"groups",
")",
":",
"indexes",
"=",
"group",
"xs",
"=",
"[",
"]",
"for",
"index",
"in",
"indexes",
":",
"xs",
".",
"append",
"(",
"x",
"[",
":",
",",
"index",
"[",
"0",
"]",
":",
"index",
"[",
"1",
"]",
"]",
")",
"x_cat",
"=",
"torch",
".",
"cat",
"(",
"xs",
",",
"dim",
"=",
"1",
")",
"x_self",
".",
"append",
"(",
"x_cat",
")",
"x_other",
".",
"append",
"(",
"torch",
".",
"cat",
"(",
"[",
"x",
"[",
":",
",",
"0",
":",
"v",
"]",
",",
"x",
"[",
":",
",",
"(",
"v",
"+",
"x_cat",
".",
"shape",
"[",
"1",
"]",
")",
":",
"]",
"]",
",",
"dim",
"=",
"1",
")",
")",
"in_dim",
".",
"append",
"(",
"x_other",
"[",
"i",
"]",
".",
"shape",
"[",
"1",
"]",
")",
"v",
"+=",
"x_cat",
".",
"shape",
"[",
"1",
"]",
"return",
"x_other",
",",
"x"
] | [
198,
4
] | [
220,
25
] | python | en | ['en', 'error', 'th'] | False |
FlexGroupLayer._split_fc | (self, x, dtype) |
Usage: Split channels into groups
:param x: Input features
:return: x1: each group features. type: list
x_cat: concatenate each group features. type:torch.Tensor
|
Usage: Split channels into groups
:param x: Input features
:return: x1: each group features. type: list
x_cat: concatenate each group features. type:torch.Tensor
| def _split_fc(self, x, dtype):
"""
Usage: Split channels into groups
:param x: Input features
:return: x1: each group features. type: list
x_cat: concatenate each group features. type:torch.Tensor
"""
x1 = []
for i,group in enumerate(self.groups):
indexes = group
xs = []
for index in indexes:
if len(index) == 1:
xs.append(x[:, index[0]:index[0]+1].type(dtype))
else:
xs.append(x[:,index[0]:index[1]].type(dtype))
x1.append(torch.cat(xs,dim=1)) #Each group features
x_cat = torch.cat(x1, dim=1)
return x1, x_cat | [
"def",
"_split_fc",
"(",
"self",
",",
"x",
",",
"dtype",
")",
":",
"x1",
"=",
"[",
"]",
"for",
"i",
",",
"group",
"in",
"enumerate",
"(",
"self",
".",
"groups",
")",
":",
"indexes",
"=",
"group",
"xs",
"=",
"[",
"]",
"for",
"index",
"in",
"indexes",
":",
"if",
"len",
"(",
"index",
")",
"==",
"1",
":",
"xs",
".",
"append",
"(",
"x",
"[",
":",
",",
"index",
"[",
"0",
"]",
":",
"index",
"[",
"0",
"]",
"+",
"1",
"]",
".",
"type",
"(",
"dtype",
")",
")",
"else",
":",
"xs",
".",
"append",
"(",
"x",
"[",
":",
",",
"index",
"[",
"0",
"]",
":",
"index",
"[",
"1",
"]",
"]",
".",
"type",
"(",
"dtype",
")",
")",
"x1",
".",
"append",
"(",
"torch",
".",
"cat",
"(",
"xs",
",",
"dim",
"=",
"1",
")",
")",
"#Each group features",
"x_cat",
"=",
"torch",
".",
"cat",
"(",
"x1",
",",
"dim",
"=",
"1",
")",
"return",
"x1",
",",
"x_cat"
] | [
222,
4
] | [
240,
24
] | python | en | ['en', 'error', 'th'] | False |
FlexGroupLayer._group_conv | (self, x, groups) |
Usage: full connection within a group
:param x: features
:param groups: depend on concat or not of different input size
:return: final outputs after group conv.
|
Usage: full connection within a group
:param x: features
:param groups: depend on concat or not of different input size
:return: final outputs after group conv.
| def _group_conv(self, x, groups):
"""
Usage: full connection within a group
:param x: features
:param groups: depend on concat or not of different input size
:return: final outputs after group conv.
"""
outs = []
ks = self.kernel_size
for i, group in enumerate(groups):
indexes = group
xs = []
for index in indexes:
if len(index) == 1:
xs.append(x[:, index[0]:index[0]+1])
else:
xs.append(x[:,index[0]:index[1]])
x1 = torch.cat(xs,dim=1)
x_out = self._reshape_x_offset(x1, ks)
outs.append(self.group_conv[i](x_out))
return torch.cat(outs, dim=1) | [
"def",
"_group_conv",
"(",
"self",
",",
"x",
",",
"groups",
")",
":",
"outs",
"=",
"[",
"]",
"ks",
"=",
"self",
".",
"kernel_size",
"for",
"i",
",",
"group",
"in",
"enumerate",
"(",
"groups",
")",
":",
"indexes",
"=",
"group",
"xs",
"=",
"[",
"]",
"for",
"index",
"in",
"indexes",
":",
"if",
"len",
"(",
"index",
")",
"==",
"1",
":",
"xs",
".",
"append",
"(",
"x",
"[",
":",
",",
"index",
"[",
"0",
"]",
":",
"index",
"[",
"0",
"]",
"+",
"1",
"]",
")",
"else",
":",
"xs",
".",
"append",
"(",
"x",
"[",
":",
",",
"index",
"[",
"0",
"]",
":",
"index",
"[",
"1",
"]",
"]",
")",
"x1",
"=",
"torch",
".",
"cat",
"(",
"xs",
",",
"dim",
"=",
"1",
")",
"x_out",
"=",
"self",
".",
"_reshape_x_offset",
"(",
"x1",
",",
"ks",
")",
"outs",
".",
"append",
"(",
"self",
".",
"group_conv",
"[",
"i",
"]",
"(",
"x_out",
")",
")",
"return",
"torch",
".",
"cat",
"(",
"outs",
",",
"dim",
"=",
"1",
")"
] | [
242,
4
] | [
262,
37
] | python | en | ['en', 'error', 'th'] | False |
dc | (input1, input2) |
Dice coefficient
Computes the Dice coefficient (also known as Sorensen index) between the binary
objects in two images.
The metric is defined as
.. math::
DC = \frac{2|A \cap B|}{|A|+|B|}
, where A is the first and B the second set of samples (here binary objects).
Parameters
----------
input1: array_like
Input data containing objects. Can be any type but will be converted
into binary: background where 0, object everywhere else.
input2: array_like
Input data containing objects. Can be any type but will be converted
into binary: background where 0, object everywhere else.
Returns
-------
dc: float
The Dice coefficient between the object(s) in `input1` and the
object(s) in `input2`. It ranges from 0 (no overlap) to 1 (perfect overlap).
Notes
-----
This is a real metric.
|
Dice coefficient | def dc(input1, input2):
"""
Dice coefficient
Computes the Dice coefficient (also known as Sorensen index) between the binary
objects in two images.
The metric is defined as
.. math::
DC = \frac{2|A \cap B|}{|A|+|B|}
, where A is the first and B the second set of samples (here binary objects).
Parameters
----------
input1: array_like
Input data containing objects. Can be any type but will be converted
into binary: background where 0, object everywhere else.
input2: array_like
Input data containing objects. Can be any type but will be converted
into binary: background where 0, object everywhere else.
Returns
-------
dc: float
The Dice coefficient between the object(s) in `input1` and the
object(s) in `input2`. It ranges from 0 (no overlap) to 1 (perfect overlap).
Notes
-----
This is a real metric.
"""
input1 = numpy.atleast_1d(input1.astype(numpy.bool))
input2 = numpy.atleast_1d(input2.astype(numpy.bool))
intersection = numpy.count_nonzero(input1 & input2)
size_i1 = numpy.count_nonzero(input1)
size_i2 = numpy.count_nonzero(input2)
try:
dc = 2. * intersection / float(size_i1 + size_i2)
except ZeroDivisionError:
dc = 0.0
return dc | [
"def",
"dc",
"(",
"input1",
",",
"input2",
")",
":",
"input1",
"=",
"numpy",
".",
"atleast_1d",
"(",
"input1",
".",
"astype",
"(",
"numpy",
".",
"bool",
")",
")",
"input2",
"=",
"numpy",
".",
"atleast_1d",
"(",
"input2",
".",
"astype",
"(",
"numpy",
".",
"bool",
")",
")",
"intersection",
"=",
"numpy",
".",
"count_nonzero",
"(",
"input1",
"&",
"input2",
")",
"size_i1",
"=",
"numpy",
".",
"count_nonzero",
"(",
"input1",
")",
"size_i2",
"=",
"numpy",
".",
"count_nonzero",
"(",
"input2",
")",
"try",
":",
"dc",
"=",
"2.",
"*",
"intersection",
"/",
"float",
"(",
"size_i1",
"+",
"size_i2",
")",
"except",
"ZeroDivisionError",
":",
"dc",
"=",
"0.0",
"return",
"dc"
] | [
9,
0
] | [
56,
13
] | python | en | ['en', 'error', 'th'] | False |
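A quick numeric check of DC = 2|A∩B| / (|A|+|B|) on two small masks (note that `numpy.bool`, used in the function above, has been removed in recent NumPy releases, so plain `bool` is used here):

```python
# Dice coefficient on two small binary masks.
import numpy as np

a = np.array([[1, 1, 0],
              [0, 1, 0]], dtype=bool)
b = np.array([[1, 0, 0],
              [0, 1, 1]], dtype=bool)

intersection = np.count_nonzero(a & b)        # 2 overlapping foreground pixels
dice = 2.0 * intersection / (np.count_nonzero(a) + np.count_nonzero(b))
assert dice == 2.0 * 2 / (3 + 3)              # = 0.666...
```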
dice_ratio | (preds, labels) |
preds & labels should only contain 0 or 1.
|
preds & labels should only contain 0 or 1.
| def dice_ratio(preds, labels):
'''
preds & labels should only contain 0 or 1.
'''
if np.sum(preds) + np.sum(labels) == 0:
return 1
return np.sum(preds[labels==1])*2.0 / (np.sum(preds) + np.sum(labels)) | [
"def",
"dice_ratio",
"(",
"preds",
",",
"labels",
")",
":",
"if",
"np",
".",
"sum",
"(",
"preds",
")",
"+",
"np",
".",
"sum",
"(",
"labels",
")",
"==",
"0",
":",
"return",
"1",
"return",
"np",
".",
"sum",
"(",
"preds",
"[",
"labels",
"==",
"1",
"]",
")",
"*",
"2.0",
"/",
"(",
"np",
".",
"sum",
"(",
"preds",
")",
"+",
"np",
".",
"sum",
"(",
"labels",
")",
")"
] | [
59,
0
] | [
65,
74
] | python | en | ['en', 'error', 'th'] | False |
GirderExternalDataCli.__init__ | (self, apiKey, objectStore) | initialization function to create a GirderCli instance, will attempt
to authenticate with the designated Girder instance.
| initialization function to create a GirderCli instance, will attempt
to authenticate with the designated Girder instance.
| def __init__(self, apiKey, objectStore):
"""initialization function to create a GirderCli instance, will attempt
to authenticate with the designated Girder instance.
"""
GirderClient.__init__(self,
apiUrl='https://data.kitware.com/api/v1')
self.objectStore = objectStore
self.authenticate(apiKey=apiKey) | [
"def",
"__init__",
"(",
"self",
",",
"apiKey",
",",
"objectStore",
")",
":",
"GirderClient",
".",
"__init__",
"(",
"self",
",",
"apiUrl",
"=",
"'https://data.kitware.com/api/v1'",
")",
"self",
".",
"objectStore",
"=",
"objectStore",
"self",
".",
"authenticate",
"(",
"apiKey",
"=",
"apiKey",
")"
] | [
19,
4
] | [
26,
40
] | python | en | ['en', 'en', 'en'] | True |
GirderExternalDataCli.content_link_upload | (self, localFolder, parentId, ext='.sha512',
parentType='folder', blacklist=['.git', '.ExternalData'],
reuseExisting=True, dryRun=False) | Upload objects corresponding to CMake ExternalData content links.
This will recursively walk down the tree and find content links ending
with the specified extension and create a hierarchy on the server under
the parentId.
:param ext: Content link file extension.
:param parentId: id of the parent in Girder or resource path.
:param parentType: one of (collection,folder,user), default of folder.
:param reuseExisting: bool whether to accept an existing item of
the same name in the same location, or create a new one instead.
:param dryRun: Do not actually upload any content.
| Upload objects corresponding to CMake ExternalData content links. | def content_link_upload(self, localFolder, parentId, ext='.sha512',
parentType='folder', blacklist=['.git', '.ExternalData'],
reuseExisting=True, dryRun=False):
"""Upload objects corresponding to CMake ExternalData content links.
This will recursively walk down the tree and find content links ending
with the specified extension and create a hierarchy on the server under
the parentId.
:param ext: Content link file extension.
:param parentId: id of the parent in Girder or resource path.
:param parentType: one of (collection,folder,user), default of folder.
:param reuseExisting: bool whether to accept an existing item of
the same name in the same location, or create a new one instead.
:param dryRun: Do not actually upload any content.
"""
parentId = self._checkResourcePath(parentId)
localFolder = os.path.normpath(localFolder)
for entry in os.listdir(localFolder):
if entry in blacklist:
print("Ignoring file %s as it is blacklisted" % entry)
continue
full_entry = os.path.join(localFolder, entry)
if os.path.islink(full_entry):
# os.walk skips symlinks by default
print("Skipping file %s as it is a symlink" % entry)
continue
if os.path.isdir(full_entry):
self._uploadFolderRecursive(
full_entry, parentId, parentType, ext,
reuseExisting=reuseExisting, blacklist=blacklist,
dryRun=dryRun) | [
"def",
"content_link_upload",
"(",
"self",
",",
"localFolder",
",",
"parentId",
",",
"ext",
"=",
"'.sha512'",
",",
"parentType",
"=",
"'folder'",
",",
"blacklist",
"=",
"[",
"'.git'",
",",
"'.ExternalData'",
"]",
",",
"reuseExisting",
"=",
"True",
",",
"dryRun",
"=",
"False",
")",
":",
"parentId",
"=",
"self",
".",
"_checkResourcePath",
"(",
"parentId",
")",
"localFolder",
"=",
"os",
".",
"path",
".",
"normpath",
"(",
"localFolder",
")",
"for",
"entry",
"in",
"os",
".",
"listdir",
"(",
"localFolder",
")",
":",
"if",
"entry",
"in",
"blacklist",
":",
"print",
"(",
"\"Ignoring file %s as it is blacklisted\"",
"%",
"entry",
")",
"continue",
"full_entry",
"=",
"os",
".",
"path",
".",
"join",
"(",
"localFolder",
",",
"entry",
")",
"if",
"os",
".",
"path",
".",
"islink",
"(",
"full_entry",
")",
":",
"# os.walk skips symlinks by default",
"print",
"(",
"\"Skipping file %s as it is a symlink\"",
"%",
"entry",
")",
"continue",
"if",
"os",
".",
"path",
".",
"isdir",
"(",
"full_entry",
")",
":",
"self",
".",
"_uploadFolderRecursive",
"(",
"full_entry",
",",
"parentId",
",",
"parentType",
",",
"ext",
",",
"reuseExisting",
"=",
"reuseExisting",
",",
"blacklist",
"=",
"blacklist",
",",
"dryRun",
"=",
"dryRun",
")"
] | [
28,
4
] | [
59,
34
] | python | en | ['en', 'en', 'en'] | True |
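Stripped of the upload calls, the recursion above is a directory walk that collects files matching `*<ext>` while skipping blacklisted names and symlinks. That discovery step on its own:

```python
# Find CMake ExternalData content links (*.sha512 by default) under a folder,
# skipping blacklisted directory/file names and symlinks.
import fnmatch
import os


def find_content_links(local_folder, ext=".sha512", blacklist=(".git", ".ExternalData")):
    matches = []
    for root, dirnames, filenames in os.walk(local_folder):
        # Prune blacklisted directories in place so os.walk never descends into them.
        dirnames[:] = [d for d in dirnames if d not in blacklist]
        for name in fnmatch.filter(filenames, "*" + ext):
            full = os.path.join(root, name)
            if not os.path.islink(full):
                matches.append(full)
    return matches
```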
GirderExternalDataCli._uploadContentLinkItem | (self, name, content_link, folder,
ext='.sha512', parentType='folder', dryRun=False,
reuseExisting=False) | Upload objects corresponding to CMake ExternalData content links.
This will upload the file with name, *name*, for the content link
located at *content_link* to the Girder folder, *folder*.
:param ext: Content link file extension.
:param parentType: one of (collection,folder,user), default of folder.
:param reuseExisting: bool whether to accept an existing item of
the same name in the same location, or create a new one instead.
:param dryRun: Do not actually upload any content.
| Upload objects corresponding to CMake ExternalData content links. | def _uploadContentLinkItem(self, name, content_link, folder,
ext='.sha512', parentType='folder', dryRun=False,
reuseExisting=False):
"""Upload objects corresponding to CMake ExternalData content links.
This will upload the file with name, *name*, for the content link
located at *content_link* to the Girder folder, *folder*.
:param ext: Content link file extension.
:param parentType: one of (collection,folder,user), default of folder.
:param reuseExisting: bool whether to accept an existing item of
the same name in the same location, or create a new one instead.
:param dryRun: Do not actually upload any content.
"""
content_link = os.path.normpath(content_link)
if os.path.isfile(content_link) and \
fnmatch.fnmatch(content_link, '*' + ext):
if parentType != 'folder':
raise Exception(('Attempting to upload an item under a %s.'
% parentType) +
' Items can only be added to folders.')
else:
with open(content_link, 'r') as fp:
hash_value = fp.readline().strip()
self._uploadAsItem(
name,
folder['_id'],
os.path.join(self.objectStore, hash_value),
reuseExisting=reuseExisting,
dryRun=dryRun) | [
"def",
"_uploadContentLinkItem",
"(",
"self",
",",
"name",
",",
"content_link",
",",
"folder",
",",
"ext",
"=",
"'.sha512'",
",",
"parentType",
"=",
"'folder'",
",",
"dryRun",
"=",
"False",
",",
"reuseExisting",
"=",
"False",
")",
":",
"content_link",
"=",
"os",
".",
"path",
".",
"normpath",
"(",
"content_link",
")",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"content_link",
")",
"and",
"fnmatch",
".",
"fnmatch",
"(",
"content_link",
",",
"'*'",
"+",
"ext",
")",
":",
"if",
"parentType",
"!=",
"'folder'",
":",
"raise",
"Exception",
"(",
"(",
"'Attempting to upload an item under a %s.'",
"%",
"parentType",
")",
"+",
"' Items can only be added to folders.'",
")",
"else",
":",
"with",
"open",
"(",
"content_link",
",",
"'r'",
")",
"as",
"fp",
":",
"hash_value",
"=",
"fp",
".",
"readline",
"(",
")",
".",
"strip",
"(",
")",
"self",
".",
"_uploadAsItem",
"(",
"name",
",",
"folder",
"[",
"'_id'",
"]",
",",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"objectStore",
",",
"hash_value",
")",
",",
"reuseExisting",
"=",
"reuseExisting",
",",
"dryRun",
"=",
"dryRun",
")"
] | [
61,
4
] | [
90,
34
] | python | en | ['en', 'en', 'en'] | True |
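A minimal, hypothetical sketch of the content-link resolution that _uploadContentLinkItem performs: read the hash from a CMake ExternalData .sha512 file and join it with the client's object store. The paths 'data/input.png.sha512' and '/tmp/ExternalData/Objects/SHA512' are made-up examples, not values from this dataset row.

import os

objectStore = '/tmp/ExternalData/Objects/SHA512'           # hypothetical object-store root
content_link = os.path.normpath('data/input.png.sha512')   # hypothetical content link

if os.path.isfile(content_link) and content_link.endswith('.sha512'):
    with open(content_link, 'r') as fp:
        hash_value = fp.readline().strip()
    # The real method would upload this file as a Girder item named 'input.png'.
    print('would upload', os.path.join(objectStore, hash_value))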
GirderExternalDataCli._uploadFolderRecursive | (self, localFolder, parentId, parentType,
ext='.sha512',
reuseExisting=False,
blacklist=[],
dryRun=False) | Function to recursively upload a folder and all of its descendants.
:param localFolder: full path to local folder to be uploaded
:param parentId: id of parent in Girder,
where new folder will be added
:param parentType: one of (collection, folder, user)
:param leaf_folders_as_items: whether leaf folders should have all
files uploaded as single items
:param reuseExisting: boolean indicating whether to accept an existing
item
of the same name in the same location, or create a new one instead
| Function to recursively upload a folder and all of its descendants.
:param localFolder: full path to local folder to be uploaded
:param parentId: id of parent in Girder,
where new folder will be added
:param parentType: one of (collection, folder, user)
:param leaf_folders_as_items: whether leaf folders should have all
files uploaded as single items
:param reuseExisting: boolean indicating whether to accept an existing
item
of the same name in the same location, or create a new one instead
| def _uploadFolderRecursive(self, localFolder, parentId, parentType,
ext='.sha512',
reuseExisting=False,
blacklist=[],
dryRun=False):
"""Function to recursively upload a folder and all of its descendants.
:param localFolder: full path to local folder to be uploaded
:param parentId: id of parent in Girder,
where new folder will be added
:param parentType: one of (collection, folder, user)
:param leaf_folders_as_items: whether leaf folders should have all
files uploaded as single items
:param reuseExisting: boolean indicating whether to accept an existing
item
of the same name in the same location, or create a new one instead
"""
localFolder = os.path.normpath(localFolder)
filename = os.path.basename(localFolder)
if filename in blacklist:
print("Ignoring file %s as it is blacklisted" % filename)
return
# Do not add the folder if it does not contain any content links
has_content_link = False
for root, dirnames, filenames in os.walk(localFolder):
for filename in fnmatch.filter(filenames, '*' + ext):
has_content_link = True
break
if not has_content_link:
return
print('Creating Folder from %s' % localFolder)
if dryRun:
# create a dryRun placeholder
folder = {'_id': 'dryRun'}
elif localFolder == '.':
folder = {'_id': parentId}
else:
folder = self.loadOrCreateFolder(
os.path.basename(localFolder), parentId, parentType)
for entry in sorted(os.listdir(localFolder)):
if entry in blacklist:
print("Ignoring file %s as it is blacklisted" % entry)
continue
full_entry = os.path.join(localFolder, entry)
if os.path.islink(full_entry):
# os.walk skips symlinks by default
print("Skipping file %s as it is a symlink" % entry)
continue
elif os.path.isdir(full_entry):
# At this point we should have an actual folder, so can
# pass that as the parentType
self._uploadFolderRecursive(
full_entry, folder['_id'], 'folder',
ext, reuseExisting=reuseExisting,
blacklist=blacklist, dryRun=dryRun)
else:
name = os.path.splitext(entry)[0]
self._uploadContentLinkItem(name, full_entry, folder,
ext=ext, parentType=parentType, dryRun=dryRun,
reuseExisting=reuseExisting)
if not dryRun:
for callback in self._folderUploadCallbacks:
callback(folder, localFolder) | [
"def",
"_uploadFolderRecursive",
"(",
"self",
",",
"localFolder",
",",
"parentId",
",",
"parentType",
",",
"ext",
"=",
"'.sha512'",
",",
"reuseExisting",
"=",
"False",
",",
"blacklist",
"=",
"[",
"]",
",",
"dryRun",
"=",
"False",
")",
":",
"localFolder",
"=",
"os",
".",
"path",
".",
"normpath",
"(",
"localFolder",
")",
"filename",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"localFolder",
")",
"if",
"filename",
"in",
"blacklist",
":",
"print",
"(",
"\"Ignoring file %s as it is blacklisted\"",
"%",
"filename",
")",
"return",
"# Do not add the folder if it does not contain any content links",
"has_content_link",
"=",
"False",
"for",
"root",
",",
"dirnames",
",",
"filenames",
"in",
"os",
".",
"walk",
"(",
"localFolder",
")",
":",
"for",
"filename",
"in",
"fnmatch",
".",
"filter",
"(",
"filenames",
",",
"'*'",
"+",
"ext",
")",
":",
"has_content_link",
"=",
"True",
"break",
"if",
"not",
"has_content_link",
":",
"return",
"print",
"(",
"'Creating Folder from %s'",
"%",
"localFolder",
")",
"if",
"dryRun",
":",
"# create a dryRun placeholder",
"folder",
"=",
"{",
"'_id'",
":",
"'dryRun'",
"}",
"elif",
"localFolder",
"==",
"'.'",
":",
"folder",
"=",
"{",
"'_id'",
":",
"parentId",
"}",
"else",
":",
"folder",
"=",
"self",
".",
"loadOrCreateFolder",
"(",
"os",
".",
"path",
".",
"basename",
"(",
"localFolder",
")",
",",
"parentId",
",",
"parentType",
")",
"for",
"entry",
"in",
"sorted",
"(",
"os",
".",
"listdir",
"(",
"localFolder",
")",
")",
":",
"if",
"entry",
"in",
"blacklist",
":",
"print",
"(",
"\"Ignoring file %s as it is blacklisted\"",
"%",
"entry",
")",
"continue",
"full_entry",
"=",
"os",
".",
"path",
".",
"join",
"(",
"localFolder",
",",
"entry",
")",
"if",
"os",
".",
"path",
".",
"islink",
"(",
"full_entry",
")",
":",
"# os.walk skips symlinks by default",
"print",
"(",
"\"Skipping file %s as it is a symlink\"",
"%",
"entry",
")",
"continue",
"elif",
"os",
".",
"path",
".",
"isdir",
"(",
"full_entry",
")",
":",
"# At this point we should have an actual folder, so can",
"# pass that as the parentType",
"self",
".",
"_uploadFolderRecursive",
"(",
"full_entry",
",",
"folder",
"[",
"'_id'",
"]",
",",
"'folder'",
",",
"ext",
",",
"reuseExisting",
"=",
"reuseExisting",
",",
"blacklist",
"=",
"blacklist",
",",
"dryRun",
"=",
"dryRun",
")",
"else",
":",
"name",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"entry",
")",
"[",
"0",
"]",
"self",
".",
"_uploadContentLinkItem",
"(",
"name",
",",
"full_entry",
",",
"folder",
",",
"ext",
"=",
"ext",
",",
"parentType",
"=",
"parentType",
",",
"dryRun",
"=",
"dryRun",
",",
"reuseExisting",
"=",
"reuseExisting",
")",
"if",
"not",
"dryRun",
":",
"for",
"callback",
"in",
"self",
".",
"_folderUploadCallbacks",
":",
"callback",
"(",
"folder",
",",
"localFolder",
")"
] | [
92,
4
] | [
157,
49
] | python | en | ['en', 'en', 'en'] | True |
get_bin_intervals | (data, num_bins) |
Returns bin intervals for 1D data.
Parameters
----------
data: np.ndarray
A 1D NumPy array of values to get bin intervals for.
num_bins: int
The number of bins to create.
Returns
-------
bin_intervals: np.ndarray of shape (num_bins, 2)
A 2D NumPy array of bin intervals, with each row being one bin,
with the first value being the lower bound for the bin and
the second being the upper bound for the bin.
|
Returns bin intervals for 1D data. | def get_bin_intervals(data, num_bins):
"""
Returns bin intervals for 1D data.
Parameters
----------
data: np.ndarray
A 1D NumPy array of values to get bin intervals for.
num_bins: int
The number of bins to create.
Returns
-------
bin_intervals: np.ndarray of shape (num_bins, 2)
A 2D NumPy array of bin intervals, with each row being one bin,
with the first value being the lower bound for the bin and
the second being the upper bound for the bin.
"""
# Transition points between bins.
bin_trans = np.linspace(data[0], data[-1], num_bins+1, endpoint=True)
bin_intervals = np.empty((num_bins, 2), dtype=data.dtype)
for i in range(num_bins):
bin_intervals[i, :] = [bin_trans[i], bin_trans[i+1]]
return bin_intervals | [
"def",
"get_bin_intervals",
"(",
"data",
",",
"num_bins",
")",
":",
"# Transition points between bins.",
"bin_trans",
"=",
"np",
".",
"linspace",
"(",
"data",
"[",
"0",
"]",
",",
"data",
"[",
"-",
"1",
"]",
",",
"num_bins",
"+",
"1",
",",
"endpoint",
"=",
"True",
")",
"bin_intervals",
"=",
"np",
".",
"empty",
"(",
"(",
"num_bins",
",",
"2",
")",
",",
"dtype",
"=",
"data",
".",
"dtype",
")",
"for",
"i",
"in",
"range",
"(",
"num_bins",
")",
":",
"bin_intervals",
"[",
"i",
",",
":",
"]",
"=",
"[",
"bin_trans",
"[",
"i",
"]",
",",
"bin_trans",
"[",
"i",
"+",
"1",
"]",
"]",
"return",
"bin_intervals"
] | [
10,
0
] | [
33,
24
] | python | en | ['en', 'error', 'th'] | False |
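A quick, hypothetical check of get_bin_intervals (assuming the function above is in scope): ten evenly spaced values split into two bins should give the intervals [0, 4.5] and [4.5, 9].

import numpy as np

data = np.linspace(0, 9, 10)
print(get_bin_intervals(data, num_bins=2))
# Expected output for float64 input, roughly:
# [[0.  4.5]
#  [4.5 9. ]]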
xr_scale_res | (dataset, x_coord='longitude', y_coord='latitude',
frac_res=None, abs_res=None) |
Scales the resolution of an `xarray.Dataset` or `xarray.DataArray`
to a fraction of its original resolution or an absolute resolution.
Parameters
----------
dataset: xarray.Dataset or xarray.DataArray
The Dataset or DataArray to reduce the resolution of.
x_coord, y_coord: str
Names of the x and y coordinates in `dataset` to scale.
frac_res: float
The fraction of the original resolution to scale to. Must be positive.
Note that this can be greater than 1.0, in which case the resolution
is upsampled.
abs_res: list-like
A list-like of the number of pixels for the x and y axes, respectively.
Overrides `frac_res` if specified.
Returns
-------
dataset_scaled: xarray.Dataset or xarray.DataArray
The result of scaling the resolution of `dataset`.
Raises
------
AssertionError: If neither `frac_res` nor `abs_res` is specified.
|
Scales the resolution of an `xarray.Dataset` or `xarray.DataArray`
to a fraction of its original resolution or an absolute resolution. | def xr_scale_res(dataset, x_coord='longitude', y_coord='latitude',
frac_res=None, abs_res=None):
"""
Scales the resolution of an `xarray.Dataset` or `xarray.DataArray`
to a fraction of its original resolution or an absolute resolution.
Parameters
----------
dataset: xarray.Dataset or xarray.DataArray
The Dataset or DataArray to reduce the resolution of.
x_coord, y_coord: str
Names of the x and y coordinates in `dataset` to scale.
frac_res: float
The fraction of the original resolution to scale to. Must be positive.
Note that this can be greater than 1.0, in which case the resolution
is upsampled.
abs_res: list-like
A list-like of the number of pixels for the x and y axes, respectively.
Overrides `frac_res` if specified.
Returns
-------
dataset_scaled: xarray.Dataset or xarray.DataArray
The result of scaling the resolution of `dataset`.
Raises
------
AssertionError: If neither `frac_res` nor `abs_res` is specified.
"""
assert frac_res is not None or abs_res is not None, \
"Either frac_res or abs_res must be specified (i.e. not None)."
if frac_res is not None:
x_px = y_px = np.sqrt(frac_res)
interp_param = 'frac'
elif abs_res is not None:
interp_param = 'num'
x_px, y_px = abs_res
return xr_interp(dataset, {x_coord: ('interp', {interp_param: x_px}), \
y_coord: ('interp', {interp_param: y_px})}) | [
"def",
"xr_scale_res",
"(",
"dataset",
",",
"x_coord",
"=",
"'longitude'",
",",
"y_coord",
"=",
"'latitude'",
",",
"frac_res",
"=",
"None",
",",
"abs_res",
"=",
"None",
")",
":",
"assert",
"frac_res",
"is",
"not",
"None",
"or",
"abs_res",
"is",
"not",
"None",
",",
"\"Either frac_res or abs_res must be specified (i.e. not None).\"",
"if",
"frac_res",
"is",
"not",
"None",
":",
"x_px",
"=",
"y_px",
"=",
"np",
".",
"sqrt",
"(",
"frac_res",
")",
"interp_param",
"=",
"'frac'",
"elif",
"abs_res",
"is",
"not",
"None",
":",
"interp_param",
"=",
"'num'",
"x_px",
",",
"y_px",
"=",
"abs_res",
"return",
"xr_interp",
"(",
"dataset",
",",
"{",
"x_coord",
":",
"(",
"'interp'",
",",
"{",
"interp_param",
":",
"x_px",
"}",
")",
",",
"y_coord",
":",
"(",
"'interp'",
",",
"{",
"interp_param",
":",
"y_px",
"}",
")",
"}",
")"
] | [
36,
0
] | [
74,
74
] | python | en | ['en', 'error', 'th'] | False |
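A hypothetical use of xr_scale_res on a small synthetic DataArray, assuming the functions above are importable and that xarray plus scipy (needed for method='nearest' interpolation) are installed. Note that frac_res applies to the total pixel count, so 0.25 keeps about half the pixels along each axis (a quarter overall).

import numpy as np
import xarray as xr

da = xr.DataArray(
    np.arange(64, dtype=np.float32).reshape(8, 8),
    coords={'latitude': np.linspace(-1, 1, 8), 'longitude': np.linspace(30, 31, 8)},
    dims=('latitude', 'longitude'),
)
da_frac = xr_scale_res(da, frac_res=0.25)   # roughly 4x4 pixels
da_abs = xr_scale_res(da, abs_res=(2, 2))   # exactly 2x2 pixels
print(da_frac.shape, da_abs.shape)          # should print (4, 4) (2, 2)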
xr_sel_time_by_bin | (dataset, num_bins, time_coord='time') |
Selects time coordinates by nearest neighbors of the means of bins.
This is useful for plotting data with high variance in temporal
spacing between acquisitions.
Parameters
----------
dataset: xarray.Dataset or xarray.DataArray
The Dataset or DataArray to aggregate by binning.
Must have a 'time' coordinate of type `datetime64`.
num_bins: int
The number of bins to use.
time_coord: str
The name of the time coordinate to bin.
Returns
-------
result: xarray.Dataset or xarray.DataArray
The result of aggregating within bins for the binned data.
|
Selects time coordinates by nearest neighbors of the means of bins.
This is useful for plotting data with high variance in temporal
spacing between acquisitions. | def xr_sel_time_by_bin(dataset, num_bins, time_coord='time'):
"""
Selects time coordinates by nearest neighbors of the means of bins.
This is useful for plotting data with high variance in temporal
spacing between acquisitions.
Parameters
----------
dataset: xarray.Dataset or xarray.DataArray
The Dataset or DataArray to aggregate by binning.
Must have a 'time' coordinate of type `datetime64`.
num_bins: int
The number of bins to use.
time_coord: str
The name of the time coordinate to bin.
Returns
-------
result: xarray.Dataset or xarray.DataArray
The result of aggregating within bins for the binned data.
"""
return xr_interp(dataset, {time_coord: ('bin', {'num': num_bins})}) | [
"def",
"xr_sel_time_by_bin",
"(",
"dataset",
",",
"num_bins",
",",
"time_coord",
"=",
"'time'",
")",
":",
"return",
"xr_interp",
"(",
"dataset",
",",
"{",
"time_coord",
":",
"(",
"'bin'",
",",
"{",
"'num'",
":",
"num_bins",
"}",
")",
"}",
")"
] | [
77,
0
] | [
98,
71
] | python | en | ['en', 'error', 'th'] | False |
xr_interp | (dataset, interp_config) |
Interpolates an `xarray.Dataset` or `xarray.DataArray`.
This is often done to match dimensions between xarray objects or
downsample to reduce memory consumption.
First, coordinates are interpolated according to `interp_config`.
Then the data values for those interpolated coordinates are obtained
through nearest neighbors interpolation.
Parameters
----------
dataset: xarray.Dataset or xarray.DataArray
The Dataset or DataArray to interpolate.
interp_config: dict
Mapping of names of coordinates to 2-tuples of the interpolation types
to use for those coordinates and the parameters for those interpolation types.
The supported coordinate interpolation types are 'interp' for
linear interpolation and 'bin' for binning.
The parameters, with supported interpolation types annotated to their
left, are as follows:
('interp', 'bin'): 'frac':
The fraction of the original size to use. Exclusive with 'num'.
('interp', 'bin'): 'num':
The number of points in the output. Exclusive with 'frac'.
Either 'frac' or 'num' must be in the interpolation parameters.
The following is an example value:
`{'latitude':('interp',{'frac':0.5}),'longitude':('interp',{'frac':0.5}),
'time':('bin',{'num':20})}`.
Returns
-------
interp_data: xarray.Dataset or xarray.DataArray
The specified interpolation of `dataset`.
:Authors:
John Rattz ([email protected])
|
Interpolates an `xarray.Dataset` or `xarray.DataArray`.
This is often done to match dimensions between xarray objects or
downsample to reduce memory consumption. | def xr_interp(dataset, interp_config):
"""
Interpolates an `xarray.Dataset` or `xarray.DataArray`.
This is often done to match dimensions between xarray objects or
downsample to reduce memory consumption.
First, coordinates are interpolated according to `interp_config`.
Then the data values for those interpolated coordinates are obtained
through nearest neighbors interpolation.
Parameters
----------
dataset: xarray.Dataset or xarray.DataArray
The Dataset or DataArray to interpolate.
interp_config: dict
Mapping of names of coordinates to 2-tuples of the interpolation types
to use for those coordinates and the parameters for those interpolation types.
The supported coordinate interpolation types are 'interp' for
linear interpolation and 'bin' for binning.
The parameters, with supported interpolation types annotated to their
left, are as follows:
('interp', 'bin'): 'frac':
The fraction of the original size to use. Exclusive with 'num'.
('interp', 'bin'): 'num':
The number of points in the output. Exclusive with 'frac'.
Either 'frac' or 'num' must be in the interpolation parameters.
The following is an example value:
`{'latitude':('interp',{'frac':0.5}),'longitude':('interp',{'frac':0.5}),
'time':('bin',{'num':20})}`.
Returns
-------
interp_data: xarray.Dataset or xarray.DataArray
The specified interpolation of `dataset`.
:Authors:
John Rattz ([email protected])
"""
# Create the new coordinates.
new_coords = {}
for dim, (interp_type, interp_kwargs) in interp_config.items():
# Determine the number of points to use.
num_pts = interp_kwargs.get('num', None)
if num_pts is None:
frac = interp_kwargs.get('frac', None)
num_pts_orig = len(dataset[dim])
num_pts = round(num_pts_orig * frac)
dim_vals = dataset[dim].values
dim_dtype = type(dim_vals[0])
# Convert NumPy datetime64 objects to scalars.
if dim_dtype == np.datetime64:
dim_vals = np.array(list(map(_n64_datetime_to_scalar, dim_vals)))
interp_vals = None
# Interpolate coordinates.
if interp_type == 'bin':
bin_intervals = get_bin_intervals(dim_vals, num_pts)
interp_vals = np.mean(bin_intervals, axis=1)
if interp_type == 'interp':
interp_inds = np.linspace(0, len(dim_vals) - 1, num_pts, dtype=np.int32)
interp_vals = dim_vals[interp_inds]
# Convert scalars to NumPy datetime64 objects.
if dim_dtype == np.datetime64:
interp_vals = np.array(list(map(_scalar_to_n64_datetime, interp_vals)))
new_coords[dim] = interp_vals
# Nearest-neighbor interpolate data values.
interp_data = dataset.interp(coords=new_coords, method='nearest')
# xarray.Dataset.interp() converts to dtype float64, so cast back to the original dtypes.
if isinstance(dataset, xr.DataArray):
interp_data = interp_data.astype(dataset.dtype)
elif isinstance(dataset, xr.Dataset):
for data_var_name in interp_data.data_vars:
interp_data[data_var_name] = interp_data[data_var_name].astype(dataset[data_var_name].dtype)
return interp_data | [
"def",
"xr_interp",
"(",
"dataset",
",",
"interp_config",
")",
":",
"# Create the new coordinates.",
"new_coords",
"=",
"{",
"}",
"for",
"dim",
",",
"(",
"interp_type",
",",
"interp_kwargs",
")",
"in",
"interp_config",
".",
"items",
"(",
")",
":",
"# Determine the number of points to use.",
"num_pts",
"=",
"interp_kwargs",
".",
"get",
"(",
"'num'",
",",
"None",
")",
"if",
"num_pts",
"is",
"None",
":",
"frac",
"=",
"interp_kwargs",
".",
"get",
"(",
"'frac'",
",",
"None",
")",
"num_pts_orig",
"=",
"len",
"(",
"dataset",
"[",
"dim",
"]",
")",
"num_pts",
"=",
"round",
"(",
"num_pts_orig",
"*",
"frac",
")",
"dim_vals",
"=",
"dataset",
"[",
"dim",
"]",
".",
"values",
"dim_dtype",
"=",
"type",
"(",
"dim_vals",
"[",
"0",
"]",
")",
"# Convert NumPy datetime64 objects to scalars.",
"if",
"dim_dtype",
"==",
"np",
".",
"datetime64",
":",
"dim_vals",
"=",
"np",
".",
"array",
"(",
"list",
"(",
"map",
"(",
"_n64_datetime_to_scalar",
",",
"dim_vals",
")",
")",
")",
"interp_vals",
"=",
"None",
"# Interpolate coordinates.",
"if",
"interp_type",
"==",
"'bin'",
":",
"bin_intervals",
"=",
"get_bin_intervals",
"(",
"dim_vals",
",",
"num_pts",
")",
"interp_vals",
"=",
"np",
".",
"mean",
"(",
"bin_intervals",
",",
"axis",
"=",
"1",
")",
"if",
"interp_type",
"==",
"'interp'",
":",
"interp_inds",
"=",
"np",
".",
"linspace",
"(",
"0",
",",
"len",
"(",
"dim_vals",
")",
"-",
"1",
",",
"num_pts",
",",
"dtype",
"=",
"np",
".",
"int32",
")",
"interp_vals",
"=",
"dim_vals",
"[",
"interp_inds",
"]",
"# Convert scalars to NumPy datetime64 objects.",
"if",
"dim_dtype",
"==",
"np",
".",
"datetime64",
":",
"interp_vals",
"=",
"np",
".",
"array",
"(",
"list",
"(",
"map",
"(",
"_scalar_to_n64_datetime",
",",
"interp_vals",
")",
")",
")",
"new_coords",
"[",
"dim",
"]",
"=",
"interp_vals",
"# Nearest-neighbor interpolate data values.",
"interp_data",
"=",
"dataset",
".",
"interp",
"(",
"coords",
"=",
"new_coords",
",",
"method",
"=",
"'nearest'",
")",
"# xarray.Dataset.interp() converts to dtype float64, so cast back to the original dtypes.",
"if",
"isinstance",
"(",
"dataset",
",",
"xr",
".",
"DataArray",
")",
":",
"interp_data",
"=",
"interp_data",
".",
"astype",
"(",
"dataset",
".",
"dtype",
")",
"elif",
"isinstance",
"(",
"dataset",
",",
"xr",
".",
"Dataset",
")",
":",
"for",
"data_var_name",
"in",
"interp_data",
".",
"data_vars",
":",
"interp_data",
"[",
"data_var_name",
"]",
"=",
"interp_data",
"[",
"data_var_name",
"]",
".",
"astype",
"(",
"dataset",
"[",
"data_var_name",
"]",
".",
"dtype",
")",
"return",
"interp_data"
] | [
101,
0
] | [
173,
22
] | python | en | ['en', 'error', 'th'] | False |
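A hypothetical end-to-end call of xr_interp that thins the spatial axes linearly and bins the time axis, assuming the helpers above are in scope and xarray, pandas, and scipy are available.

import numpy as np
import pandas as pd
import xarray as xr

da = xr.DataArray(
    np.random.rand(12, 6, 6).astype(np.float32),
    coords={
        'time': pd.date_range('2020-01-01', periods=12, freq='D'),
        'latitude': np.linspace(-1, 1, 6),
        'longitude': np.linspace(30, 31, 6),
    },
    dims=('time', 'latitude', 'longitude'),
)
out = xr_interp(da, {'latitude': ('interp', {'frac': 0.5}),
                     'longitude': ('interp', {'frac': 0.5}),
                     'time': ('bin', {'num': 4})})
print(out.shape)  # should be (4, 3, 3): 4 binned times, 3x3 thinned pixels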
dt_to_str | (date, fmt='%Y-%m-%d') |
Converts a datetime object to a string.
|
Converts a datetime object to a string.
| def dt_to_str(date, fmt='%Y-%m-%d'):
"""
Converts a datetime object to a string.
"""
return date.strftime(fmt) | [
"def",
"dt_to_str",
"(",
"date",
",",
"fmt",
"=",
"'%Y-%m-%d'",
")",
":",
"return",
"date",
".",
"strftime",
"(",
"fmt",
")"
] | [
3,
0
] | [
7,
29
] | python | en | ['en', 'error', 'th'] | False |
_n64_to_datetime | (n64) |
Converts Numpy 64 bit timestamps to datetime objects. Units in seconds
|
Converts Numpy 64 bit timestamps to datetime objects. Units in seconds
| def _n64_to_datetime(n64):
"""
Converts Numpy 64 bit timestamps to datetime objects. Units in seconds
"""
return datetime.utcfromtimestamp(n64.tolist() / 1e9) | [
"def",
"_n64_to_datetime",
"(",
"n64",
")",
":",
"return",
"datetime",
".",
"utcfromtimestamp",
"(",
"n64",
".",
"tolist",
"(",
")",
"/",
"1e9",
")"
] | [
9,
0
] | [
13,
56
] | python | en | ['en', 'error', 'th'] | False |
_n64_datetime_to_scalar | (dt64) |
Converts a NumPy datetime64 object to the number of seconds since
midnight, January 1, 1970, as a NumPy float64.
Returns
-------
scalar: numpy.float64
The number of seconds since midnight, January 1, 1970, as a NumPy float64.
|
Converts a NumPy datetime64 object to the number of seconds since
midnight, January 1, 1970, as a NumPy float64.
Returns
-------
scalar: numpy.float64
The number of seconds since midnight, January 1, 1970, as a NumPy float64.
| def _n64_datetime_to_scalar(dt64):
"""
Converts a NumPy datetime64 object to the number of seconds since
midnight, January 1, 1970, as a NumPy float64.
Returns
-------
scalar: numpy.float64
The number of seconds since midnight, January 1, 1970, as a NumPy float64.
"""
return (dt64 - np.datetime64('1970-01-01T00:00:00Z')) / np.timedelta64(1, 's') | [
"def",
"_n64_datetime_to_scalar",
"(",
"dt64",
")",
":",
"return",
"(",
"dt64",
"-",
"np",
".",
"datetime64",
"(",
"'1970-01-01T00:00:00Z'",
")",
")",
"/",
"np",
".",
"timedelta64",
"(",
"1",
",",
"'s'",
")"
] | [
15,
0
] | [
25,
82
] | python | en | ['en', 'error', 'th'] | False |
_scalar_to_n64_datetime | (scalar) |
Converts a floating point number to a NumPy datetime64 object.
Returns
-------
dt64: numpy.datetime64
The NumPy datetime64 object representing the datetime of the scalar argument.
|
Converts a floating point number to a NumPy datetime64 object.
Returns
-------
dt64: numpy.datetime64
The NumPy datetime64 object representing the datetime of the scalar argument.
| def _scalar_to_n64_datetime(scalar):
"""
Converts a floating point number to a NumPy datetime64 object.
Returns
-------
dt64: numpy.datetime64
The NumPy datetime64 object representing the datetime of the scalar argument.
"""
return (scalar * np.timedelta64(1, 's')) + np.datetime64('1970-01-01T00:00:00Z') | [
"def",
"_scalar_to_n64_datetime",
"(",
"scalar",
")",
":",
"return",
"(",
"scalar",
"*",
"np",
".",
"timedelta64",
"(",
"1",
",",
"'s'",
")",
")",
"+",
"np",
".",
"datetime64",
"(",
"'1970-01-01T00:00:00Z'",
")"
] | [
27,
0
] | [
36,
84
] | python | en | ['en', 'error', 'th'] | False |
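The two helpers above are inverses of each other; a small round trip with an arbitrarily chosen timestamp (assuming both functions are in scope):

import numpy as np

t = np.datetime64('2000-01-02T03:04:05')
seconds = _n64_datetime_to_scalar(t)
print(seconds)                           # should print 946782245.0
print(_scalar_to_n64_datetime(seconds))  # should print 2000-01-02T03:04:05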
SuiteScaffoldNotebookRenderer.render_to_disk | (self, notebook_file_path: str) |
Render a notebook to disk from an expectation suite.
If batch_kwargs are passed they will override any found in suite
citations.
|
Render a notebook to disk from an expectation suite. | def render_to_disk(self, notebook_file_path: str) -> None:
"""
Render a notebook to disk from an expectation suite.
If batch_kwargs are passed they will override any found in suite
citations.
"""
self.render(self.batch_kwargs)
self.write_notebook_to_disk(self._notebook, notebook_file_path) | [
"def",
"render_to_disk",
"(",
"self",
",",
"notebook_file_path",
":",
"str",
")",
"->",
"None",
":",
"self",
".",
"render",
"(",
"self",
".",
"batch_kwargs",
")",
"self",
".",
"write_notebook_to_disk",
"(",
"self",
".",
"_notebook",
",",
"notebook_file_path",
")"
] | [
152,
4
] | [
160,
71
] | python | en | ['en', 'error', 'th'] | False |
Instruments.add_instrument | (self, instrument: Instrument) | Start instrumenting the current run loop with the given instrument.
Args:
instrument (trio.abc.Instrument): The instrument to activate.
If ``instrument`` is already active, does nothing.
| Start instrumenting the current run loop with the given instrument. | def add_instrument(self, instrument: Instrument) -> None:
"""Start instrumenting the current run loop with the given instrument.
Args:
instrument (trio.abc.Instrument): The instrument to activate.
If ``instrument`` is already active, does nothing.
"""
if instrument in self["_all"]:
return
self["_all"][instrument] = None
try:
for name in dir(instrument):
if name.startswith("_"):
continue
try:
prototype = getattr(Instrument, name)
except AttributeError:
continue
impl = getattr(instrument, name)
if isinstance(impl, types.MethodType) and impl.__func__ is prototype:
# Inherited unchanged from _abc.Instrument
continue
self.setdefault(name, {})[instrument] = None
except:
self.remove_instrument(instrument)
raise | [
"def",
"add_instrument",
"(",
"self",
",",
"instrument",
":",
"Instrument",
")",
"->",
"None",
":",
"if",
"instrument",
"in",
"self",
"[",
"\"_all\"",
"]",
":",
"return",
"self",
"[",
"\"_all\"",
"]",
"[",
"instrument",
"]",
"=",
"None",
"try",
":",
"for",
"name",
"in",
"dir",
"(",
"instrument",
")",
":",
"if",
"name",
".",
"startswith",
"(",
"\"_\"",
")",
":",
"continue",
"try",
":",
"prototype",
"=",
"getattr",
"(",
"Instrument",
",",
"name",
")",
"except",
"AttributeError",
":",
"continue",
"impl",
"=",
"getattr",
"(",
"instrument",
",",
"name",
")",
"if",
"isinstance",
"(",
"impl",
",",
"types",
".",
"MethodType",
")",
"and",
"impl",
".",
"__func__",
"is",
"prototype",
":",
"# Inherited unchanged from _abc.Instrument",
"continue",
"self",
".",
"setdefault",
"(",
"name",
",",
"{",
"}",
")",
"[",
"instrument",
"]",
"=",
"None",
"except",
":",
"self",
".",
"remove_instrument",
"(",
"instrument",
")",
"raise"
] | [
37,
4
] | [
64,
17
] | python | en | ['en', 'en', 'en'] | True |
Instruments.remove_instrument | (self, instrument: Instrument) | Stop instrumenting the current run loop with the given instrument.
Args:
instrument (trio.abc.Instrument): The instrument to de-activate.
Raises:
KeyError: if the instrument is not currently active. This could
occur either because you never added it, or because you added it
and then it raised an unhandled exception and was automatically
deactivated.
| Stop instrumenting the current run loop with the given instrument. | def remove_instrument(self, instrument: Instrument) -> None:
"""Stop instrumenting the current run loop with the given instrument.
Args:
instrument (trio.abc.Instrument): The instrument to de-activate.
Raises:
KeyError: if the instrument is not currently active. This could
occur either because you never added it, or because you added it
and then it raised an unhandled exception and was automatically
deactivated.
"""
# If instrument isn't present, the KeyError propagates out
self["_all"].pop(instrument)
for hookname, instruments in list(self.items()):
if instrument in instruments:
del instruments[instrument]
if not instruments:
del self[hookname] | [
"def",
"remove_instrument",
"(",
"self",
",",
"instrument",
":",
"Instrument",
")",
"->",
"None",
":",
"# If instrument isn't present, the KeyError propagates out",
"self",
"[",
"\"_all\"",
"]",
".",
"pop",
"(",
"instrument",
")",
"for",
"hookname",
",",
"instruments",
"in",
"list",
"(",
"self",
".",
"items",
"(",
")",
")",
":",
"if",
"instrument",
"in",
"instruments",
":",
"del",
"instruments",
"[",
"instrument",
"]",
"if",
"not",
"instruments",
":",
"del",
"self",
"[",
"hookname",
"]"
] | [
67,
4
] | [
86,
38
] | python | en | ['en', 'en', 'en'] | True |
Instruments.call | (self, hookname: str, *args: Any) | Call hookname(*args) on each applicable instrument.
You must first check whether there are any instruments installed for
that hook, e.g.::
if "before_task_step" in instruments:
instruments.call("before_task_step", task)
| Call hookname(*args) on each applicable instrument. | def call(self, hookname: str, *args: Any) -> None:
"""Call hookname(*args) on each applicable instrument.
You must first check whether there are any instruments installed for
that hook, e.g.::
if "before_task_step" in instruments:
instruments.call("before_task_step", task)
"""
for instrument in list(self[hookname]):
try:
getattr(instrument, hookname)(*args)
except:
self.remove_instrument(instrument)
INSTRUMENT_LOGGER.exception(
"Exception raised when calling %r on instrument %r. "
"Instrument has been disabled.",
hookname,
instrument,
) | [
"def",
"call",
"(",
"self",
",",
"hookname",
":",
"str",
",",
"*",
"args",
":",
"Any",
")",
"->",
"None",
":",
"for",
"instrument",
"in",
"list",
"(",
"self",
"[",
"hookname",
"]",
")",
":",
"try",
":",
"getattr",
"(",
"instrument",
",",
"hookname",
")",
"(",
"*",
"args",
")",
"except",
":",
"self",
".",
"remove_instrument",
"(",
"instrument",
")",
"INSTRUMENT_LOGGER",
".",
"exception",
"(",
"\"Exception raised when calling %r on instrument %r. \"",
"\"Instrument has been disabled.\"",
",",
"hookname",
",",
"instrument",
",",
")"
] | [
88,
4
] | [
107,
17
] | python | en | ['en', 'en', 'en'] | True |
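The Instruments registry above is internal to Trio; user code normally supplies instruments through the public API instead. A hypothetical step counter using the documented trio.abc.Instrument hook names:

import trio

class TaskStepCounter(trio.abc.Instrument):
    def __init__(self):
        self.steps = 0

    def before_task_step(self, task):
        # Called by the dispatch machinery above before every task step.
        self.steps += 1

async def main():
    await trio.sleep(0)

counter = TaskStepCounter()
trio.run(main, instruments=[counter])
print(counter.steps)  # at least 1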
contextual_confusion_matrix | (expected, observed, data=None,
start=None, end=None, weighted=True) | Compute the confusion matrix between the ground truth and the detected anomalies.
Args:
expected (DataFrame or list of tuples):
Ground truth passed as a ``pandas.DataFrame`` or list containing
two columns: start and stop.
observed (DataFrame or list of tuples):
Detected anomalies passed as a ``pandas.DataFrame`` or list containing
two columns: start and stop.
data (DataFrame):
Original data, passed as a ``pandas.DataFrame`` containing timestamp.
Used to extract start and end.
start (int):
Minimum timestamp of the original data.
end (int):
Maximum timestamp of the original data.
weighted (boolean):
Flag to represent which algorithm to use.
If true use weighted segment algorithm, else use overlap segment.
Returns:
tuple:
number of true negative, false positive, false negative, true positive.
| Compute the confusion matrix between the ground truth and the detected anomalies. | def contextual_confusion_matrix(expected, observed, data=None,
start=None, end=None, weighted=True):
"""Compute the confusion matrix between the ground truth and the detected anomalies.
Args:
expected (DataFrame or list of tuples):
Ground truth passed as a ``pandas.DataFrame`` or list containing
two columns: start and stop.
observed (DataFrame or list of tuples):
Detected anomalies passed as a ``pandas.DataFrame`` or list containing
two columns: start and stop.
data (DataFrame):
Original data, passed as a ``pandas.DataFrame`` containing timestamp.
Used to extract start and end.
start (int):
Minimum timestamp of the original data.
end (int):
Maximum timestamp of the original data.
weighted (boolean):
Flag to represent which algorithm to use.
If true use weighted segment algorithm, else use overlap segment.
Returns:
tuple:
number of true negative, false positive, false negative, true positive.
"""
def _ws(x, y, z, w):
return _weighted_segment(x, y, _contextual_partition, z, w)
if weighted:
function = _ws
else:
function = _overlap_segment
if data is not None:
start = data['timestamp'].min()
end = data['timestamp'].max()
if not isinstance(expected, list):
expected = list(expected[['start', 'end']].itertuples(index=False))
if not isinstance(observed, list):
observed = list(observed[['start', 'end']].itertuples(index=False))
expected = _pad(expected)
observed = _pad(observed)
return function(expected, observed, start, end) | [
"def",
"contextual_confusion_matrix",
"(",
"expected",
",",
"observed",
",",
"data",
"=",
"None",
",",
"start",
"=",
"None",
",",
"end",
"=",
"None",
",",
"weighted",
"=",
"True",
")",
":",
"def",
"_ws",
"(",
"x",
",",
"y",
",",
"z",
",",
"w",
")",
":",
"return",
"_weighted_segment",
"(",
"x",
",",
"y",
",",
"_contextual_partition",
",",
"z",
",",
"w",
")",
"if",
"weighted",
":",
"function",
"=",
"_ws",
"else",
":",
"function",
"=",
"_overlap_segment",
"if",
"data",
"is",
"not",
"None",
":",
"start",
"=",
"data",
"[",
"'timestamp'",
"]",
".",
"min",
"(",
")",
"end",
"=",
"data",
"[",
"'timestamp'",
"]",
".",
"max",
"(",
")",
"if",
"not",
"isinstance",
"(",
"expected",
",",
"list",
")",
":",
"expected",
"=",
"list",
"(",
"expected",
"[",
"[",
"'start'",
",",
"'end'",
"]",
"]",
".",
"itertuples",
"(",
"index",
"=",
"False",
")",
")",
"if",
"not",
"isinstance",
"(",
"observed",
",",
"list",
")",
":",
"observed",
"=",
"list",
"(",
"observed",
"[",
"[",
"'start'",
",",
"'end'",
"]",
"]",
".",
"itertuples",
"(",
"index",
"=",
"False",
")",
")",
"expected",
"=",
"_pad",
"(",
"expected",
")",
"observed",
"=",
"_pad",
"(",
"observed",
")",
"return",
"function",
"(",
"expected",
",",
"observed",
",",
"start",
",",
"end",
")"
] | [
61,
0
] | [
108,
51
] | python | en | ['en', 'en', 'en'] | True |
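A hypothetical call with anomalies given as (start, end) tuples; the returned tuple is ordered (tn, fp, fn, tp) as the docstring states. The timestamps and intervals are made up, and the module's internal helpers (_weighted_segment, _pad, ...) are assumed to be importable alongside the function.

import pandas as pd

data = pd.DataFrame({'timestamp': range(1, 101)})
expected = [(20, 40)]   # ground-truth anomaly
observed = [(30, 50)]   # detected anomaly
tn, fp, fn, tp = contextual_confusion_matrix(expected, observed, data=data)
print(tn, fp, fn, tp)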
contextual_accuracy | (expected, observed, data=None, start=None, end=None, weighted=True) | Compute an accuracy score between the ground truth and the detected anomalies.
Args:
expected (DataFrame or list of tuples):
Ground truth passed as a ``pandas.DataFrame`` or list containing
two columns: start and stop.
observed (DataFrame or list of tuples):
Detected anomalies passed as a ``pandas.DataFrame`` or list containing
two columns: start and stop.
data (DataFrame):
Original data, passed as a ``pandas.DataFrame`` containing timestamp.
Used to extract start and end.
start (int):
Minimum timestamp of the original data.
end (int):
Maximum timestamp of the original data.
weighted (boolean):
Flag to represent which algorithm to use.
If true use weighted segment algorithm, else use overlap segment.
Returns:
float:
Accuracy score between the ground truth and detected anomalies.
| Compute an accuracy score between the ground truth and the detected anomalies. | def contextual_accuracy(expected, observed, data=None, start=None, end=None, weighted=True):
"""Compute an accuracy score between the ground truth and the detected anomalies.
Args:
expected (DataFrame or list of tuples):
Ground truth passed as a ``pandas.DataFrame`` or list containing
two columns: start and stop.
observed (DataFrame or list of tuples):
Detected anomalies passed as a ``pandas.DataFrame`` or list containing
two columns: start and stop.
data (DataFrame):
Original data, passed as a ``pandas.DataFrame`` containing timestamp.
Used to extract start and end.
start (int):
Minimum timestamp of the original data.
end (int):
Maximum timestamp of the original data.
weighted (boolean):
Flag to represent which algorithm to use.
If true use weighted segment algorithm, else use overlap segment.
Returns:
float:
Accuracy score between the ground truth and detected anomalies.
"""
def _cm(x, y, z, w, f):
return contextual_confusion_matrix(x, y, z, w, f, weighted)
return _accuracy(expected, observed, data, start, end, _cm) | [
"def",
"contextual_accuracy",
"(",
"expected",
",",
"observed",
",",
"data",
"=",
"None",
",",
"start",
"=",
"None",
",",
"end",
"=",
"None",
",",
"weighted",
"=",
"True",
")",
":",
"def",
"_cm",
"(",
"x",
",",
"y",
",",
"z",
",",
"w",
",",
"f",
")",
":",
"return",
"contextual_confusion_matrix",
"(",
"x",
",",
"y",
",",
"z",
",",
"w",
",",
"f",
",",
"weighted",
")",
"return",
"_accuracy",
"(",
"expected",
",",
"observed",
",",
"data",
",",
"start",
",",
"end",
",",
"_cm",
")"
] | [
111,
0
] | [
138,
63
] | python | en | ['en', 'en', 'en'] | True |
contextual_precision | (expected, observed, data=None, start=None, end=None, weighted=True) | Compute a precision score between the ground truth and the detected anomalies.
Args:
expected (DataFrame or list of tuples):
Ground truth passed as a ``pandas.DataFrame`` or list containing
two columns: start and stop.
observed (DataFrame or list of tuples):
Detected anomalies passed as a ``pandas.DataFrame`` or list containing
two columns: start and stop.
data (DataFrame):
Original data, passed as a ``pandas.DataFrame`` containing timestamp.
Used to extract start and end.
start (int):
Minimum timestamp of the original data.
end (int):
Maximum timestamp of the original data.
weighted (boolean):
Flag to represent which algorithm to use.
If true use weighted segment algorithm, else use overlap segment.
Returns:
float:
Precision score between the ground truth and detected anomalies.
| Compute a precision score between the ground truth and the detected anomalies. | def contextual_precision(expected, observed, data=None, start=None, end=None, weighted=True):
"""Compute an precision score between the ground truth and the detected anomalies.
Args:
expected (DataFrame or list of tuples):
Ground truth passed as a ``pandas.DataFrame`` or list containing
two columns: start and stop.
observed (DataFrame or list of tuples):
Detected anomalies passed as a ``pandas.DataFrame`` or list containing
two columns: start and stop.
data (DataFrame):
Original data, passed as a ``pandas.DataFrame`` containing timestamp.
Used to extract start and end.
start (int):
Minimum timestamp of the original data.
end (int):
Maximum timestamp of the original data.
weighted (boolean):
Flag to represent which algorithm to use.
If true use weighted segment algorithm, else use overlap segment.
Returns:
float:
Precision score between the ground truth and detected anomalies.
"""
def _cm(x, y, z, w, f):
return contextual_confusion_matrix(x, y, z, w, f, weighted)
return _precision(expected, observed, data, start, end, _cm) | [
"def",
"contextual_precision",
"(",
"expected",
",",
"observed",
",",
"data",
"=",
"None",
",",
"start",
"=",
"None",
",",
"end",
"=",
"None",
",",
"weighted",
"=",
"True",
")",
":",
"def",
"_cm",
"(",
"x",
",",
"y",
",",
"z",
",",
"w",
",",
"f",
")",
":",
"return",
"contextual_confusion_matrix",
"(",
"x",
",",
"y",
",",
"z",
",",
"w",
",",
"f",
",",
"weighted",
")",
"return",
"_precision",
"(",
"expected",
",",
"observed",
",",
"data",
",",
"start",
",",
"end",
",",
"_cm",
")"
] | [
141,
0
] | [
168,
64
] | python | en | ['en', 'en', 'en'] | True |
contextual_recall | (expected, observed, data=None, start=None, end=None, weighted=True) | Compute a recall score between the ground truth and the detected anomalies.
Args:
expected (DataFrame or list of tuples):
Ground truth passed as a ``pandas.DataFrame`` or list containing
two columns: start and stop.
observed (DataFrame or list of tuples):
Detected anomalies passed as a ``pandas.DataFrame`` or list containing
two columns: start and stop.
data (DataFrame):
Original data, passed as a ``pandas.DataFrame`` containing timestamp.
Used to extract start and end.
start (int):
Minimum timestamp of the original data.
end (int):
Maximum timestamp of the original data.
weighted (boolean):
Flag to represent which algorithm to use.
If true use weighted segment algorithm, else use overlap segment.
Returns:
float:
Recall score between the ground truth and detected anomalies.
| Compute a recall score between the ground truth and the detected anomalies. | def contextual_recall(expected, observed, data=None, start=None, end=None, weighted=True):
"""Compute an recall score between the ground truth and the detected anomalies.
Args:
expected (DataFrame or list of tuples):
Ground truth passed as a ``pandas.DataFrame`` or list containing
two columns: start and stop.
observed (DataFrame or list of tuples):
Detected anomalies passed as a ``pandas.DataFrame`` or list containing
two columns: start and stop.
data (DataFrame):
Original data, passed as a ``pandas.DataFrame`` containing timestamp.
Used to extract start and end.
start (int):
Minimum timestamp of the original data.
end (int):
Maximum timestamp of the original data.
weighted (boolean):
Flag to represent which algorithm to use.
If true use weighted segment algorithm, else use overlap segment.
Returns:
float:
Recall score between the ground truth and detected anomalies.
"""
def _cm(x, y, z, w, f):
return contextual_confusion_matrix(x, y, z, w, f, weighted)
return _recall(expected, observed, data, start, end, _cm) | [
"def",
"contextual_recall",
"(",
"expected",
",",
"observed",
",",
"data",
"=",
"None",
",",
"start",
"=",
"None",
",",
"end",
"=",
"None",
",",
"weighted",
"=",
"True",
")",
":",
"def",
"_cm",
"(",
"x",
",",
"y",
",",
"z",
",",
"w",
",",
"f",
")",
":",
"return",
"contextual_confusion_matrix",
"(",
"x",
",",
"y",
",",
"z",
",",
"w",
",",
"f",
",",
"weighted",
")",
"return",
"_recall",
"(",
"expected",
",",
"observed",
",",
"data",
",",
"start",
",",
"end",
",",
"_cm",
")"
] | [
171,
0
] | [
198,
61
] | python | en | ['en', 'en', 'en'] | True |
contextual_f1_score | (expected, observed, data=None, start=None, end=None, weighted=True) | Compute an f1 score between the ground truth and the detected anomalies.
Args:
expected (DataFrame or list of tuples):
Ground truth passed as a ``pandas.DataFrame`` or list containing
two columns: start and stop.
observed (DataFrame or list of tuples):
Detected anomalies passed as a ``pandas.DataFrame`` or list containing
two columns: start and stop.
data (DataFrame):
Original data, passed as a ``pandas.DataFrame`` containing timestamp.
Used to extract start and end.
start (int):
Minimum timestamp of the original data.
end (int):
Maximum timestamp of the original data.
weighted (boolean):
Flag to represent which algorithm to use.
If true use weighted segment algorithm, else use overlap segment.
Returns:
float:
F1 score between the ground truth and detected anomalies.
| Compute an f1 score between the ground truth and the detected anomalies. | def contextual_f1_score(expected, observed, data=None, start=None, end=None, weighted=True):
"""Compute an f1 score between the ground truth and the detected anomalies.
Args:
expected (DataFrame or list of tuples):
Ground truth passed as a ``pandas.DataFrame`` or list containing
two columns: start and stop.
observed (DataFrame or list of tuples):
Detected anomalies passed as a ``pandas.DataFrame`` or list containing
two columns: start and stop.
data (DataFrame):
Original data, passed as a ``pandas.DataFrame`` containing timestamp.
Used to extract start and end.
start (int):
Minimum timestamp of the original data.
end (int):
Maximum timestamp of the original data.
weighted (boolean):
Flag to represent which algorithm to use.
If true use weighted segment algorithm, else use overlap segment.
Returns:
float:
F1 score between the ground truth and detected anomalies.
"""
def _cm(x, y, z, w, f):
return contextual_confusion_matrix(x, y, z, w, f, weighted)
return _f1_score(expected, observed, data, start, end, _cm) | [
"def",
"contextual_f1_score",
"(",
"expected",
",",
"observed",
",",
"data",
"=",
"None",
",",
"start",
"=",
"None",
",",
"end",
"=",
"None",
",",
"weighted",
"=",
"True",
")",
":",
"def",
"_cm",
"(",
"x",
",",
"y",
",",
"z",
",",
"w",
",",
"f",
")",
":",
"return",
"contextual_confusion_matrix",
"(",
"x",
",",
"y",
",",
"z",
",",
"w",
",",
"f",
",",
"weighted",
")",
"return",
"_f1_score",
"(",
"expected",
",",
"observed",
",",
"data",
",",
"start",
",",
"end",
",",
"_cm",
")"
] | [
201,
0
] | [
228,
63
] | python | en | ['en', 'en', 'en'] | True |
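Precision, recall, and F1 all reuse the same confusion matrix; a hypothetical comparison of the weighted-segment default against the overlap-segment variant on identical, made-up inputs (same availability assumptions as above):

import pandas as pd

data = pd.DataFrame({'timestamp': range(0, 100)})
expected = [(10, 20), (60, 70)]
observed = [(12, 25)]
print(contextual_precision(expected, observed, data=data))
print(contextual_recall(expected, observed, data=data))
print(contextual_f1_score(expected, observed, data=data, weighted=False))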
my_fun | (one, two, three, four, five, six) | Sample function with multiple code issues | Sample function with multiple code issues | def my_fun(one, two, three, four, five, six): # pylint: disable=W0613
"""Sample function with multiple code issues"""
one += 1; two += 2 # More than one statement on a single line (C0321)
seven = eight # Unused variable "seven" (W0612), undefined variable "eight" (E1101)
return one + two + nine | [
"def",
"my_fun",
"(",
"one",
",",
"two",
",",
"three",
",",
"four",
",",
"five",
",",
"six",
")",
":",
"# pylint: disable=W0613",
"one",
"+=",
"1",
"two",
"+=",
"2",
"# More than one statement on a single line (C0321)",
"seven",
"=",
"eight",
"# Unused variable \"seven\" (W0612), undefined variable \"eight\" (E1101)",
"return",
"one",
"+",
"two",
"+",
"nine"
] | [
3,
0
] | [
7,
27
] | python | en | ['en', 'en', 'en'] | True |
export_answers_to_csv | (agg_results: list, output_file) |
Exports answers coming from finder.get_answers() to a CSV file
:param agg_results: list of predictions coming from finder.get_answers()
:param output_file: filename of output file
:return: None
|
Exports answers coming from finder.get_answers() to a CSV file
:param agg_results: list of predictions coming from finder.get_answers()
:param output_file: filename of output file
:return: None
| def export_answers_to_csv(agg_results: list, output_file):
"""
Exports answers coming from finder.get_answers() to a CSV file
:param agg_results: list of predictions coming from finder.get_answers()
:param output_file: filename of output file
:return: None
"""
if isinstance(agg_results, dict):
agg_results = [agg_results]
assert "query" in agg_results[0], f"Wrong format used for {agg_results[0]}"
assert "answers" in agg_results[0], f"Wrong format used for {agg_results[0]}"
data = {} # type: Dict[str, List[Any]]
data["query"] = []
data["prediction"] = []
data["prediction_rank"] = []
data["prediction_context"] = []
for res in agg_results:
for i in range(len(res["answers"])):
temp = res["answers"][i]
data["query"].append(res["query"])
data["prediction"].append(temp["answer"])
data["prediction_rank"].append(i + 1)
data["prediction_context"].append(temp["context"])
df = pd.DataFrame(data)
df.to_csv(output_file, index=False) | [
"def",
"export_answers_to_csv",
"(",
"agg_results",
":",
"list",
",",
"output_file",
")",
":",
"if",
"isinstance",
"(",
"agg_results",
",",
"dict",
")",
":",
"agg_results",
"=",
"[",
"agg_results",
"]",
"assert",
"\"query\"",
"in",
"agg_results",
"[",
"0",
"]",
",",
"f\"Wrong format used for {agg_results[0]}\"",
"assert",
"\"answers\"",
"in",
"agg_results",
"[",
"0",
"]",
",",
"f\"Wrong format used for {agg_results[0]}\"",
"data",
"=",
"{",
"}",
"# type: Dict[str, List[Any]]",
"data",
"[",
"\"query\"",
"]",
"=",
"[",
"]",
"data",
"[",
"\"prediction\"",
"]",
"=",
"[",
"]",
"data",
"[",
"\"prediction_rank\"",
"]",
"=",
"[",
"]",
"data",
"[",
"\"prediction_context\"",
"]",
"=",
"[",
"]",
"for",
"res",
"in",
"agg_results",
":",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"res",
"[",
"\"answers\"",
"]",
")",
")",
":",
"temp",
"=",
"res",
"[",
"\"answers\"",
"]",
"[",
"i",
"]",
"data",
"[",
"\"query\"",
"]",
".",
"append",
"(",
"res",
"[",
"\"query\"",
"]",
")",
"data",
"[",
"\"prediction\"",
"]",
".",
"append",
"(",
"temp",
"[",
"\"answer\"",
"]",
")",
"data",
"[",
"\"prediction_rank\"",
"]",
".",
"append",
"(",
"i",
"+",
"1",
")",
"data",
"[",
"\"prediction_context\"",
"]",
".",
"append",
"(",
"temp",
"[",
"\"context\"",
"]",
")",
"df",
"=",
"pd",
".",
"DataFrame",
"(",
"data",
")",
"df",
".",
"to_csv",
"(",
"output_file",
",",
"index",
"=",
"False",
")"
] | [
33,
0
] | [
61,
39
] | python | en | ['en', 'error', 'th'] | False |
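The input shape export_answers_to_csv expects, shown with made-up values: one dict per query, each carrying a ranked answers list whose entries have "answer" and "context" keys.

agg_results = [
    {
        "query": "Who wrote Faust?",
        "answers": [
            {"answer": "Goethe", "context": "... Faust was written by Goethe ..."},
            {"answer": "Johann Wolfgang von Goethe", "context": "... published in 1808 ..."},
        ],
    }
]
export_answers_to_csv(agg_results, "answers.csv")
# answers.csv then holds the columns: query, prediction, prediction_rank, prediction_context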
convert_labels_to_squad | (labels_file: str) |
Convert the export from the labeling UI to SQuAD format for training.
:param labels_file: path for export file from the labeling tool
:return:
|
Convert the export from the labeling UI to SQuAD format for training. | def convert_labels_to_squad(labels_file: str):
"""
Convert the export from the labeling UI to SQuAD format for training.
:param labels_file: path for export file from the labeling tool
:return:
"""
with open(labels_file, encoding='utf-8') as label_file:
labels = json.load(label_file)
labels_grouped_by_documents = defaultdict(list)
for label in labels:
labels_grouped_by_documents[label["document_id"]].append(label)
labels_in_squad_format = {"data": []} # type: Dict[str, Any]
for document_id, labels in labels_grouped_by_documents.items():
qas = []
for label in labels:
doc = DocumentORM.query.get(label["document_id"])
assert (
doc.text[label["start_offset"] : label["end_offset"]]
== label["selected_text"]
)
qas.append(
{
"question": label["question"],
"id": label["id"],
"question_id": label["question_id"],
"answers": [
{
"text": label["selected_text"],
"answer_start": label["start_offset"],
"labeller_id": label["labeler_id"],
}
],
"is_impossible": False,
}
)
squad_format_label = {
"paragraphs": [
{"qas": qas, "context": doc.text, "document_id": document_id}
]
}
labels_in_squad_format["data"].append(squad_format_label)
with open("labels_in_squad_format.json", "w+", encoding='utf-8') as outfile:
json.dump(labels_in_squad_format, outfile) | [
"def",
"convert_labels_to_squad",
"(",
"labels_file",
":",
"str",
")",
":",
"with",
"open",
"(",
"labels_file",
",",
"encoding",
"=",
"'utf-8'",
")",
"as",
"label_file",
":",
"labels",
"=",
"json",
".",
"load",
"(",
"label_file",
")",
"labels_grouped_by_documents",
"=",
"defaultdict",
"(",
"list",
")",
"for",
"label",
"in",
"labels",
":",
"labels_grouped_by_documents",
"[",
"label",
"[",
"\"document_id\"",
"]",
"]",
".",
"append",
"(",
"label",
")",
"labels_in_squad_format",
"=",
"{",
"\"data\"",
":",
"[",
"]",
"}",
"# type: Dict[str, Any]",
"for",
"document_id",
",",
"labels",
"in",
"labels_grouped_by_documents",
".",
"items",
"(",
")",
":",
"qas",
"=",
"[",
"]",
"for",
"label",
"in",
"labels",
":",
"doc",
"=",
"DocumentORM",
".",
"query",
".",
"get",
"(",
"label",
"[",
"\"document_id\"",
"]",
")",
"assert",
"(",
"doc",
".",
"text",
"[",
"label",
"[",
"\"start_offset\"",
"]",
":",
"label",
"[",
"\"end_offset\"",
"]",
"]",
"==",
"label",
"[",
"\"selected_text\"",
"]",
")",
"qas",
".",
"append",
"(",
"{",
"\"question\"",
":",
"label",
"[",
"\"question\"",
"]",
",",
"\"id\"",
":",
"label",
"[",
"\"id\"",
"]",
",",
"\"question_id\"",
":",
"label",
"[",
"\"question_id\"",
"]",
",",
"\"answers\"",
":",
"[",
"{",
"\"text\"",
":",
"label",
"[",
"\"selected_text\"",
"]",
",",
"\"answer_start\"",
":",
"label",
"[",
"\"start_offset\"",
"]",
",",
"\"labeller_id\"",
":",
"label",
"[",
"\"labeler_id\"",
"]",
",",
"}",
"]",
",",
"\"is_impossible\"",
":",
"False",
",",
"}",
")",
"squad_format_label",
"=",
"{",
"\"paragraphs\"",
":",
"[",
"{",
"\"qas\"",
":",
"qas",
",",
"\"context\"",
":",
"doc",
".",
"text",
",",
"\"document_id\"",
":",
"document_id",
"}",
"]",
"}",
"labels_in_squad_format",
"[",
"\"data\"",
"]",
".",
"append",
"(",
"squad_format_label",
")",
"with",
"open",
"(",
"\"labels_in_squad_format.json\"",
",",
"\"w+\"",
",",
"encoding",
"=",
"'utf-8'",
")",
"as",
"outfile",
":",
"json",
".",
"dump",
"(",
"labels_in_squad_format",
",",
"outfile",
")"
] | [
65,
0
] | [
115,
50
] | python | en | ['en', 'error', 'th'] | False |
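For reference, the structure written to labels_in_squad_format.json by the function above looks like the sketch below; all values are illustrative, not taken from any real export.

labels_in_squad_format = {
    "data": [
        {
            "paragraphs": [
                {
                    "context": "Full document text ...",
                    "document_id": 17,
                    "qas": [
                        {
                            "question": "What is the capital of France?",
                            "id": 3,
                            "question_id": 1,
                            "answers": [
                                {"text": "Paris", "answer_start": 42, "labeller_id": 7}
                            ],
                            "is_impossible": False,
                        }
                    ],
                }
            ]
        }
    ]
}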
get_batches_from_generator | (iterable, n) |
Batch elements of an iterable into fixed-length chunks or blocks.
|
Batch elements of an iterable into fixed-length chunks or blocks.
| def get_batches_from_generator(iterable, n):
"""
Batch elements of an iterable into fixed-length chunks or blocks.
"""
it = iter(iterable)
x = tuple(islice(it, n))
while x:
yield x
x = tuple(islice(it, n)) | [
"def",
"get_batches_from_generator",
"(",
"iterable",
",",
"n",
")",
":",
"it",
"=",
"iter",
"(",
"iterable",
")",
"x",
"=",
"tuple",
"(",
"islice",
"(",
"it",
",",
"n",
")",
")",
"while",
"x",
":",
"yield",
"x",
"x",
"=",
"tuple",
"(",
"islice",
"(",
"it",
",",
"n",
")",
")"
] | [
118,
0
] | [
126,
32
] | python | en | ['en', 'error', 'th'] | False |
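Usage is straightforward (assuming the generator above is in scope); the final batch may be shorter than n:

for batch in get_batches_from_generator(range(7), 3):
    print(batch)
# (0, 1, 2)
# (3, 4, 5)
# (6,)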
initialized_project | (mock_webbrowser, tmp_path_factory) | This is an initialized project through the CLI. | This is an initialized project through the CLI. | def initialized_project(mock_webbrowser, tmp_path_factory):
"""This is an initialized project through the CLI."""
project_dir = str(tmp_path_factory.mktemp("my_rad_project"))
os.makedirs(os.path.join(project_dir, "data"))
data_folder_path = os.path.join(project_dir, "data")
data_path = os.path.join(project_dir, "data/Titanic.csv")
fixture_path = file_relative_path(__file__, "../../test_sets/Titanic.csv")
shutil.copy(fixture_path, data_path)
runner = CliRunner(mix_stderr=False)
_ = runner.invoke(
cli,
["init", "-d", project_dir],
input="\n\n1\n1\n{}\n\n\n\n2\n{}\n\n\n\n".format(data_folder_path, data_path),
catch_exceptions=False,
)
assert mock_webbrowser.call_count == 1
assert (
"{}/great_expectations/uncommitted/data_docs/local_site/validations/Titanic/warning/".format(
project_dir
)
in mock_webbrowser.call_args[0][0]
)
context = DataContext(os.path.join(project_dir, DataContext.GE_DIR))
assert isinstance(context, DataContext)
assert len(context.list_datasources()) == 1
return project_dir | [
"def",
"initialized_project",
"(",
"mock_webbrowser",
",",
"tmp_path_factory",
")",
":",
"project_dir",
"=",
"str",
"(",
"tmp_path_factory",
".",
"mktemp",
"(",
"\"my_rad_project\"",
")",
")",
"os",
".",
"makedirs",
"(",
"os",
".",
"path",
".",
"join",
"(",
"project_dir",
",",
"\"data\"",
")",
")",
"data_folder_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"project_dir",
",",
"\"data\"",
")",
"data_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"project_dir",
",",
"\"data/Titanic.csv\"",
")",
"fixture_path",
"=",
"file_relative_path",
"(",
"__file__",
",",
"\"../../test_sets/Titanic.csv\"",
")",
"shutil",
".",
"copy",
"(",
"fixture_path",
",",
"data_path",
")",
"runner",
"=",
"CliRunner",
"(",
"mix_stderr",
"=",
"False",
")",
"_",
"=",
"runner",
".",
"invoke",
"(",
"cli",
",",
"[",
"\"init\"",
",",
"\"-d\"",
",",
"project_dir",
"]",
",",
"input",
"=",
"\"\\n\\n1\\n1\\n{}\\n\\n\\n\\n2\\n{}\\n\\n\\n\\n\"",
".",
"format",
"(",
"data_folder_path",
",",
"data_path",
")",
",",
"catch_exceptions",
"=",
"False",
",",
")",
"assert",
"mock_webbrowser",
".",
"call_count",
"==",
"1",
"assert",
"(",
"\"{}/great_expectations/uncommitted/data_docs/local_site/validations/Titanic/warning/\"",
".",
"format",
"(",
"project_dir",
")",
"in",
"mock_webbrowser",
".",
"call_args",
"[",
"0",
"]",
"[",
"0",
"]",
")",
"context",
"=",
"DataContext",
"(",
"os",
".",
"path",
".",
"join",
"(",
"project_dir",
",",
"DataContext",
".",
"GE_DIR",
")",
")",
"assert",
"isinstance",
"(",
"context",
",",
"DataContext",
")",
"assert",
"len",
"(",
"context",
".",
"list_datasources",
"(",
")",
")",
"==",
"1",
"return",
"project_dir"
] | [
274,
0
] | [
300,
22
] | python | en | ['en', 'en', 'en'] | True |
scheduler_trace | () | Returns a scheduler-dependent value we can use to check determinism. | Returns a scheduler-dependent value we can use to check determinism. | async def scheduler_trace():
"""Returns a scheduler-dependent value we can use to check determinism."""
trace = []
async def tracer(name):
for i in range(50):
trace.append((name, i))
await trio.sleep(0)
async with trio.open_nursery() as nursery:
for i in range(5):
nursery.start_soon(tracer, i)
return tuple(trace) | [
"async",
"def",
"scheduler_trace",
"(",
")",
":",
"trace",
"=",
"[",
"]",
"async",
"def",
"tracer",
"(",
"name",
")",
":",
"for",
"i",
"in",
"range",
"(",
"50",
")",
":",
"trace",
".",
"append",
"(",
"(",
"name",
",",
"i",
")",
")",
"await",
"trio",
".",
"sleep",
"(",
"0",
")",
"async",
"with",
"trio",
".",
"open_nursery",
"(",
")",
"as",
"nursery",
":",
"for",
"i",
"in",
"range",
"(",
"5",
")",
":",
"nursery",
".",
"start_soon",
"(",
"tracer",
",",
"i",
")",
"return",
"tuple",
"(",
"trace",
")"
] | [
3,
0
] | [
16,
23
] | python | en | ['en', 'en', 'en'] | True |
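scheduler_trace is a test helper; a hypothetical driver just runs it under Trio and inspects the trace (5 tasks x 50 steps = 250 entries). Comparing traces from separate runs is how scheduling determinism would be checked.

import trio

trace = trio.run(scheduler_trace)
print(len(trace))  # 250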
store | (ctx) | Store operations | Store operations | def store(ctx):
"""Store operations"""
directory: str = toolkit.parse_cli_config_file_location(
config_file_location=ctx.obj.config_file_location
).get("directory")
context: DataContext = toolkit.load_data_context_with_error_handling(
directory=directory,
from_cli_upgrade_command=False,
)
# TODO consider moving this all the way up in to the CLIState constructor
ctx.obj.data_context = context
usage_stats_prefix = f"cli.store.{ctx.invoked_subcommand}"
toolkit.send_usage_message(
data_context=context,
event=f"{usage_stats_prefix}.begin",
success=True,
)
ctx.obj.usage_event_end = f"{usage_stats_prefix}.end" | [
"def",
"store",
"(",
"ctx",
")",
":",
"directory",
":",
"str",
"=",
"toolkit",
".",
"parse_cli_config_file_location",
"(",
"config_file_location",
"=",
"ctx",
".",
"obj",
".",
"config_file_location",
")",
".",
"get",
"(",
"\"directory\"",
")",
"context",
":",
"DataContext",
"=",
"toolkit",
".",
"load_data_context_with_error_handling",
"(",
"directory",
"=",
"directory",
",",
"from_cli_upgrade_command",
"=",
"False",
",",
")",
"# TODO consider moving this all the way up in to the CLIState constructor",
"ctx",
".",
"obj",
".",
"data_context",
"=",
"context",
"usage_stats_prefix",
"=",
"f\"cli.store.{ctx.invoked_subcommand}\"",
"toolkit",
".",
"send_usage_message",
"(",
"data_context",
"=",
"context",
",",
"event",
"=",
"f\"{usage_stats_prefix}.begin\"",
",",
"success",
"=",
"True",
",",
")",
"ctx",
".",
"obj",
".",
"usage_event_end",
"=",
"f\"{usage_stats_prefix}.end\""
] | [
9,
0
] | [
27,
57
] | python | en | ['en', 'en', 'en'] | False |
store_list | (ctx) | List active Stores. | List active Stores. | def store_list(ctx):
"""List active Stores."""
context = ctx.obj.data_context
usage_event_end: str = ctx.obj.usage_event_end
try:
stores = context.list_active_stores()
cli_message(f"{len(stores)} active Stores found:")
for store in stores:
cli_message("")
cli_message_dict(store)
toolkit.send_usage_message(
data_context=context, event=usage_event_end, success=True
)
except Exception as e:
toolkit.exit_with_failure_message_and_stats(
context=context,
usage_event=usage_event_end,
message=f"<red>{e}</red>",
)
return | [
"def",
"store_list",
"(",
"ctx",
")",
":",
"context",
"=",
"ctx",
".",
"obj",
".",
"data_context",
"usage_event_end",
":",
"str",
"=",
"ctx",
".",
"obj",
".",
"usage_event_end",
"try",
":",
"stores",
"=",
"context",
".",
"list_active_stores",
"(",
")",
"cli_message",
"(",
"f\"{len(stores)} active Stores found:\"",
")",
"for",
"store",
"in",
"stores",
":",
"cli_message",
"(",
"\"\"",
")",
"cli_message_dict",
"(",
"store",
")",
"toolkit",
".",
"send_usage_message",
"(",
"data_context",
"=",
"context",
",",
"event",
"=",
"usage_event_end",
",",
"success",
"=",
"True",
")",
"except",
"Exception",
"as",
"e",
":",
"toolkit",
".",
"exit_with_failure_message_and_stats",
"(",
"context",
"=",
"context",
",",
"usage_event",
"=",
"usage_event_end",
",",
"message",
"=",
"f\"<red>{e}</red>\"",
",",
")",
"return"
] | [
32,
0
] | [
52,
14
] | python | en | ['es', 'en', 'en'] | True |