Column schema: `identifier` (string, length 1–155); `parameters` (string, 2–6.09k); `docstring` (string, 11–63.4k); `docstring_summary` (string, 0–63.4k); `function` (string, 29–99.8k); `function_tokens` (sequence); `start_point` (sequence); `end_point` (sequence); `language` (string, 1 class); `docstring_language` (string, 2–7); `docstring_language_predictions` (string, 18–23); `is_langid_reliable` (string, 2 classes).

identifier | parameters | docstring | docstring_summary | function | function_tokens | start_point | end_point | language | docstring_language | docstring_language_predictions | is_langid_reliable
---|---|---|---|---|---|---|---|---|---|---|---
UnboundedQueue.get_batch_nowait | (self) | Attempt to get the next batch from the queue, without blocking.
Returns:
list: A list of dequeued items, in order. On a successful call this
list is always non-empty; if it would be empty we raise
:exc:`~trio.WouldBlock` instead.
Raises:
~trio.WouldBlock: if the queue is empty.
| Attempt to get the next batch from the queue, without blocking. | def get_batch_nowait(self):
"""Attempt to get the next batch from the queue, without blocking.
Returns:
list: A list of dequeued items, in order. On a successful call this
list is always non-empty; if it would be empty we raise
:exc:`~trio.WouldBlock` instead.
Raises:
~trio.WouldBlock: if the queue is empty.
"""
if not self._can_get:
raise _core.WouldBlock
return self._get_batch_protected() | [
"def",
"get_batch_nowait",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"_can_get",
":",
"raise",
"_core",
".",
"WouldBlock",
"return",
"self",
".",
"_get_batch_protected",
"(",
")"
] | [
96,
4
] | [
110,
42
] | python | en | ['en', 'en', 'en'] | True |
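A minimal non-blocking consumer sketch for the row above, assuming `queue` is an `UnboundedQueue` instance and that `trio.WouldBlock` is the exception raised on an empty queue, as the docstring states:

```python
import trio

def drain_pending(queue):
    # Grab whatever is queued right now; an empty queue raises
    # rather than returning an empty list.
    try:
        return queue.get_batch_nowait()
    except trio.WouldBlock:
        return []
```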
UnboundedQueue.get_batch | (self) | Get the next batch from the queue, blocking as necessary.
Returns:
list: A list of dequeued items, in order. This list is always
non-empty.
| Get the next batch from the queue, blocking as necessary. | async def get_batch(self):
"""Get the next batch from the queue, blocking as necessary.
Returns:
list: A list of dequeued items, in order. This list is always
non-empty.
"""
await _core.checkpoint_if_cancelled()
if not self._can_get:
await self._lot.park()
return self._get_batch_protected()
else:
try:
return self._get_batch_protected()
finally:
await _core.cancel_shielded_checkpoint() | [
"async",
"def",
"get_batch",
"(",
"self",
")",
":",
"await",
"_core",
".",
"checkpoint_if_cancelled",
"(",
")",
"if",
"not",
"self",
".",
"_can_get",
":",
"await",
"self",
".",
"_lot",
".",
"park",
"(",
")",
"return",
"self",
".",
"_get_batch_protected",
"(",
")",
"else",
":",
"try",
":",
"return",
"self",
".",
"_get_batch_protected",
"(",
")",
"finally",
":",
"await",
"_core",
".",
"cancel_shielded_checkpoint",
"(",
")"
] | [
112,
4
] | [
128,
56
] | python | en | ['en', 'en', 'en'] | True |
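A consumer-loop sketch for `get_batch`, assuming a producer task feeds `queue` elsewhere; `handle` is a hypothetical per-item callback:

```python
async def consume(queue, handle):
    while True:
        # Parks the task until at least one item arrives,
        # so each batch is always non-empty.
        batch = await queue.get_batch()
        for item in batch:
            handle(item)  # hypothetical per-item handler
```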
UnboundedQueue.statistics | (self) | Return an object containing debugging information.
Currently the following fields are defined:
* ``qsize``: The number of items currently in the queue.
* ``tasks_waiting``: The number of tasks blocked on this queue's
:meth:`get_batch` method.
| Return an object containing debugging information. | def statistics(self):
"""Return an object containing debugging information.
Currently the following fields are defined:
* ``qsize``: The number of items currently in the queue.
* ``tasks_waiting``: The number of tasks blocked on this queue's
:meth:`get_batch` method.
"""
return _UnboundedQueueStats(
qsize=len(self._data), tasks_waiting=self._lot.statistics().tasks_waiting
) | [
"def",
"statistics",
"(",
"self",
")",
":",
"return",
"_UnboundedQueueStats",
"(",
"qsize",
"=",
"len",
"(",
"self",
".",
"_data",
")",
",",
"tasks_waiting",
"=",
"self",
".",
"_lot",
".",
"statistics",
"(",
")",
".",
"tasks_waiting",
")"
] | [
130,
4
] | [
142,
9
] | python | en | ['id', 'en', 'en'] | True |
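An illustrative read of the debugging stats, assuming `queue` is an `UnboundedQueue`; both fields come straight from the returned stats object:

```python
stats = queue.statistics()
print(f"{stats.qsize} items queued, {stats.tasks_waiting} tasks waiting")
```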
IRCORS._cors_normalize | (value: Any) |
List values get turned into a comma-separated string. Other values
are returned unaltered.
|
List values get turned into a comma-separated string. Other values
are returned unaltered.
| def _cors_normalize(value: Any) -> Any:
"""
List values get turned into a comma-separated string. Other values
are returned unaltered.
"""
if type(value) == list:
return ", ".join([ str(x) for x in value ])
else:
return value | [
"def",
"_cors_normalize",
"(",
"value",
":",
"Any",
")",
"->",
"Any",
":",
"if",
"type",
"(",
"value",
")",
"==",
"list",
":",
"return",
"\", \"",
".",
"join",
"(",
"[",
"str",
"(",
"x",
")",
"for",
"x",
"in",
"value",
"]",
")",
"else",
":",
"return",
"value"
] | [
68,
4
] | [
77,
24
] | python | en | ['en', 'error', 'th'] | False |
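A quick sanity check of the normalization rule (a sketch, assuming the method is a staticmethod as its `self`-less signature suggests): lists collapse to a comma-separated string, everything else passes through unchanged.

```python
assert IRCORS._cors_normalize(["GET", "POST", 3]) == "GET, POST, 3"
assert IRCORS._cors_normalize("GET") == "GET"
assert IRCORS._cors_normalize(42) == 42
```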
clean_text | (text) |
Preprocess text for NLP applications: Lowercase, remove markups
:param text: raw text
:return: processed tokenized text
|
Preprocess text for NLP applications: Lowercase, remove markups
:param text: raw text
:return: processed tokenized text
| def clean_text(text):
"""
Preprocess text for NLP applications: Lowercase, remove markups
:param text: raw text
:return: processed tokenized text
"""
# Lowercase text
text = text.lower()
# Remove carriage returns and new lines
text = re.sub('\r?\n|\r', ' ', text)
# Remove Trademarks, Copyright, and Registered
text = re.sub('(™|®|©|™|®|©|™|®|©)', '', text)
return text | [
"def",
"clean_text",
"(",
"text",
")",
":",
"# Lowercase text",
"text",
"=",
"text",
".",
"lower",
"(",
")",
"# Remove carriage returns and new lines",
"text",
"=",
"re",
".",
"sub",
"(",
"'\\r?\\n|\\r'",
",",
"' '",
",",
"text",
")",
"# Remove Trademarks, Copyright, and Registered",
"text",
"=",
"re",
".",
"sub",
"(",
"'(™|®|©|™|®|©|™|®|©)', ''",
",",
"te",
"x",
")",
"",
"return",
"text"
] | [
19,
0
] | [
34,
15
] | python | en | ['en', 'error', 'th'] | False |
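A hypothetical round trip through `clean_text`, assuming `re` is imported at module level as the function body requires:

```python
raw = "Acme\r\nWidget™ Pro"
print(clean_text(raw))  # -> "acme widget pro": lowercased, newline and ™ removed
```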
remove_custom_stopwords | (text) |
Removes custom stopwords from text
:param text:
:return:
|
Removes custom stopwords from text
:param text:
:return:
| def remove_custom_stopwords(text):
"""
Removes custom stopwords from text
:param text:
:return:
"""
tokens = spacy_tokenization(text)
tokens_no_stop = [token for token in tokens if token not in custom_stop_words]
return ' '.join(tokens_no_stop) | [
"def",
"remove_custom_stopwords",
"(",
"text",
")",
":",
"tokens",
"=",
"spacy_tokenization",
"(",
"text",
")",
"tokens_no_stop",
"=",
"[",
"token",
"for",
"token",
"in",
"tokens",
"if",
"token",
"not",
"in",
"custom_stop_words",
"]",
"return",
"' '",
".",
"join",
"(",
"tokens_no_stop",
")"
] | [
37,
0
] | [
48,
35
] | python | en | ['en', 'error', 'th'] | False |
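A sketch of the filter in use; `custom_stop_words` and the `nlp` pipeline are module-level globals the function closes over, so the value below is an assumption:

```python
custom_stop_words = {"please", "kindly"}  # assumed contents
print(remove_custom_stopwords("please restart the server kindly"))
# -> "restart the server"
```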
spacy_tokenization | (text) |
Leverage SpaCy for tokenization
:param text:
:return:
|
Leverage SpaCy for tokenization
:param text:
:return:
| def spacy_tokenization(text):
"""
Leverage SpaCy for tokenization
:param text:
:return:
"""
doc = nlp(text)
tokens = []
for token in doc:
if not token.is_punct:
tokens.append(token.text)
return tokens | [
"def",
"spacy_tokenization",
"(",
"text",
")",
":",
"doc",
"=",
"nlp",
"(",
"text",
")",
"tokens",
"=",
"[",
"]",
"for",
"token",
"in",
"doc",
":",
"if",
"not",
"token",
".",
"is_punct",
":",
"tokens",
".",
"append",
"(",
"token",
".",
"text",
")",
"return",
"tokens"
] | [
51,
0
] | [
66,
17
] | python | en | ['en', 'error', 'th'] | False |
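A minimal sketch, assuming the module-level `nlp` pipeline the function reads has been created; the model choice here is an assumption:

```python
import spacy

nlp = spacy.load("en_core_web_lg")  # assumed model
print(spacy_tokenization("Hello, world!"))  # -> ['Hello', 'world'] (punctuation dropped)
```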
spacy_embedding | (iterable_of_text, nlp=nlp) |
Embed text with SpaCy
:param iterable_of_text:
:param nlp: SpaCy Model, default to en_core_web_lg
:return: Mapped vectors from SpaCy simplex
|
Embed text with SpaCy
:param iterable_of_text:
:param nlp: SpaCy Model, default to en_core_web_lg
:return: Mapped vectors from SpaCy simplex
| def spacy_embedding(iterable_of_text, nlp=nlp):
"""
Embed text with SpaCy
:param iterable_of_text:
:param nlp: SpaCy Model, default to en_core_web_lg
:return: Mapped vectors from SpaCy simplex
"""
counter = 0
embeddings = []
for iterable in iterable_of_text:
embeddings.append(nlp(iterable).vector)
counter += 1
if counter % 100 == 0:
print(counter)
result = np.stack(embeddings, axis=0)
return result | [
"def",
"spacy_embedding",
"(",
"iterable_of_text",
",",
"nlp",
"=",
"nlp",
")",
":",
"counter",
"=",
"0",
"embeddings",
"=",
"[",
"]",
"for",
"iterable",
"in",
"iterable_of_text",
":",
"embeddings",
".",
"append",
"(",
"nlp",
"(",
"iterable",
")",
".",
"vector",
")",
"counter",
"+=",
"1",
"if",
"counter",
"%",
"100",
"==",
"0",
":",
"print",
"(",
"counter",
")",
"result",
"=",
"np",
".",
"stack",
"(",
"embeddings",
",",
"axis",
"=",
"0",
")",
"return",
"result"
] | [
69,
0
] | [
90,
17
] | python | en | ['en', 'error', 'th'] | False |
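Embedding a small corpus with the same assumed `nlp` pipeline; each row of the stacked result is one document vector (300-dimensional for `en_core_web_lg`):

```python
vectors = spacy_embedding(["first document", "second document"])
print(vectors.shape)  # -> (2, 300)
```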
test_golden_path_sql_datasource_configuration | (
mock_emit, empty_data_context_stats_enabled, sa, test_connectable_postgresql_db
) | Tests the golden path for setting up a StreamlinedSQLDatasource using test_yaml_config | Tests the golden path for setting up a StreamlinedSQLDatasource using test_yaml_config | def test_golden_path_sql_datasource_configuration(
mock_emit, empty_data_context_stats_enabled, sa, test_connectable_postgresql_db
):
"""Tests the golden path for setting up a StreamlinedSQLDatasource using test_yaml_config"""
context: DataContext = empty_data_context_stats_enabled
os.chdir(context.root_directory)
# Everything below this line (except for asserts) is what we expect users to run as part of the golden path.
import great_expectations as ge
context = ge.get_context()
db_hostname = os.getenv("GE_TEST_LOCAL_DB_HOSTNAME", "localhost")
yaml_config = f"""
class_name: SimpleSqlalchemyDatasource
credentials:
drivername: postgresql
username: postgres
password: ""
host: {db_hostname}
port: 5432
database: test_ci
introspection:
whole_table_with_limits:
sampling_method: _sample_using_limit
sampling_kwargs:
n: 10
"""
# noinspection PyUnusedLocal
report_object = context.test_yaml_config(
name="my_datasource",
yaml_config=yaml_config,
return_mode="report_object",
)
assert mock_emit.call_count == 2
# Substitute anonymized names since it changes for each run
anonymized_datasource_name = mock_emit.call_args_list[1][0][0]["event_payload"][
"anonymized_name"
]
anonymized_data_connector_name = mock_emit.call_args_list[1][0][0]["event_payload"][
"anonymized_data_connectors"
][0]["anonymized_name"]
expected_call_args_list = [
mock.call(
{"event_payload": {}, "event": "data_context.__init__", "success": True}
),
mock.call(
{
"event": "data_context.test_yaml_config",
"event_payload": {
"anonymized_name": anonymized_datasource_name,
"parent_class": "SimpleSqlalchemyDatasource",
"anonymized_execution_engine": {
"parent_class": "SqlAlchemyExecutionEngine"
},
"anonymized_data_connectors": [
{
"anonymized_name": anonymized_data_connector_name,
"parent_class": "InferredAssetSqlDataConnector",
}
],
},
"success": True,
}
),
]
assert mock_emit.call_args_list == expected_call_args_list
print(json.dumps(report_object, indent=2))
print(context.datasources)
my_batch = context.get_batch(
"my_datasource",
"whole_table_with_limits",
"test_df",
)
# assert len(my_batch.data.fetchall()) == 10
with pytest.raises(KeyError):
my_batch = context.get_batch(
"my_datasource",
"whole_table_with_limits",
"DOES_NOT_EXIST",
)
my_validator = context.get_validator(
datasource_name="my_datasource",
data_connector_name="whole_table_with_limits",
data_asset_name="test_df",
expectation_suite=ExpectationSuite("my_expectation_suite"),
)
my_evr = my_validator.expect_table_columns_to_match_set(column_set=[])
print(my_evr) | [
"def",
"test_golden_path_sql_datasource_configuration",
"(",
"mock_emit",
",",
"empty_data_context_stats_enabled",
",",
"sa",
",",
"test_connectable_postgresql_db",
")",
":",
"context",
":",
"DataContext",
"=",
"empty_data_context_stats_enabled",
"os",
".",
"chdir",
"(",
"context",
".",
"root_directory",
")",
"# Everything below this line (except for asserts) is what we expect users to run as part of the golden path.",
"import",
"great_expectations",
"as",
"ge",
"context",
"=",
"ge",
".",
"get_context",
"(",
")",
"db_hostname",
"=",
"os",
".",
"getenv",
"(",
"\"GE_TEST_LOCAL_DB_HOSTNAME\"",
",",
"\"localhost\"",
")",
"yaml_config",
"=",
"f\"\"\"\nclass_name: SimpleSqlalchemyDatasource\ncredentials:\n drivername: postgresql\n username: postgres\n password: \"\"\n host: {db_hostname}\n port: 5432\n database: test_ci\n\nintrospection:\n whole_table_with_limits:\n sampling_method: _sample_using_limit\n sampling_kwargs:\n n: 10\n\"\"\"",
"# noinspection PyUnusedLocal",
"report_object",
"=",
"context",
".",
"test_yaml_config",
"(",
"name",
"=",
"\"my_datasource\"",
",",
"yaml_config",
"=",
"yaml_config",
",",
"return_mode",
"=",
"\"report_object\"",
",",
")",
"assert",
"mock_emit",
".",
"call_count",
"==",
"2",
"# Substitute anonymized names since it changes for each run",
"anonymized_datasource_name",
"=",
"mock_emit",
".",
"call_args_list",
"[",
"1",
"]",
"[",
"0",
"]",
"[",
"0",
"]",
"[",
"\"event_payload\"",
"]",
"[",
"\"anonymized_name\"",
"]",
"anonymized_data_connector_name",
"=",
"mock_emit",
".",
"call_args_list",
"[",
"1",
"]",
"[",
"0",
"]",
"[",
"0",
"]",
"[",
"\"event_payload\"",
"]",
"[",
"\"anonymized_data_connectors\"",
"]",
"[",
"0",
"]",
"[",
"\"anonymized_name\"",
"]",
"expected_call_args_list",
"=",
"[",
"mock",
".",
"call",
"(",
"{",
"\"event_payload\"",
":",
"{",
"}",
",",
"\"event\"",
":",
"\"data_context.__init__\"",
",",
"\"success\"",
":",
"True",
"}",
")",
",",
"mock",
".",
"call",
"(",
"{",
"\"event\"",
":",
"\"data_context.test_yaml_config\"",
",",
"\"event_payload\"",
":",
"{",
"\"anonymized_name\"",
":",
"anonymized_datasource_name",
",",
"\"parent_class\"",
":",
"\"SimpleSqlalchemyDatasource\"",
",",
"\"anonymized_execution_engine\"",
":",
"{",
"\"parent_class\"",
":",
"\"SqlAlchemyExecutionEngine\"",
"}",
",",
"\"anonymized_data_connectors\"",
":",
"[",
"{",
"\"anonymized_name\"",
":",
"anonymized_data_connector_name",
",",
"\"parent_class\"",
":",
"\"InferredAssetSqlDataConnector\"",
",",
"}",
"]",
",",
"}",
",",
"\"success\"",
":",
"True",
",",
"}",
")",
",",
"]",
"assert",
"mock_emit",
".",
"call_args_list",
"==",
"expected_call_args_list",
"print",
"(",
"json",
".",
"dumps",
"(",
"report_object",
",",
"indent",
"=",
"2",
")",
")",
"print",
"(",
"context",
".",
"datasources",
")",
"my_batch",
"=",
"context",
".",
"get_batch",
"(",
"\"my_datasource\"",
",",
"\"whole_table_with_limits\"",
",",
"\"test_df\"",
",",
")",
"# assert len(my_batch.data.fetchall()) == 10",
"with",
"pytest",
".",
"raises",
"(",
"KeyError",
")",
":",
"my_batch",
"=",
"context",
".",
"get_batch",
"(",
"\"my_datasource\"",
",",
"\"whole_table_with_limits\"",
",",
"\"DOES_NOT_EXIST\"",
",",
")",
"my_validator",
"=",
"context",
".",
"get_validator",
"(",
"datasource_name",
"=",
"\"my_datasource\"",
",",
"data_connector_name",
"=",
"\"whole_table_with_limits\"",
",",
"data_asset_name",
"=",
"\"test_df\"",
",",
"expectation_suite",
"=",
"ExpectationSuite",
"(",
"\"my_expectation_suite\"",
")",
",",
")",
"my_evr",
"=",
"my_validator",
".",
"expect_table_columns_to_match_set",
"(",
"column_set",
"=",
"[",
"]",
")",
"print",
"(",
"my_evr",
")"
] | [
619,
0
] | [
713,
17
] | python | en | ['en', 'en', 'en'] | True |
test_golden_path_inferred_asset_pandas_datasource_configuration | (
mock_emit, empty_data_context_stats_enabled, test_df, tmp_path_factory
) |
Tests the golden path for InferredAssetFilesystemDataConnector with PandasExecutionEngine using test_yaml_config
|
Tests the golden path for InferredAssetFilesystemDataConnector with PandasExecutionEngine using test_yaml_config
| def test_golden_path_inferred_asset_pandas_datasource_configuration(
mock_emit, empty_data_context_stats_enabled, test_df, tmp_path_factory
):
"""
Tests the golden path for InferredAssetFilesystemDataConnector with PandasExecutionEngine using test_yaml_config
"""
base_directory = str(
tmp_path_factory.mktemp("test_golden_path_pandas_datasource_configuration")
)
create_files_in_directory(
directory=base_directory,
file_name_list=[
"test_dir_charlie/A/A-1.csv",
"test_dir_charlie/A/A-2.csv",
"test_dir_charlie/A/A-3.csv",
"test_dir_charlie/B/B-1.csv",
"test_dir_charlie/B/B-2.csv",
"test_dir_charlie/B/B-3.csv",
"test_dir_charlie/C/C-1.csv",
"test_dir_charlie/C/C-2.csv",
"test_dir_charlie/C/C-3.csv",
"test_dir_charlie/D/D-1.csv",
"test_dir_charlie/D/D-2.csv",
"test_dir_charlie/D/D-3.csv",
],
file_content_fn=lambda: test_df.to_csv(header=True, index=False),
)
context: DataContext = empty_data_context_stats_enabled
os.chdir(context.root_directory)
import great_expectations as ge
context = ge.get_context()
mock_emit.reset_mock() # Remove data_context.__init__ call
yaml_config = f"""
class_name: Datasource
execution_engine:
class_name: PandasExecutionEngine
data_connectors:
my_filesystem_data_connector:
class_name: InferredAssetFilesystemDataConnector
base_directory: {base_directory}/test_dir_charlie
glob_directive: "*/*.csv"
default_regex:
pattern: (.+)/(.+)-(\\d+)\\.csv
group_names:
- subdirectory
- data_asset_name
- number
"""
# noinspection PyUnusedLocal
report_object = context.test_yaml_config(
name="my_directory_datasource",
yaml_config=yaml_config,
return_mode="report_object",
)
# print(json.dumps(report_object, indent=2))
# print(context.datasources)
assert mock_emit.call_count == 1
# Substitute anonymized names since it changes for each run
anonymized_datasource_name = mock_emit.call_args_list[0][0][0]["event_payload"][
"anonymized_name"
]
anonymized_execution_engine_name = mock_emit.call_args_list[0][0][0][
"event_payload"
]["anonymized_execution_engine"]["anonymized_name"]
anonymized_data_connector_name = mock_emit.call_args_list[0][0][0]["event_payload"][
"anonymized_data_connectors"
][0]["anonymized_name"]
expected_call_args_list = [
mock.call(
{
"event": "data_context.test_yaml_config",
"event_payload": {
"anonymized_name": anonymized_datasource_name,
"parent_class": "Datasource",
"anonymized_execution_engine": {
"anonymized_name": anonymized_execution_engine_name,
"parent_class": "PandasExecutionEngine",
},
"anonymized_data_connectors": [
{
"anonymized_name": anonymized_data_connector_name,
"parent_class": "InferredAssetFilesystemDataConnector",
}
],
},
"success": True,
}
),
]
assert mock_emit.call_args_list == expected_call_args_list
my_batch = context.get_batch(
datasource_name="my_directory_datasource",
data_connector_name="my_filesystem_data_connector",
data_asset_name="A",
batch_identifiers={
"number": "2",
},
batch_spec_passthrough={
"sampling_method": "_sample_using_hash",
"sampling_kwargs": {
"column_name": "date",
"hash_function_name": "md5",
"hash_value": "f",
},
},
)
assert my_batch.batch_definition["data_asset_name"] == "A"
df_data = my_batch.data.dataframe
assert df_data.shape == (10, 10)
df_data["date"] = df_data.apply(
lambda row: datetime.datetime.strptime(row["date"], "%Y-%m-%d").date(), axis=1
)
assert (
test_df[
(test_df["date"] == datetime.date(2020, 1, 15))
| (test_df["date"] == datetime.date(2020, 1, 29))
]
.drop("timestamp", axis=1)
.equals(df_data.drop("timestamp", axis=1))
)
with pytest.raises(ValueError):
# noinspection PyUnusedLocal
my_batch = context.get_batch(
datasource_name="my_directory_datasource",
data_connector_name="my_filesystem_data_connector",
data_asset_name="DOES_NOT_EXIST",
)
my_validator = context.get_validator(
datasource_name="my_directory_datasource",
data_connector_name="my_filesystem_data_connector",
data_asset_name="D",
data_connector_query={"batch_filter_parameters": {"number": "3"}},
expectation_suite=ExpectationSuite("my_expectation_suite"),
batch_spec_passthrough={
"sampling_method": "_sample_using_hash",
"sampling_kwargs": {
"column_name": "date",
"hash_function_name": "md5",
"hash_value": "f",
},
},
)
my_evr = my_validator.expect_column_values_to_be_between(
column="d", min_value=1, max_value=31
)
assert my_evr.success
# TODO: <Alex>ALEX</Alex>
# my_evr = my_validator.expect_table_columns_to_match_ordered_list(ordered_list=["x", "y", "z"])
# assert my_evr.success
# No other usage stats calls detected
assert mock_emit.call_count == 1 | [
"def",
"test_golden_path_inferred_asset_pandas_datasource_configuration",
"(",
"mock_emit",
",",
"empty_data_context_stats_enabled",
",",
"test_df",
",",
"tmp_path_factory",
")",
":",
"base_directory",
"=",
"str",
"(",
"tmp_path_factory",
".",
"mktemp",
"(",
"\"test_golden_path_pandas_datasource_configuration\"",
")",
")",
"create_files_in_directory",
"(",
"directory",
"=",
"base_directory",
",",
"file_name_list",
"=",
"[",
"\"test_dir_charlie/A/A-1.csv\"",
",",
"\"test_dir_charlie/A/A-2.csv\"",
",",
"\"test_dir_charlie/A/A-3.csv\"",
",",
"\"test_dir_charlie/B/B-1.csv\"",
",",
"\"test_dir_charlie/B/B-2.csv\"",
",",
"\"test_dir_charlie/B/B-3.csv\"",
",",
"\"test_dir_charlie/C/C-1.csv\"",
",",
"\"test_dir_charlie/C/C-2.csv\"",
",",
"\"test_dir_charlie/C/C-3.csv\"",
",",
"\"test_dir_charlie/D/D-1.csv\"",
",",
"\"test_dir_charlie/D/D-2.csv\"",
",",
"\"test_dir_charlie/D/D-3.csv\"",
",",
"]",
",",
"file_content_fn",
"=",
"lambda",
":",
"test_df",
".",
"to_csv",
"(",
"header",
"=",
"True",
",",
"index",
"=",
"False",
")",
",",
")",
"context",
":",
"DataContext",
"=",
"empty_data_context_stats_enabled",
"os",
".",
"chdir",
"(",
"context",
".",
"root_directory",
")",
"import",
"great_expectations",
"as",
"ge",
"context",
"=",
"ge",
".",
"get_context",
"(",
")",
"mock_emit",
".",
"reset_mock",
"(",
")",
"# Remove data_context.__init__ call",
"yaml_config",
"=",
"f\"\"\"\nclass_name: Datasource\n\nexecution_engine:\n class_name: PandasExecutionEngine\n\ndata_connectors:\n my_filesystem_data_connector:\n class_name: InferredAssetFilesystemDataConnector\n base_directory: {base_directory}/test_dir_charlie\n glob_directive: \"*/*.csv\"\n\n default_regex:\n pattern: (.+)/(.+)-(\\\\d+)\\\\.csv\n group_names:\n - subdirectory\n - data_asset_name\n - number\n\"\"\"",
"# noinspection PyUnusedLocal",
"report_object",
"=",
"context",
".",
"test_yaml_config",
"(",
"name",
"=",
"\"my_directory_datasource\"",
",",
"yaml_config",
"=",
"yaml_config",
",",
"return_mode",
"=",
"\"report_object\"",
",",
")",
"# print(json.dumps(report_object, indent=2))",
"# print(context.datasources)",
"assert",
"mock_emit",
".",
"call_count",
"==",
"1",
"# Substitute anonymized names since it changes for each run",
"anonymized_datasource_name",
"=",
"mock_emit",
".",
"call_args_list",
"[",
"0",
"]",
"[",
"0",
"]",
"[",
"0",
"]",
"[",
"\"event_payload\"",
"]",
"[",
"\"anonymized_name\"",
"]",
"anonymized_execution_engine_name",
"=",
"mock_emit",
".",
"call_args_list",
"[",
"0",
"]",
"[",
"0",
"]",
"[",
"0",
"]",
"[",
"\"event_payload\"",
"]",
"[",
"\"anonymized_execution_engine\"",
"]",
"[",
"\"anonymized_name\"",
"]",
"anonymized_data_connector_name",
"=",
"mock_emit",
".",
"call_args_list",
"[",
"0",
"]",
"[",
"0",
"]",
"[",
"0",
"]",
"[",
"\"event_payload\"",
"]",
"[",
"\"anonymized_data_connectors\"",
"]",
"[",
"0",
"]",
"[",
"\"anonymized_name\"",
"]",
"expected_call_args_list",
"=",
"[",
"mock",
".",
"call",
"(",
"{",
"\"event\"",
":",
"\"data_context.test_yaml_config\"",
",",
"\"event_payload\"",
":",
"{",
"\"anonymized_name\"",
":",
"anonymized_datasource_name",
",",
"\"parent_class\"",
":",
"\"Datasource\"",
",",
"\"anonymized_execution_engine\"",
":",
"{",
"\"anonymized_name\"",
":",
"anonymized_execution_engine_name",
",",
"\"parent_class\"",
":",
"\"PandasExecutionEngine\"",
",",
"}",
",",
"\"anonymized_data_connectors\"",
":",
"[",
"{",
"\"anonymized_name\"",
":",
"anonymized_data_connector_name",
",",
"\"parent_class\"",
":",
"\"InferredAssetFilesystemDataConnector\"",
",",
"}",
"]",
",",
"}",
",",
"\"success\"",
":",
"True",
",",
"}",
")",
",",
"]",
"assert",
"mock_emit",
".",
"call_args_list",
"==",
"expected_call_args_list",
"my_batch",
"=",
"context",
".",
"get_batch",
"(",
"datasource_name",
"=",
"\"my_directory_datasource\"",
",",
"data_connector_name",
"=",
"\"my_filesystem_data_connector\"",
",",
"data_asset_name",
"=",
"\"A\"",
",",
"batch_identifiers",
"=",
"{",
"\"number\"",
":",
"\"2\"",
",",
"}",
",",
"batch_spec_passthrough",
"=",
"{",
"\"sampling_method\"",
":",
"\"_sample_using_hash\"",
",",
"\"sampling_kwargs\"",
":",
"{",
"\"column_name\"",
":",
"\"date\"",
",",
"\"hash_function_name\"",
":",
"\"md5\"",
",",
"\"hash_value\"",
":",
"\"f\"",
",",
"}",
",",
"}",
",",
")",
"assert",
"my_batch",
".",
"batch_definition",
"[",
"\"data_asset_name\"",
"]",
"==",
"\"A\"",
"df_data",
"=",
"my_batch",
".",
"data",
".",
"dataframe",
"assert",
"df_data",
".",
"shape",
"==",
"(",
"10",
",",
"10",
")",
"df_data",
"[",
"\"date\"",
"]",
"=",
"df_data",
".",
"apply",
"(",
"lambda",
"row",
":",
"datetime",
".",
"datetime",
".",
"strptime",
"(",
"row",
"[",
"\"date\"",
"]",
",",
"\"%Y-%m-%d\"",
")",
".",
"date",
"(",
")",
",",
"axis",
"=",
"1",
")",
"assert",
"(",
"test_df",
"[",
"(",
"test_df",
"[",
"\"date\"",
"]",
"==",
"datetime",
".",
"date",
"(",
"2020",
",",
"1",
",",
"15",
")",
")",
"|",
"(",
"test_df",
"[",
"\"date\"",
"]",
"==",
"datetime",
".",
"date",
"(",
"2020",
",",
"1",
",",
"29",
")",
")",
"]",
".",
"drop",
"(",
"\"timestamp\"",
",",
"axis",
"=",
"1",
")",
".",
"equals",
"(",
"df_data",
".",
"drop",
"(",
"\"timestamp\"",
",",
"axis",
"=",
"1",
")",
")",
")",
"with",
"pytest",
".",
"raises",
"(",
"ValueError",
")",
":",
"# noinspection PyUnusedLocal",
"my_batch",
"=",
"context",
".",
"get_batch",
"(",
"datasource_name",
"=",
"\"my_directory_datasource\"",
",",
"data_connector_name",
"=",
"\"my_filesystem_data_connector\"",
",",
"data_asset_name",
"=",
"\"DOES_NOT_EXIST\"",
",",
")",
"my_validator",
"=",
"context",
".",
"get_validator",
"(",
"datasource_name",
"=",
"\"my_directory_datasource\"",
",",
"data_connector_name",
"=",
"\"my_filesystem_data_connector\"",
",",
"data_asset_name",
"=",
"\"D\"",
",",
"data_connector_query",
"=",
"{",
"\"batch_filter_parameters\"",
":",
"{",
"\"number\"",
":",
"\"3\"",
"}",
"}",
",",
"expectation_suite",
"=",
"ExpectationSuite",
"(",
"\"my_expectation_suite\"",
")",
",",
"batch_spec_passthrough",
"=",
"{",
"\"sampling_method\"",
":",
"\"_sample_using_hash\"",
",",
"\"sampling_kwargs\"",
":",
"{",
"\"column_name\"",
":",
"\"date\"",
",",
"\"hash_function_name\"",
":",
"\"md5\"",
",",
"\"hash_value\"",
":",
"\"f\"",
",",
"}",
",",
"}",
",",
")",
"my_evr",
"=",
"my_validator",
".",
"expect_column_values_to_be_between",
"(",
"column",
"=",
"\"d\"",
",",
"min_value",
"=",
"1",
",",
"max_value",
"=",
"31",
")",
"assert",
"my_evr",
".",
"success",
"# TODO: <Alex>ALEX</Alex>",
"# my_evr = my_validator.expect_table_columns_to_match_ordered_list(ordered_list=[\"x\", \"y\", \"z\"])",
"# assert my_evr.success",
"# No other usage stats calls detected",
"assert",
"mock_emit",
".",
"call_count",
"==",
"1"
] | [
730,
0
] | [
895,
36
] | python | en | ['en', 'error', 'th'] | False |
test_golden_path_configured_asset_pandas_datasource_configuration | (
mock_emit, empty_data_context_stats_enabled, test_df, tmp_path_factory
) |
Tests the golden path for InferredAssetFilesystemDataConnector with PandasExecutionEngine using test_yaml_config
|
Tests the golden path for InferredAssetFilesystemDataConnector with PandasExecutionEngine using test_yaml_config
| def test_golden_path_configured_asset_pandas_datasource_configuration(
mock_emit, empty_data_context_stats_enabled, test_df, tmp_path_factory
):
"""
Tests the golden path for InferredAssetFilesystemDataConnector with PandasExecutionEngine using test_yaml_config
"""
base_directory = str(
tmp_path_factory.mktemp("test_golden_path_pandas_datasource_configuration")
)
create_files_in_directory(
directory=base_directory,
file_name_list=[
"test_dir_foxtrot/A/A-1.csv",
"test_dir_foxtrot/A/A-2.csv",
"test_dir_foxtrot/A/A-3.csv",
"test_dir_foxtrot/B/B-1.txt",
"test_dir_foxtrot/B/B-2.txt",
"test_dir_foxtrot/B/B-3.txt",
"test_dir_foxtrot/C/C-2017.csv",
"test_dir_foxtrot/C/C-2018.csv",
"test_dir_foxtrot/C/C-2019.csv",
"test_dir_foxtrot/D/D-aaa.csv",
"test_dir_foxtrot/D/D-bbb.csv",
"test_dir_foxtrot/D/D-ccc.csv",
"test_dir_foxtrot/D/D-ddd.csv",
"test_dir_foxtrot/D/D-eee.csv",
],
file_content_fn=lambda: test_df.to_csv(header=True, index=False),
)
context: DataContext = empty_data_context_stats_enabled
os.chdir(context.root_directory)
import great_expectations as ge
context = ge.get_context()
mock_emit.reset_mock() # Remove data_context.__init__ call
yaml_config = f"""
class_name: Datasource
execution_engine:
class_name: PandasExecutionEngine
data_connectors:
my_filesystem_data_connector:
class_name: ConfiguredAssetFilesystemDataConnector
base_directory: {base_directory}
# glob_directive: "*"
default_regex:
pattern: (.+)\\.csv
group_names:
- alphanumeric
assets:
A:
base_directory: {base_directory}/test_dir_foxtrot/A
pattern: (.+)-(\\d+)\\.csv
group_names:
- letter
- number
B:
base_directory: {base_directory}/test_dir_foxtrot/B
pattern: (.+)-(\\d+)\\.csv
group_names:
- letter
- number
C:
base_directory: {base_directory}/test_dir_foxtrot/C
pattern: (.+)-(\\d+)\\.csv
group_names:
- letter
- year
D:
base_directory: {base_directory}/test_dir_foxtrot/D
pattern: (.+)-(\\d+)\\.csv
group_names:
- letter
- checksum
"""
# noinspection PyUnusedLocal
report_object = context.test_yaml_config(
name="my_directory_datasource",
yaml_config=yaml_config,
return_mode="report_object",
)
# print(json.dumps(report_object, indent=2))
# print(context.datasources)
assert mock_emit.call_count == 1
# Substitute anonymized names since it changes for each run
anonymized_datasource_name = mock_emit.call_args_list[0][0][0]["event_payload"][
"anonymized_name"
]
anonymized_execution_engine_name = mock_emit.call_args_list[0][0][0][
"event_payload"
]["anonymized_execution_engine"]["anonymized_name"]
anonymized_data_connector_name = mock_emit.call_args_list[0][0][0]["event_payload"][
"anonymized_data_connectors"
][0]["anonymized_name"]
expected_call_args_list = [
mock.call(
{
"event": "data_context.test_yaml_config",
"event_payload": {
"anonymized_name": anonymized_datasource_name,
"parent_class": "Datasource",
"anonymized_execution_engine": {
"anonymized_name": anonymized_execution_engine_name,
"parent_class": "PandasExecutionEngine",
},
"anonymized_data_connectors": [
{
"anonymized_name": anonymized_data_connector_name,
"parent_class": "ConfiguredAssetFilesystemDataConnector",
}
],
},
"success": True,
}
),
]
assert mock_emit.call_args_list == expected_call_args_list
my_batch = context.get_batch(
datasource_name="my_directory_datasource",
data_connector_name="my_filesystem_data_connector",
data_asset_name="A",
batch_identifiers={
"number": "2",
},
batch_spec_passthrough={
"sampling_method": "_sample_using_hash",
"sampling_kwargs": {
"column_name": "date",
"hash_function_name": "md5",
"hash_value": "f",
},
},
)
assert my_batch.batch_definition["data_asset_name"] == "A"
my_batch.head()
df_data = my_batch.data.dataframe
assert df_data.shape == (10, 10)
df_data["date"] = df_data.apply(
lambda row: datetime.datetime.strptime(row["date"], "%Y-%m-%d").date(), axis=1
)
assert (
test_df[
(test_df["date"] == datetime.date(2020, 1, 15))
| (test_df["date"] == datetime.date(2020, 1, 29))
]
.drop("timestamp", axis=1)
.equals(df_data.drop("timestamp", axis=1))
)
with pytest.raises(ValueError):
# noinspection PyUnusedLocal
my_batch = context.get_batch(
datasource_name="my_directory_datasource",
data_connector_name="my_filesystem_data_connector",
data_asset_name="DOES_NOT_EXIST",
)
my_validator = context.get_validator(
datasource_name="my_directory_datasource",
data_connector_name="my_filesystem_data_connector",
data_asset_name="C",
data_connector_query={"batch_filter_parameters": {"year": "2019"}},
create_expectation_suite_with_name="my_expectations",
batch_spec_passthrough={
"sampling_method": "_sample_using_hash",
"sampling_kwargs": {
"column_name": "date",
"hash_function_name": "md5",
"hash_value": "f",
},
},
)
my_evr = my_validator.expect_column_values_to_be_between(
column="d", min_value=1, max_value=31
)
assert my_evr.success
# my_evr = my_validator.expect_table_columns_to_match_ordered_list(ordered_list=["x", "y", "z"])
# assert my_evr.success
# No other usage stats calls detected
assert mock_emit.call_count == 1 | [
"def",
"test_golden_path_configured_asset_pandas_datasource_configuration",
"(",
"mock_emit",
",",
"empty_data_context_stats_enabled",
",",
"test_df",
",",
"tmp_path_factory",
")",
":",
"base_directory",
"=",
"str",
"(",
"tmp_path_factory",
".",
"mktemp",
"(",
"\"test_golden_path_pandas_datasource_configuration\"",
")",
")",
"create_files_in_directory",
"(",
"directory",
"=",
"base_directory",
",",
"file_name_list",
"=",
"[",
"\"test_dir_foxtrot/A/A-1.csv\"",
",",
"\"test_dir_foxtrot/A/A-2.csv\"",
",",
"\"test_dir_foxtrot/A/A-3.csv\"",
",",
"\"test_dir_foxtrot/B/B-1.txt\"",
",",
"\"test_dir_foxtrot/B/B-2.txt\"",
",",
"\"test_dir_foxtrot/B/B-3.txt\"",
",",
"\"test_dir_foxtrot/C/C-2017.csv\"",
",",
"\"test_dir_foxtrot/C/C-2018.csv\"",
",",
"\"test_dir_foxtrot/C/C-2019.csv\"",
",",
"\"test_dir_foxtrot/D/D-aaa.csv\"",
",",
"\"test_dir_foxtrot/D/D-bbb.csv\"",
",",
"\"test_dir_foxtrot/D/D-ccc.csv\"",
",",
"\"test_dir_foxtrot/D/D-ddd.csv\"",
",",
"\"test_dir_foxtrot/D/D-eee.csv\"",
",",
"]",
",",
"file_content_fn",
"=",
"lambda",
":",
"test_df",
".",
"to_csv",
"(",
"header",
"=",
"True",
",",
"index",
"=",
"False",
")",
",",
")",
"context",
":",
"DataContext",
"=",
"empty_data_context_stats_enabled",
"os",
".",
"chdir",
"(",
"context",
".",
"root_directory",
")",
"import",
"great_expectations",
"as",
"ge",
"context",
"=",
"ge",
".",
"get_context",
"(",
")",
"mock_emit",
".",
"reset_mock",
"(",
")",
"# Remove data_context.__init__ call",
"yaml_config",
"=",
"f\"\"\"\nclass_name: Datasource\n\nexecution_engine:\n class_name: PandasExecutionEngine\n\ndata_connectors:\n my_filesystem_data_connector:\n class_name: ConfiguredAssetFilesystemDataConnector\n base_directory: {base_directory}\n # glob_directive: \"*\"\n\n default_regex:\n pattern: (.+)\\\\.csv\n group_names:\n - alphanumeric\n\n assets:\n A:\n base_directory: {base_directory}/test_dir_foxtrot/A\n pattern: (.+)-(\\\\d+)\\\\.csv\n group_names:\n - letter\n - number\n B:\n base_directory: {base_directory}/test_dir_foxtrot/B\n pattern: (.+)-(\\\\d+)\\\\.csv\n group_names:\n - letter\n - number\n C:\n base_directory: {base_directory}/test_dir_foxtrot/C\n pattern: (.+)-(\\\\d+)\\\\.csv\n group_names:\n - letter\n - year\n D:\n base_directory: {base_directory}/test_dir_foxtrot/D\n pattern: (.+)-(\\\\d+)\\\\.csv\n group_names:\n - letter\n - checksum\n\"\"\"",
"# noinspection PyUnusedLocal",
"report_object",
"=",
"context",
".",
"test_yaml_config",
"(",
"name",
"=",
"\"my_directory_datasource\"",
",",
"yaml_config",
"=",
"yaml_config",
",",
"return_mode",
"=",
"\"report_object\"",
",",
")",
"# print(json.dumps(report_object, indent=2))",
"# print(context.datasources)",
"assert",
"mock_emit",
".",
"call_count",
"==",
"1",
"# Substitute anonymized names since it changes for each run",
"anonymized_datasource_name",
"=",
"mock_emit",
".",
"call_args_list",
"[",
"0",
"]",
"[",
"0",
"]",
"[",
"0",
"]",
"[",
"\"event_payload\"",
"]",
"[",
"\"anonymized_name\"",
"]",
"anonymized_execution_engine_name",
"=",
"mock_emit",
".",
"call_args_list",
"[",
"0",
"]",
"[",
"0",
"]",
"[",
"0",
"]",
"[",
"\"event_payload\"",
"]",
"[",
"\"anonymized_execution_engine\"",
"]",
"[",
"\"anonymized_name\"",
"]",
"anonymized_data_connector_name",
"=",
"mock_emit",
".",
"call_args_list",
"[",
"0",
"]",
"[",
"0",
"]",
"[",
"0",
"]",
"[",
"\"event_payload\"",
"]",
"[",
"\"anonymized_data_connectors\"",
"]",
"[",
"0",
"]",
"[",
"\"anonymized_name\"",
"]",
"expected_call_args_list",
"=",
"[",
"mock",
".",
"call",
"(",
"{",
"\"event\"",
":",
"\"data_context.test_yaml_config\"",
",",
"\"event_payload\"",
":",
"{",
"\"anonymized_name\"",
":",
"anonymized_datasource_name",
",",
"\"parent_class\"",
":",
"\"Datasource\"",
",",
"\"anonymized_execution_engine\"",
":",
"{",
"\"anonymized_name\"",
":",
"anonymized_execution_engine_name",
",",
"\"parent_class\"",
":",
"\"PandasExecutionEngine\"",
",",
"}",
",",
"\"anonymized_data_connectors\"",
":",
"[",
"{",
"\"anonymized_name\"",
":",
"anonymized_data_connector_name",
",",
"\"parent_class\"",
":",
"\"ConfiguredAssetFilesystemDataConnector\"",
",",
"}",
"]",
",",
"}",
",",
"\"success\"",
":",
"True",
",",
"}",
")",
",",
"]",
"assert",
"mock_emit",
".",
"call_args_list",
"==",
"expected_call_args_list",
"my_batch",
"=",
"context",
".",
"get_batch",
"(",
"datasource_name",
"=",
"\"my_directory_datasource\"",
",",
"data_connector_name",
"=",
"\"my_filesystem_data_connector\"",
",",
"data_asset_name",
"=",
"\"A\"",
",",
"batch_identifiers",
"=",
"{",
"\"number\"",
":",
"\"2\"",
",",
"}",
",",
"batch_spec_passthrough",
"=",
"{",
"\"sampling_method\"",
":",
"\"_sample_using_hash\"",
",",
"\"sampling_kwargs\"",
":",
"{",
"\"column_name\"",
":",
"\"date\"",
",",
"\"hash_function_name\"",
":",
"\"md5\"",
",",
"\"hash_value\"",
":",
"\"f\"",
",",
"}",
",",
"}",
",",
")",
"assert",
"my_batch",
".",
"batch_definition",
"[",
"\"data_asset_name\"",
"]",
"==",
"\"A\"",
"my_batch",
".",
"head",
"(",
")",
"df_data",
"=",
"my_batch",
".",
"data",
".",
"dataframe",
"assert",
"df_data",
".",
"shape",
"==",
"(",
"10",
",",
"10",
")",
"df_data",
"[",
"\"date\"",
"]",
"=",
"df_data",
".",
"apply",
"(",
"lambda",
"row",
":",
"datetime",
".",
"datetime",
".",
"strptime",
"(",
"row",
"[",
"\"date\"",
"]",
",",
"\"%Y-%m-%d\"",
")",
".",
"date",
"(",
")",
",",
"axis",
"=",
"1",
")",
"assert",
"(",
"test_df",
"[",
"(",
"test_df",
"[",
"\"date\"",
"]",
"==",
"datetime",
".",
"date",
"(",
"2020",
",",
"1",
",",
"15",
")",
")",
"|",
"(",
"test_df",
"[",
"\"date\"",
"]",
"==",
"datetime",
".",
"date",
"(",
"2020",
",",
"1",
",",
"29",
")",
")",
"]",
".",
"drop",
"(",
"\"timestamp\"",
",",
"axis",
"=",
"1",
")",
".",
"equals",
"(",
"df_data",
".",
"drop",
"(",
"\"timestamp\"",
",",
"axis",
"=",
"1",
")",
")",
")",
"with",
"pytest",
".",
"raises",
"(",
"ValueError",
")",
":",
"# noinspection PyUnusedLocal",
"my_batch",
"=",
"context",
".",
"get_batch",
"(",
"datasource_name",
"=",
"\"my_directory_datasource\"",
",",
"data_connector_name",
"=",
"\"my_filesystem_data_connector\"",
",",
"data_asset_name",
"=",
"\"DOES_NOT_EXIST\"",
",",
")",
"my_validator",
"=",
"context",
".",
"get_validator",
"(",
"datasource_name",
"=",
"\"my_directory_datasource\"",
",",
"data_connector_name",
"=",
"\"my_filesystem_data_connector\"",
",",
"data_asset_name",
"=",
"\"C\"",
",",
"data_connector_query",
"=",
"{",
"\"batch_filter_parameters\"",
":",
"{",
"\"year\"",
":",
"\"2019\"",
"}",
"}",
",",
"create_expectation_suite_with_name",
"=",
"\"my_expectations\"",
",",
"batch_spec_passthrough",
"=",
"{",
"\"sampling_method\"",
":",
"\"_sample_using_hash\"",
",",
"\"sampling_kwargs\"",
":",
"{",
"\"column_name\"",
":",
"\"date\"",
",",
"\"hash_function_name\"",
":",
"\"md5\"",
",",
"\"hash_value\"",
":",
"\"f\"",
",",
"}",
",",
"}",
",",
")",
"my_evr",
"=",
"my_validator",
".",
"expect_column_values_to_be_between",
"(",
"column",
"=",
"\"d\"",
",",
"min_value",
"=",
"1",
",",
"max_value",
"=",
"31",
")",
"assert",
"my_evr",
".",
"success",
"# my_evr = my_validator.expect_table_columns_to_match_ordered_list(ordered_list=[\"x\", \"y\", \"z\"])",
"# assert my_evr.success",
"# No other usage stats calls detected",
"assert",
"mock_emit",
".",
"call_count",
"==",
"1"
] | [
901,
0
] | [
1093,
36
] | python | en | ['en', 'error', 'th'] | False |
is_token_subtype | (ttype, other) |
Return True if ``ttype`` is a subtype of ``other``.
exists for backwards compatibility. use ``ttype in other`` now.
|
Return True if ``ttype`` is a subtype of ``other``. | def is_token_subtype(ttype, other):
"""
Return True if ``ttype`` is a subtype of ``other``.
exists for backwards compatibility. use ``ttype in other`` now.
"""
return ttype in other | [
"def",
"is_token_subtype",
"(",
"ttype",
",",
"other",
")",
":",
"return",
"ttype",
"in",
"other"
] | [
84,
0
] | [
90,
25
] | python | en | ['en', 'error', 'th'] | False |
string_to_tokentype | (s) |
Convert a string into a token type::
>>> string_to_token('String.Double')
Token.Literal.String.Double
>>> string_to_token('Token.Literal.Number')
Token.Literal.Number
>>> string_to_token('')
Token
Tokens that are already tokens are returned unchanged:
>>> string_to_token(String)
Token.Literal.String
|
Convert a string into a token type:: | def string_to_tokentype(s):
"""
Convert a string into a token type::
>>> string_to_token('String.Double')
Token.Literal.String.Double
>>> string_to_token('Token.Literal.Number')
Token.Literal.Number
>>> string_to_token('')
Token
Tokens that are already tokens are returned unchanged:
>>> string_to_token(String)
Token.Literal.String
"""
if isinstance(s, _TokenType):
return s
if not s:
return Token
node = Token
for item in s.split('.'):
node = getattr(node, item)
return node | [
"def",
"string_to_tokentype",
"(",
"s",
")",
":",
"if",
"isinstance",
"(",
"s",
",",
"_TokenType",
")",
":",
"return",
"s",
"if",
"not",
"s",
":",
"return",
"Token",
"node",
"=",
"Token",
"for",
"item",
"in",
"s",
".",
"split",
"(",
"'.'",
")",
":",
"node",
"=",
"getattr",
"(",
"node",
",",
"item",
")",
"return",
"node"
] | [
93,
0
] | [
116,
15
] | python | en | ['en', 'error', 'th'] | False |
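Both pygments helpers in one illustrative snippet; all names are real exports of `pygments.token`:

```python
from pygments.token import Token, String, is_token_subtype, string_to_tokentype

assert string_to_tokentype("String.Double") is Token.Literal.String.Double
assert string_to_tokentype("") is Token
assert is_token_subtype(Token.Literal.String.Double, String)  # i.e. ttype in other
```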
infer_camera_intrinsics | (points2d, points3d) | Infer camera intrinsics from 2D<->3D point correspondences. | Infer camera intrinsics from 2D<->3D point correspondences. | def infer_camera_intrinsics(points2d, points3d):
"""Infer camera intrinsics from 2D<->3D point correspondences."""
pose2d = points2d.reshape(-1, 2)
pose3d = points3d.reshape(-1, 3)
x3d = np.stack([pose3d[:, 0], pose3d[:, 2]], axis=-1)
x2d = (pose2d[:, 0] * pose3d[:, 2])
alpha_x, x_0 = list(np.linalg.lstsq(x3d, x2d, rcond=-1)[0].flatten())
y3d = np.stack([pose3d[:, 1], pose3d[:, 2]], axis=-1)
y2d = (pose2d[:, 1] * pose3d[:, 2])
alpha_y, y_0 = list(np.linalg.lstsq(y3d, y2d, rcond=-1)[0].flatten())
return np.array([alpha_x, x_0, alpha_y, y_0]) | [
"def",
"infer_camera_intrinsics",
"(",
"points2d",
",",
"points3d",
")",
":",
"pose2d",
"=",
"points2d",
".",
"reshape",
"(",
"-",
"1",
",",
"2",
")",
"pose3d",
"=",
"points3d",
".",
"reshape",
"(",
"-",
"1",
",",
"3",
")",
"x3d",
"=",
"np",
".",
"stack",
"(",
"[",
"pose3d",
"[",
":",
",",
"0",
"]",
",",
"pose3d",
"[",
":",
",",
"2",
"]",
"]",
",",
"axis",
"=",
"-",
"1",
")",
"x2d",
"=",
"(",
"pose2d",
"[",
":",
",",
"0",
"]",
"*",
"pose3d",
"[",
":",
",",
"2",
"]",
")",
"alpha_x",
",",
"x_0",
"=",
"list",
"(",
"np",
".",
"linalg",
".",
"lstsq",
"(",
"x3d",
",",
"x2d",
",",
"rcond",
"=",
"-",
"1",
")",
"[",
"0",
"]",
".",
"flatten",
"(",
")",
")",
"y3d",
"=",
"np",
".",
"stack",
"(",
"[",
"pose3d",
"[",
":",
",",
"1",
"]",
",",
"pose3d",
"[",
":",
",",
"2",
"]",
"]",
",",
"axis",
"=",
"-",
"1",
")",
"y2d",
"=",
"(",
"pose2d",
"[",
":",
",",
"1",
"]",
"*",
"pose3d",
"[",
":",
",",
"2",
"]",
")",
"alpha_y",
",",
"y_0",
"=",
"list",
"(",
"np",
".",
"linalg",
".",
"lstsq",
"(",
"y3d",
",",
"y2d",
",",
"rcond",
"=",
"-",
"1",
")",
"[",
"0",
"]",
".",
"flatten",
"(",
")",
")",
"return",
"np",
".",
"array",
"(",
"[",
"alpha_x",
",",
"x_0",
",",
"alpha_y",
",",
"y_0",
"]",
")"
] | [
56,
0
] | [
66,
49
] | python | en | ['en', 'en', 'en'] | True |
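A synthetic round-trip sketch: project random 3D points with known pinhole parameters u = fx*X/Z + cx, v = fy*Y/Z + cy, then recover them with the least-squares fit above. The return order is (alpha_x, x_0, alpha_y, y_0):

```python
import numpy as np

rng = np.random.default_rng(0)
pts3d = rng.uniform(1.0, 5.0, size=(100, 3))            # keep Z positive
fx, cx, fy, cy = 1145.0, 512.5, 1143.0, 515.5           # ground-truth intrinsics
pts2d = np.stack([fx * pts3d[:, 0] / pts3d[:, 2] + cx,
                  fy * pts3d[:, 1] / pts3d[:, 2] + cy], axis=-1)
print(infer_camera_intrinsics(pts2d, pts3d))            # ≈ [1145., 512.5, 1143., 515.5]
```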
project_to_2d | (X, camera_params) |
Project 3D points to 2D using the Human3.6M camera projection function.
This is a differentiable and batched reimplementation of the original MATLAB script.
Arguments:
X -- 3D points in *camera space* to transform (N, *, 3)
camera_params -- intrinsic parameters (N, 2+2+3+2=9)
|
Project 3D points to 2D using the Human3.6M camera projection function.
This is a differentiable and batched reimplementation of the original MATLAB script. | def project_to_2d(X, camera_params):
"""
Project 3D points to 2D using the Human3.6M camera projection function.
This is a differentiable and batched reimplementation of the original MATLAB script.
Arguments:
X -- 3D points in *camera space* to transform (N, *, 3)
camera_params -- intrinsic parameters (N, 2+2+3+2=9)
"""
assert X.shape[-1] == 3
assert len(camera_params.shape) == 2
assert camera_params.shape[-1] == 9
assert X.shape[0] == camera_params.shape[0]
while len(camera_params.shape) < len(X.shape):
camera_params = camera_params.unsqueeze(1)
f = camera_params[..., :2]
c = camera_params[..., 2:4]
k = camera_params[..., 4:7]
p = camera_params[..., 7:]
XX = torch.clamp(X[..., :2] / X[..., 2:], min=-1, max=1)
r2 = torch.sum(XX[..., :2] ** 2, dim=len(XX.shape) - 1, keepdim=True)
radial = 1 + torch.sum(k * torch.cat((r2, r2 ** 2, r2 ** 3), dim=len(r2.shape) - 1), dim=len(r2.shape) - 1,
keepdim=True)
tan = torch.sum(p * XX, dim=len(XX.shape) - 1, keepdim=True)
XXX = XX * (radial + tan) + p * r2
return f * XXX + c | [
"def",
"project_to_2d",
"(",
"X",
",",
"camera_params",
")",
":",
"assert",
"X",
".",
"shape",
"[",
"-",
"1",
"]",
"==",
"3",
"assert",
"len",
"(",
"camera_params",
".",
"shape",
")",
"==",
"2",
"assert",
"camera_params",
".",
"shape",
"[",
"-",
"1",
"]",
"==",
"9",
"assert",
"X",
".",
"shape",
"[",
"0",
"]",
"==",
"camera_params",
".",
"shape",
"[",
"0",
"]",
"while",
"len",
"(",
"camera_params",
".",
"shape",
")",
"<",
"len",
"(",
"X",
".",
"shape",
")",
":",
"camera_params",
"=",
"camera_params",
".",
"unsqueeze",
"(",
"1",
")",
"f",
"=",
"camera_params",
"[",
"...",
",",
":",
"2",
"]",
"c",
"=",
"camera_params",
"[",
"...",
",",
"2",
":",
"4",
"]",
"k",
"=",
"camera_params",
"[",
"...",
",",
"4",
":",
"7",
"]",
"p",
"=",
"camera_params",
"[",
"...",
",",
"7",
":",
"]",
"XX",
"=",
"torch",
".",
"clamp",
"(",
"X",
"[",
"...",
",",
":",
"2",
"]",
"/",
"X",
"[",
"...",
",",
"2",
":",
"]",
",",
"min",
"=",
"-",
"1",
",",
"max",
"=",
"1",
")",
"r2",
"=",
"torch",
".",
"sum",
"(",
"XX",
"[",
"...",
",",
":",
"2",
"]",
"**",
"2",
",",
"dim",
"=",
"len",
"(",
"XX",
".",
"shape",
")",
"-",
"1",
",",
"keepdim",
"=",
"True",
")",
"radial",
"=",
"1",
"+",
"torch",
".",
"sum",
"(",
"k",
"*",
"torch",
".",
"cat",
"(",
"(",
"r2",
",",
"r2",
"**",
"2",
",",
"r2",
"**",
"3",
")",
",",
"dim",
"=",
"len",
"(",
"r2",
".",
"shape",
")",
"-",
"1",
")",
",",
"dim",
"=",
"len",
"(",
"r2",
".",
"shape",
")",
"-",
"1",
",",
"keepdim",
"=",
"True",
")",
"tan",
"=",
"torch",
".",
"sum",
"(",
"p",
"*",
"XX",
",",
"dim",
"=",
"len",
"(",
"XX",
".",
"shape",
")",
"-",
"1",
",",
"keepdim",
"=",
"True",
")",
"XXX",
"=",
"XX",
"*",
"(",
"radial",
"+",
"tan",
")",
"+",
"p",
"*",
"r2",
"return",
"f",
"*",
"XXX",
"+",
"c"
] | [
69,
0
] | [
100,
22
] | python | en | ['en', 'error', 'th'] | False |
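A shape-level sketch of the batched projection; the intrinsic values below are placeholders, laid out per row as (fx, fy, cx, cy, k1, k2, k3, p1, p2) to match the slicing above:

```python
import torch

X = torch.rand(4, 17, 3) + 1.0            # N=4 poses, 17 joints, Z > 0
cam = torch.tensor([[1145., 1144., 512.5, 515.5, 0., 0., 0., 0., 0.]]).expand(4, 9)
print(project_to_2d(X, cam).shape)        # -> torch.Size([4, 17, 2])
```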
project_to_2d_linear | (X, camera_params) |
Project 3D points to 2D using only linear parameters (focal length and principal point).
Arguments:
X -- 3D points in *camera space* to transform (N, *, 3)
camera_params -- intrinsic parameters (N, 2+2+3+2=9)
|
Project 3D points to 2D using only linear parameters (focal length and principal point). | def project_to_2d_linear(X, camera_params):
"""
Project 3D points to 2D using only linear parameters (focal length and principal point).
Arguments:
X -- 3D points in *camera space* to transform (N, *, 3)
camera_params -- intrinsic parameters (N, 2+2+3+2=9)
"""
assert X.shape[-1] == 3
assert len(camera_params.shape) == 2
assert camera_params.shape[-1] == 9
assert X.shape[0] == camera_params.shape[0]
while len(camera_params.shape) < len(X.shape):
if isinstance(camera_params, torch.Tensor):
camera_params = camera_params.unsqueeze(1)
else:
camera_params = camera_params[:, np.newaxis]
f = camera_params[..., :2]
c = camera_params[..., 2:4]
XX = X[..., :2] / X[..., 2:]
# XX = torch.clamp(X[..., :2] / X[..., 2:], min=-1, max=1)
if (np.abs(np.array(XX)) > 1).any():
print('Attention for this pose!!!')
return f * XX + c | [
"def",
"project_to_2d_linear",
"(",
"X",
",",
"camera_params",
")",
":",
"assert",
"X",
".",
"shape",
"[",
"-",
"1",
"]",
"==",
"3",
"assert",
"len",
"(",
"camera_params",
".",
"shape",
")",
"==",
"2",
"assert",
"camera_params",
".",
"shape",
"[",
"-",
"1",
"]",
"==",
"9",
"assert",
"X",
".",
"shape",
"[",
"0",
"]",
"==",
"camera_params",
".",
"shape",
"[",
"0",
"]",
"while",
"len",
"(",
"camera_params",
".",
"shape",
")",
"<",
"len",
"(",
"X",
".",
"shape",
")",
":",
"if",
"type",
"(",
"camera_params",
")",
"==",
"torch",
":",
"camera_params",
"=",
"camera_params",
".",
"unsqueeze",
"(",
"1",
")",
"else",
":",
"camera_params",
"=",
"camera_params",
"[",
":",
",",
"np",
".",
"newaxis",
"]",
"f",
"=",
"camera_params",
"[",
"...",
",",
":",
"2",
"]",
"c",
"=",
"camera_params",
"[",
"...",
",",
"2",
":",
"4",
"]",
"XX",
"=",
"X",
"[",
"...",
",",
":",
"2",
"]",
"/",
"X",
"[",
"...",
",",
"2",
":",
"]",
"# XX = torch.clamp(X[..., :2] / X[..., 2:], min=-1, max=1)",
"if",
"np",
".",
"array",
"(",
"XX",
")",
".",
"any",
"(",
")",
">",
"1",
"or",
"np",
".",
"array",
"(",
"XX",
")",
".",
"any",
"(",
")",
"<",
"-",
"1",
":",
"print",
"(",
"np",
".",
"array",
"(",
"XX",
")",
".",
"any",
"(",
")",
">",
"1",
"or",
"np",
".",
"array",
"(",
"XX",
")",
".",
"any",
"(",
")",
"<",
"-",
"1",
")",
"print",
"(",
"'Attention for this pose!!!'",
")",
"return",
"f",
"*",
"XX",
"+",
"c"
] | [
103,
0
] | [
129,
21
] | python | en | ['en', 'error', 'th'] | False |
reprojection | (pose_3d, abs_depth, camera) |
:param pose_3d: predicted 3d or normed 3d with pixel unit
:param abs_depth: absolute depth root Z in the camera coordinate
:param camera: camera intrinsic parameters
:return: 3d pose in the camera coordinate frame with millimeter unit, root joint: zero-center
|
:param pose_3d: predicted 3d or normed 3d with pixel unit
:param abs_depth: absolute depth root Z in the camera coordinate
:param camera: camera intrinsic parameters
:return: 3d pose in the camera coordinate frame with millimeter unit, root joint: zero-center
| def reprojection(pose_3d, abs_depth, camera):
"""
:param pose_3d: predicted 3d or normed 3d with pixel unit
:param abs_depth: absolute depth root Z in the camera coordinate
:param camera: camera intrinsic parameters
:return: 3d pose in the camera coordinate frame with millimeter unit, root joint: zero-center
"""
camera = camera.unsqueeze(dim=1).unsqueeze(dim=1)
cx, cy, fx, fy = camera[:,:,:,2:3], camera[:,:,:,3:4], camera[:,:,:,0:1], camera[:,:,:,1:2]
final_3d = torch.zeros_like(pose_3d)
final_3d_x = (pose_3d[:, :, :, 0:1] - cx) / fx
final_3d_y = (pose_3d[:, :, :, 1:2] - cy) / fy
final_3d[:, :, :, 0:1] = final_3d_x * abs_depth
final_3d[:, :, :, 1:2] = final_3d_y * abs_depth
final_3d[:, :, :, 2:3] = abs_depth
return final_3d | [
"def",
"reprojection",
"(",
"pose_3d",
",",
"abs_depth",
",",
"camera",
")",
":",
"camera",
"=",
"camera",
".",
"unsqueeze",
"(",
"dim",
"=",
"1",
")",
".",
"unsqueeze",
"(",
"dim",
"=",
"1",
")",
"cx",
",",
"cy",
",",
"fx",
",",
"fy",
"=",
"camera",
"[",
":",
",",
":",
",",
":",
",",
"2",
":",
"3",
"]",
",",
"camera",
"[",
":",
",",
":",
",",
":",
",",
"3",
":",
"4",
"]",
",",
"camera",
"[",
":",
",",
":",
",",
":",
",",
"0",
":",
"1",
"]",
",",
"camera",
"[",
":",
",",
":",
",",
":",
",",
"1",
":",
"2",
"]",
"final_3d",
"=",
"torch",
".",
"zeros_like",
"(",
"pose_3d",
")",
"final_3d_x",
"=",
"(",
"pose_3d",
"[",
":",
",",
":",
",",
":",
",",
"0",
":",
"1",
"]",
"-",
"cx",
")",
"/",
"fx",
"final_3d_y",
"=",
"(",
"pose_3d",
"[",
":",
",",
":",
",",
":",
",",
"1",
":",
"2",
"]",
"-",
"cy",
")",
"/",
"fy",
"final_3d",
"[",
":",
",",
":",
",",
":",
",",
"0",
":",
"1",
"]",
"=",
"final_3d_x",
"*",
"abs_depth",
"final_3d",
"[",
":",
",",
":",
",",
":",
",",
"1",
":",
"2",
"]",
"=",
"final_3d_y",
"*",
"abs_depth",
"final_3d",
"[",
":",
",",
":",
",",
":",
",",
"2",
":",
"3",
"]",
"=",
"abs_depth",
"return",
"final_3d"
] | [
132,
0
] | [
147,
19
] | python | en | ['en', 'error', 'th'] | False |
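A dimension sketch for `reprojection`; camera rows are (fx, fy, cx, cy) per the slicing above, and all values are placeholders. The input's z column is ignored and rebuilt from `abs_depth`:

```python
import torch

pose_2d = torch.rand(2, 8, 17, 2) * 1000.0            # pixel coordinates
pose_3d_in = torch.cat([pose_2d, torch.zeros(2, 8, 17, 1)], dim=-1)
abs_depth = torch.full((2, 8, 17, 1), 4000.0)         # root depth in mm
camera = torch.tensor([[1145., 1143., 512.5, 515.5]]).expand(2, 4)
print(reprojection(pose_3d_in, abs_depth, camera).shape)  # -> torch.Size([2, 8, 17, 3])
```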
BaseDRIVLearner.__init__ | (
self,
learner=None,
control_outcome_learner=None,
treatment_outcome_learner=None,
treatment_effect_learner=None,
ate_alpha=0.05,
control_name=0,
) | Initialize a DR-learner.
Args:
learner (optional): a model to estimate outcomes and treatment effects in both the control and treatment
groups
control_outcome_learner (optional): a model to estimate outcomes in the control group
treatment_outcome_learner (optional): a model to estimate outcomes in the treatment group
treatment_effect_learner (optional): a model to estimate treatment effects in the treatment group. It needs
to take `sample_weight` as an input argument in `fit()`.
ate_alpha (float, optional): the confidence level alpha of the ATE estimate
control_name (str or int, optional): name of control group
| Initialize a DR-learner. | def __init__(
self,
learner=None,
control_outcome_learner=None,
treatment_outcome_learner=None,
treatment_effect_learner=None,
ate_alpha=0.05,
control_name=0,
):
"""Initialize a DR-learner.
Args:
learner (optional): a model to estimate outcomes and treatment effects in both the control and treatment
groups
control_outcome_learner (optional): a model to estimate outcomes in the control group
treatment_outcome_learner (optional): a model to estimate outcomes in the treatment group
treatment_effect_learner (optional): a model to estimate treatment effects in the treatment group. It needs
to take `sample_weight` as an input argument in `fit()`.
ate_alpha (float, optional): the confidence level alpha of the ATE estimate
control_name (str or int, optional): name of control group
"""
assert (learner is not None) or (
(control_outcome_learner is not None)
and (treatment_outcome_learner is not None)
and (treatment_effect_learner is not None)
)
if control_outcome_learner is None:
self.model_mu_c = deepcopy(learner)
else:
self.model_mu_c = control_outcome_learner
if treatment_outcome_learner is None:
self.model_mu_t = deepcopy(learner)
else:
self.model_mu_t = treatment_outcome_learner
if treatment_effect_learner is None:
self.model_tau = deepcopy(learner)
else:
self.model_tau = treatment_effect_learner
self.ate_alpha = ate_alpha
self.control_name = control_name
self.propensity_1 = None
self.propensity_0 = None
self.propensity_assign = None | [
"def",
"__init__",
"(",
"self",
",",
"learner",
"=",
"None",
",",
"control_outcome_learner",
"=",
"None",
",",
"treatment_outcome_learner",
"=",
"None",
",",
"treatment_effect_learner",
"=",
"None",
",",
"ate_alpha",
"=",
"0.05",
",",
"control_name",
"=",
"0",
",",
")",
":",
"assert",
"(",
"learner",
"is",
"not",
"None",
")",
"or",
"(",
"(",
"control_outcome_learner",
"is",
"not",
"None",
")",
"and",
"(",
"treatment_outcome_learner",
"is",
"not",
"None",
")",
"and",
"(",
"treatment_effect_learner",
"is",
"not",
"None",
")",
")",
"if",
"control_outcome_learner",
"is",
"None",
":",
"self",
".",
"model_mu_c",
"=",
"deepcopy",
"(",
"learner",
")",
"else",
":",
"self",
".",
"model_mu_c",
"=",
"control_outcome_learner",
"if",
"treatment_outcome_learner",
"is",
"None",
":",
"self",
".",
"model_mu_t",
"=",
"deepcopy",
"(",
"learner",
")",
"else",
":",
"self",
".",
"model_mu_t",
"=",
"treatment_outcome_learner",
"if",
"treatment_effect_learner",
"is",
"None",
":",
"self",
".",
"model_tau",
"=",
"deepcopy",
"(",
"learner",
")",
"else",
":",
"self",
".",
"model_tau",
"=",
"treatment_effect_learner",
"self",
".",
"ate_alpha",
"=",
"ate_alpha",
"self",
".",
"control_name",
"=",
"control_name",
"self",
".",
"propensity_1",
"=",
"None",
"self",
".",
"propensity_0",
"=",
"None",
"self",
".",
"propensity_assign",
"=",
"None"
] | [
29,
4
] | [
76,
37
] | python | en | ['en', 'en', 'it'] | True |
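A hypothetical instantiation, assuming plain scikit-learn regressors for each sub-model; `GradientBoostingRegressor.fit` accepts the `sample_weight` argument the treatment-effect learner needs:

```python
from sklearn.ensemble import GradientBoostingRegressor

driv = BaseDRIVLearner(
    control_outcome_learner=GradientBoostingRegressor(),
    treatment_outcome_learner=GradientBoostingRegressor(),
    treatment_effect_learner=GradientBoostingRegressor(),  # must support sample_weight
    ate_alpha=0.05,
    control_name=0,
)
```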
BaseDRIVLearner.fit | (
self, X, assignment, treatment, y, p=None, pZ=None, seed=None, calibrate=True
) | Fit the inference model.
Args:
X (np.matrix or np.array or pd.Dataframe): a feature matrix
assignment (np.array or pd.Series): a (0,1)-valued assignment vector. The assignment is the
instrumental variable that does not depend on unknown confounders. The assignment status
influences treatment in a monotonic way, i.e. one can only be more likely to take the
treatment if assigned.
treatment (np.array or pd.Series): a treatment vector
y (np.array or pd.Series): an outcome vector
p (2-tuple of np.ndarray or pd.Series or dict, optional): The first (second) element corresponds to
unassigned (assigned) units. Each is an array of propensity scores of float (0,1) in the single-treatment
case; or, a dictionary of treatment groups that map to propensity vectors of float (0,1). If None will run
ElasticNetPropensityModel() to generate the propensity scores.
pZ (np.array or pd.Series, optional): an array of assignment probability of float (0,1); if None
will run ElasticNetPropensityModel() to generate the assignment probability score.
seed (int): random seed for cross-fitting
| Fit the inference model. | def fit(
self, X, assignment, treatment, y, p=None, pZ=None, seed=None, calibrate=True
):
"""Fit the inference model.
Args:
X (np.matrix or np.array or pd.Dataframe): a feature matrix
assignment (np.array or pd.Series): a (0,1)-valued assignment vector. The assignment is the
instrumental variable that does not depend on unknown confounders. The assignment status
influences treatment in a monotonic way, i.e. one can only be more likely to take the
treatment if assigned.
treatment (np.array or pd.Series): a treatment vector
y (np.array or pd.Series): an outcome vector
p (2-tuple of np.ndarray or pd.Series or dict, optional): The first (second) element corresponds to
unassigned (assigned) units. Each is an array of propensity scores of float (0,1) in the single-treatment
case; or, a dictionary of treatment groups that map to propensity vectors of float (0,1). If None will run
ElasticNetPropensityModel() to generate the propensity scores.
pZ (np.array or pd.Series, optional): an array of assignment probability of float (0,1); if None
will run ElasticNetPropensityModel() to generate the assignment probability score.
seed (int): random seed for cross-fitting
calibrate (bool, optional): whether to calibrate the assignment probability score; default True
"""
X, treatment, assignment, y = convert_pd_to_np(X, treatment, assignment, y)
check_treatment_vector(treatment, self.control_name)
self.t_groups = np.unique(treatment[treatment != self.control_name])
self.t_groups.sort()
self._classes = {group: i for i, group in enumerate(self.t_groups)}
# The estimator splits the data into 3 partitions for cross-fit on the propensity score estimation,
# the outcome regression, and the treatment regression on the doubly robust estimates. The use of
# the partitions is rotated so we do not lose on the sample size. We do not cross-fit the assignment
# score estimation as the assignment process is usually simple.
cv = KFold(n_splits=3, shuffle=True, random_state=seed)
split_indices = [index for _, index in cv.split(y)]
self.models_mu_c = {
group: [
deepcopy(self.model_mu_c),
deepcopy(self.model_mu_c),
deepcopy(self.model_mu_c),
]
for group in self.t_groups
}
self.models_mu_t = {
group: [
deepcopy(self.model_mu_t),
deepcopy(self.model_mu_t),
deepcopy(self.model_mu_t),
]
for group in self.t_groups
}
self.models_tau = {
group: [
deepcopy(self.model_tau),
deepcopy(self.model_tau),
deepcopy(self.model_tau),
]
for group in self.t_groups
}
if p is None:
self.propensity_1 = {
group: np.zeros(y.shape[0]) for group in self.t_groups
} # propensity scores for those assigned
self.propensity_0 = {
group: np.zeros(y.shape[0]) for group in self.t_groups
} # propensity scores for those not assigned
if pZ is None:
self.propensity_assign, _ = compute_propensity_score(
X=X,
treatment=assignment,
X_pred=X,
treatment_pred=assignment,
calibrate_p=calibrate,
)
else:
self.propensity_assign = pZ
for ifold in range(3):
treatment_idx = split_indices[ifold]
outcome_idx = split_indices[(ifold + 1) % 3]
tau_idx = split_indices[(ifold + 2) % 3]
treatment_treat, treatment_out, treatment_tau = (
treatment[treatment_idx],
treatment[outcome_idx],
treatment[tau_idx],
)
assignment_treat, assignment_out, assignment_tau = (
assignment[treatment_idx],
assignment[outcome_idx],
assignment[tau_idx],
)
y_out, y_tau = y[outcome_idx], y[tau_idx]
X_treat, X_out, X_tau = X[treatment_idx], X[outcome_idx], X[tau_idx]
pZ_tau = self.propensity_assign[tau_idx]
if p is None:
logger.info("Generating propensity score")
cur_p_1 = dict()
cur_p_0 = dict()
for group in self.t_groups:
mask = (treatment_treat == group) | (
treatment_treat == self.control_name
)
mask_1, mask_0 = mask & (assignment_treat == 1), mask & (
assignment_treat == 0
)
cur_p_1[group], _ = compute_propensity_score(
X=X_treat[mask_1],
treatment=(treatment_treat[mask_1] == group).astype(int),
X_pred=X_tau,
treatment_pred=(treatment_tau == group).astype(int),
)
if (treatment_treat[mask_0] == group).sum() == 0:
cur_p_0[group] = np.zeros(X_tau.shape[0])
else:
cur_p_0[group], _ = compute_propensity_score(
X=X_treat[mask_0],
treatment=(treatment_treat[mask_0] == group).astype(int),
X_pred=X_tau,
treatment_pred=(treatment_tau == group).astype(int),
)
self.propensity_1[group][tau_idx] = cur_p_1[group]
self.propensity_0[group][tau_idx] = cur_p_0[group]
else:
cur_p_1 = dict()
cur_p_0 = dict()
if isinstance(p[0], (np.ndarray, pd.Series)):
cur_p_0 = {self.t_groups[0]: convert_pd_to_np(p[0][tau_idx])}
else:
cur_p_0 = {g: prop[tau_idx] for g, prop in p[0].items()}
check_p_conditions(cur_p_0, self.t_groups)
if isinstance(p[1], (np.ndarray, pd.Series)):
cur_p_1 = {self.t_groups[0]: convert_pd_to_np(p[1][tau_idx])}
else:
cur_p_1 = {g: prop[tau_idx] for g, prop in p[1].items()}
check_p_conditions(cur_p_1, self.t_groups)
logger.info("Generate outcome regressions")
for group in self.t_groups:
mask = (treatment_out == group) | (treatment_out == self.control_name)
mask_1, mask_0 = mask & (assignment_out == 1), mask & (
assignment_out == 0
)
self.models_mu_c[group][ifold].fit(X_out[mask_0], y_out[mask_0])
self.models_mu_t[group][ifold].fit(X_out[mask_1], y_out[mask_1])
logger.info("Fit pseudo outcomes from the DR formula")
for group in self.t_groups:
mask = (treatment_tau == group) | (treatment_tau == self.control_name)
treatment_filt = treatment_tau[mask]
X_filt = X_tau[mask]
y_filt = y_tau[mask]
w_filt = (treatment_filt == group).astype(int)
p_1_filt = cur_p_1[group][mask]
p_0_filt = cur_p_0[group][mask]
z_filt = assignment_tau[mask]
pZ_filt = pZ_tau[mask]
mu_t = self.models_mu_t[group][ifold].predict(X_filt)
mu_c = self.models_mu_c[group][ifold].predict(X_filt)
dr = (
z_filt * (y_filt - mu_t) / pZ_filt
- (1 - z_filt) * (y_filt - mu_c) / (1 - pZ_filt)
+ mu_t
- mu_c
)
weight = (
z_filt * (w_filt - p_1_filt) / pZ_filt
- (1 - z_filt) * (w_filt - p_0_filt) / (1 - pZ_filt)
+ p_1_filt
- p_0_filt
)
dr /= weight
self.models_tau[group][ifold].fit(X_filt, dr, sample_weight=weight ** 2) | [
"def",
"fit",
"(",
"self",
",",
"X",
",",
"assignment",
",",
"treatment",
",",
"y",
",",
"p",
"=",
"None",
",",
"pZ",
"=",
"None",
",",
"seed",
"=",
"None",
",",
"calibrate",
"=",
"True",
")",
":",
"X",
",",
"treatment",
",",
"assignment",
",",
"y",
"=",
"convert_pd_to_np",
"(",
"X",
",",
"treatment",
",",
"assignment",
",",
"y",
")",
"check_treatment_vector",
"(",
"treatment",
",",
"self",
".",
"control_name",
")",
"self",
".",
"t_groups",
"=",
"np",
".",
"unique",
"(",
"treatment",
"[",
"treatment",
"!=",
"self",
".",
"control_name",
"]",
")",
"self",
".",
"t_groups",
".",
"sort",
"(",
")",
"self",
".",
"_classes",
"=",
"{",
"group",
":",
"i",
"for",
"i",
",",
"group",
"in",
"enumerate",
"(",
"self",
".",
"t_groups",
")",
"}",
"# The estimator splits the data into 3 partitions for cross-fit on the propensity score estimation,",
"# the outcome regression, and the treatment regression on the doubly robust estimates. The use of",
"# the partitions is rotated so we do not lose on the sample size. We do not cross-fit the assignment",
"# score estimation as the assignment process is usually simple.",
"cv",
"=",
"KFold",
"(",
"n_splits",
"=",
"3",
",",
"shuffle",
"=",
"True",
",",
"random_state",
"=",
"seed",
")",
"split_indices",
"=",
"[",
"index",
"for",
"_",
",",
"index",
"in",
"cv",
".",
"split",
"(",
"y",
")",
"]",
"self",
".",
"models_mu_c",
"=",
"{",
"group",
":",
"[",
"deepcopy",
"(",
"self",
".",
"model_mu_c",
")",
",",
"deepcopy",
"(",
"self",
".",
"model_mu_c",
")",
",",
"deepcopy",
"(",
"self",
".",
"model_mu_c",
")",
",",
"]",
"for",
"group",
"in",
"self",
".",
"t_groups",
"}",
"self",
".",
"models_mu_t",
"=",
"{",
"group",
":",
"[",
"deepcopy",
"(",
"self",
".",
"model_mu_t",
")",
",",
"deepcopy",
"(",
"self",
".",
"model_mu_t",
")",
",",
"deepcopy",
"(",
"self",
".",
"model_mu_t",
")",
",",
"]",
"for",
"group",
"in",
"self",
".",
"t_groups",
"}",
"self",
".",
"models_tau",
"=",
"{",
"group",
":",
"[",
"deepcopy",
"(",
"self",
".",
"model_tau",
")",
",",
"deepcopy",
"(",
"self",
".",
"model_tau",
")",
",",
"deepcopy",
"(",
"self",
".",
"model_tau",
")",
",",
"]",
"for",
"group",
"in",
"self",
".",
"t_groups",
"}",
"if",
"p",
"is",
"None",
":",
"self",
".",
"propensity_1",
"=",
"{",
"group",
":",
"np",
".",
"zeros",
"(",
"y",
".",
"shape",
"[",
"0",
"]",
")",
"for",
"group",
"in",
"self",
".",
"t_groups",
"}",
"# propensity scores for those assigned",
"self",
".",
"propensity_0",
"=",
"{",
"group",
":",
"np",
".",
"zeros",
"(",
"y",
".",
"shape",
"[",
"0",
"]",
")",
"for",
"group",
"in",
"self",
".",
"t_groups",
"}",
"# propensity scores for those not assigned",
"if",
"pZ",
"is",
"None",
":",
"self",
".",
"propensity_assign",
",",
"_",
"=",
"compute_propensity_score",
"(",
"X",
"=",
"X",
",",
"treatment",
"=",
"assignment",
",",
"X_pred",
"=",
"X",
",",
"treatment_pred",
"=",
"assignment",
",",
"calibrate_p",
"=",
"calibrate",
",",
")",
"else",
":",
"self",
".",
"propensity_assign",
"=",
"pZ",
"for",
"ifold",
"in",
"range",
"(",
"3",
")",
":",
"treatment_idx",
"=",
"split_indices",
"[",
"ifold",
"]",
"outcome_idx",
"=",
"split_indices",
"[",
"(",
"ifold",
"+",
"1",
")",
"%",
"3",
"]",
"tau_idx",
"=",
"split_indices",
"[",
"(",
"ifold",
"+",
"2",
")",
"%",
"3",
"]",
"treatment_treat",
",",
"treatment_out",
",",
"treatment_tau",
"=",
"(",
"treatment",
"[",
"treatment_idx",
"]",
",",
"treatment",
"[",
"outcome_idx",
"]",
",",
"treatment",
"[",
"tau_idx",
"]",
",",
")",
"assignment_treat",
",",
"assignment_out",
",",
"assignment_tau",
"=",
"(",
"assignment",
"[",
"treatment_idx",
"]",
",",
"assignment",
"[",
"outcome_idx",
"]",
",",
"assignment",
"[",
"tau_idx",
"]",
",",
")",
"y_out",
",",
"y_tau",
"=",
"y",
"[",
"outcome_idx",
"]",
",",
"y",
"[",
"tau_idx",
"]",
"X_treat",
",",
"X_out",
",",
"X_tau",
"=",
"X",
"[",
"treatment_idx",
"]",
",",
"X",
"[",
"outcome_idx",
"]",
",",
"X",
"[",
"tau_idx",
"]",
"pZ_tau",
"=",
"self",
".",
"propensity_assign",
"[",
"tau_idx",
"]",
"if",
"p",
"is",
"None",
":",
"logger",
".",
"info",
"(",
"\"Generating propensity score\"",
")",
"cur_p_1",
"=",
"dict",
"(",
")",
"cur_p_0",
"=",
"dict",
"(",
")",
"for",
"group",
"in",
"self",
".",
"t_groups",
":",
"mask",
"=",
"(",
"treatment_treat",
"==",
"group",
")",
"|",
"(",
"treatment_treat",
"==",
"self",
".",
"control_name",
")",
"mask_1",
",",
"mask_0",
"=",
"mask",
"&",
"(",
"assignment_treat",
"==",
"1",
")",
",",
"mask",
"&",
"(",
"assignment_treat",
"==",
"0",
")",
"cur_p_1",
"[",
"group",
"]",
",",
"_",
"=",
"compute_propensity_score",
"(",
"X",
"=",
"X_treat",
"[",
"mask_1",
"]",
",",
"treatment",
"=",
"(",
"treatment_treat",
"[",
"mask_1",
"]",
"==",
"group",
")",
".",
"astype",
"(",
"int",
")",
",",
"X_pred",
"=",
"X_tau",
",",
"treatment_pred",
"=",
"(",
"treatment_tau",
"==",
"group",
")",
".",
"astype",
"(",
"int",
")",
",",
")",
"if",
"(",
"treatment_treat",
"[",
"mask_0",
"]",
"==",
"group",
")",
".",
"sum",
"(",
")",
"==",
"0",
":",
"cur_p_0",
"[",
"group",
"]",
"=",
"np",
".",
"zeros",
"(",
"X_tau",
".",
"shape",
"[",
"0",
"]",
")",
"else",
":",
"cur_p_0",
"[",
"group",
"]",
",",
"_",
"=",
"compute_propensity_score",
"(",
"X",
"=",
"X_treat",
"[",
"mask_0",
"]",
",",
"treatment",
"=",
"(",
"treatment_treat",
"[",
"mask_0",
"]",
"==",
"group",
")",
".",
"astype",
"(",
"int",
")",
",",
"X_pred",
"=",
"X_tau",
",",
"treatment_pred",
"=",
"(",
"treatment_tau",
"==",
"group",
")",
".",
"astype",
"(",
"int",
")",
",",
")",
"self",
".",
"propensity_1",
"[",
"group",
"]",
"[",
"tau_idx",
"]",
"=",
"cur_p_1",
"[",
"group",
"]",
"self",
".",
"propensity_0",
"[",
"group",
"]",
"[",
"tau_idx",
"]",
"=",
"cur_p_0",
"[",
"group",
"]",
"else",
":",
"cur_p_1",
"=",
"dict",
"(",
")",
"cur_p_0",
"=",
"dict",
"(",
")",
"if",
"isinstance",
"(",
"p",
"[",
"0",
"]",
",",
"(",
"np",
".",
"ndarray",
",",
"pd",
".",
"Series",
")",
")",
":",
"cur_p_0",
"=",
"{",
"self",
".",
"t_groups",
"[",
"0",
"]",
":",
"convert_pd_to_np",
"(",
"p",
"[",
"0",
"]",
"[",
"tau_idx",
"]",
")",
"}",
"else",
":",
"cur_p_0",
"=",
"{",
"g",
":",
"prop",
"[",
"tau_idx",
"]",
"for",
"g",
",",
"prop",
"in",
"p",
"[",
"0",
"]",
".",
"items",
"(",
")",
"}",
"check_p_conditions",
"(",
"cur_p_0",
",",
"self",
".",
"t_groups",
")",
"if",
"isinstance",
"(",
"p",
"[",
"1",
"]",
",",
"(",
"np",
".",
"ndarray",
",",
"pd",
".",
"Series",
")",
")",
":",
"cur_p_1",
"=",
"{",
"self",
".",
"t_groups",
"[",
"0",
"]",
":",
"convert_pd_to_np",
"(",
"p",
"[",
"1",
"]",
"[",
"tau_idx",
"]",
")",
"}",
"else",
":",
"cur_p_1",
"=",
"{",
"g",
":",
"prop",
"[",
"tau_idx",
"]",
"for",
"g",
",",
"prop",
"in",
"p",
"[",
"1",
"]",
".",
"items",
"(",
")",
"}",
"check_p_conditions",
"(",
"cur_p_1",
",",
"self",
".",
"t_groups",
")",
"logger",
".",
"info",
"(",
"\"Generate outcome regressions\"",
")",
"for",
"group",
"in",
"self",
".",
"t_groups",
":",
"mask",
"=",
"(",
"treatment_out",
"==",
"group",
")",
"|",
"(",
"treatment_out",
"==",
"self",
".",
"control_name",
")",
"mask_1",
",",
"mask_0",
"=",
"mask",
"&",
"(",
"assignment_out",
"==",
"1",
")",
",",
"mask",
"&",
"(",
"assignment_out",
"==",
"0",
")",
"self",
".",
"models_mu_c",
"[",
"group",
"]",
"[",
"ifold",
"]",
".",
"fit",
"(",
"X_out",
"[",
"mask_0",
"]",
",",
"y_out",
"[",
"mask_0",
"]",
")",
"self",
".",
"models_mu_t",
"[",
"group",
"]",
"[",
"ifold",
"]",
".",
"fit",
"(",
"X_out",
"[",
"mask_1",
"]",
",",
"y_out",
"[",
"mask_1",
"]",
")",
"logger",
".",
"info",
"(",
"\"Fit pseudo outcomes from the DR formula\"",
")",
"for",
"group",
"in",
"self",
".",
"t_groups",
":",
"mask",
"=",
"(",
"treatment_tau",
"==",
"group",
")",
"|",
"(",
"treatment_tau",
"==",
"self",
".",
"control_name",
")",
"treatment_filt",
"=",
"treatment_tau",
"[",
"mask",
"]",
"X_filt",
"=",
"X_tau",
"[",
"mask",
"]",
"y_filt",
"=",
"y_tau",
"[",
"mask",
"]",
"w_filt",
"=",
"(",
"treatment_filt",
"==",
"group",
")",
".",
"astype",
"(",
"int",
")",
"p_1_filt",
"=",
"cur_p_1",
"[",
"group",
"]",
"[",
"mask",
"]",
"p_0_filt",
"=",
"cur_p_0",
"[",
"group",
"]",
"[",
"mask",
"]",
"z_filt",
"=",
"assignment_tau",
"[",
"mask",
"]",
"pZ_filt",
"=",
"pZ_tau",
"[",
"mask",
"]",
"mu_t",
"=",
"self",
".",
"models_mu_t",
"[",
"group",
"]",
"[",
"ifold",
"]",
".",
"predict",
"(",
"X_filt",
")",
"mu_c",
"=",
"self",
".",
"models_mu_c",
"[",
"group",
"]",
"[",
"ifold",
"]",
".",
"predict",
"(",
"X_filt",
")",
"dr",
"=",
"(",
"z_filt",
"*",
"(",
"y_filt",
"-",
"mu_t",
")",
"/",
"pZ_filt",
"-",
"(",
"1",
"-",
"z_filt",
")",
"*",
"(",
"y_filt",
"-",
"mu_c",
")",
"/",
"(",
"1",
"-",
"pZ_filt",
")",
"+",
"mu_t",
"-",
"mu_c",
")",
"weight",
"=",
"(",
"z_filt",
"*",
"(",
"w_filt",
"-",
"p_1_filt",
")",
"/",
"pZ_filt",
"-",
"(",
"1",
"-",
"z_filt",
")",
"*",
"(",
"w_filt",
"-",
"p_0_filt",
")",
"/",
"(",
"1",
"-",
"pZ_filt",
")",
"+",
"p_1_filt",
"-",
"p_0_filt",
")",
"dr",
"/=",
"weight",
"self",
".",
"models_tau",
"[",
"group",
"]",
"[",
"ifold",
"]",
".",
"fit",
"(",
"X_filt",
",",
"dr",
",",
"sample_weight",
"=",
"weight",
"**",
"2",
")"
] | [
90,
4
] | [
266,
88
] | python | en | ['en', 'en', 'en'] | True |
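A hedged usage sketch for fit on synthetic data, continuing the `driv` instance from the earlier sketch. The data-generating process below is invented purely for illustration; note the monotonic compliance (a unit can be treated only if assigned), which is the assumption the docstring states for the instrument.

import numpy as np

rng = np.random.default_rng(0)
n = 2000
X = rng.normal(size=(n, 5))
Z = rng.binomial(1, 0.5, size=n)            # random instrument / assignment
compliance = rng.binomial(1, 0.8, size=n)   # compliance indicator
w = Z * compliance                          # treatment: only assigned units can be treated
y = X[:, 0] + 0.5 * w + rng.normal(size=n)  # true complier effect of 0.5

driv.fit(X, assignment=Z, treatment=w, y=y, seed=0)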
BaseDRIVLearner.predict | (self, X, treatment=None, y=None, return_components=False, verbose=True) | Predict treatment effects.
Args:
X (np.matrix or np.array or pd.Dataframe): a feature matrix
treatment (np.array or pd.Series, optional): a treatment vector
y (np.array or pd.Series, optional): an outcome vector
return_components (bool, optional): whether to return the outcome model predictions for treatment and control separately
verbose (bool, optional): whether to output progress logs
Returns:
(numpy.ndarray): Predictions of treatment effects for compliers, i.e. those individuals
who take the treatment only if they are assigned.
| Predict treatment effects. | def predict(self, X, treatment=None, y=None, return_components=False, verbose=True):
"""Predict treatment effects.
Args:
X (np.matrix or np.array or pd.Dataframe): a feature matrix
treatment (np.array or pd.Series, optional): a treatment vector
y (np.array or pd.Series, optional): an outcome vector
return_components (bool, optional): whether to return the outcome model predictions for treatment and control separately
verbose (bool, optional): whether to output progress logs
Returns:
(numpy.ndarray): Predictions of treatment effects for compliers, i.e. those individuals
who take the treatment only if they are assigned.
"""
X, treatment, y = convert_pd_to_np(X, treatment, y)
te = np.zeros((X.shape[0], self.t_groups.shape[0]))
yhat_cs = {}
yhat_ts = {}
for i, group in enumerate(self.t_groups):
models_tau = self.models_tau[group]
_te = np.r_[[model.predict(X) for model in models_tau]].mean(axis=0)
te[:, i] = np.ravel(_te)
yhat_cs[group] = np.r_[
[model.predict(X) for model in self.models_mu_c[group]]
].mean(axis=0)
yhat_ts[group] = np.r_[
[model.predict(X) for model in self.models_mu_t[group]]
].mean(axis=0)
if (y is not None) and (treatment is not None) and verbose:
mask = (treatment == group) | (treatment == self.control_name)
treatment_filt = treatment[mask]
X_filt = X[mask]
y_filt = y[mask]
w = (treatment_filt == group).astype(int)
yhat = np.zeros_like(y_filt, dtype=float)
yhat[w == 0] = yhat_cs[group][mask][w == 0]
yhat[w == 1] = yhat_ts[group][mask][w == 1]
logger.info("Error metrics for group {}".format(group))
regression_metrics(y_filt, yhat, w)
if not return_components:
return te
else:
return te, yhat_cs, yhat_ts | [
"def",
"predict",
"(",
"self",
",",
"X",
",",
"treatment",
"=",
"None",
",",
"y",
"=",
"None",
",",
"return_components",
"=",
"False",
",",
"verbose",
"=",
"True",
")",
":",
"X",
",",
"treatment",
",",
"y",
"=",
"convert_pd_to_np",
"(",
"X",
",",
"treatment",
",",
"y",
")",
"te",
"=",
"np",
".",
"zeros",
"(",
"(",
"X",
".",
"shape",
"[",
"0",
"]",
",",
"self",
".",
"t_groups",
".",
"shape",
"[",
"0",
"]",
")",
")",
"yhat_cs",
"=",
"{",
"}",
"yhat_ts",
"=",
"{",
"}",
"for",
"i",
",",
"group",
"in",
"enumerate",
"(",
"self",
".",
"t_groups",
")",
":",
"models_tau",
"=",
"self",
".",
"models_tau",
"[",
"group",
"]",
"_te",
"=",
"np",
".",
"r_",
"[",
"[",
"model",
".",
"predict",
"(",
"X",
")",
"for",
"model",
"in",
"models_tau",
"]",
"]",
".",
"mean",
"(",
"axis",
"=",
"0",
")",
"te",
"[",
":",
",",
"i",
"]",
"=",
"np",
".",
"ravel",
"(",
"_te",
")",
"yhat_cs",
"[",
"group",
"]",
"=",
"np",
".",
"r_",
"[",
"[",
"model",
".",
"predict",
"(",
"X",
")",
"for",
"model",
"in",
"self",
".",
"models_mu_c",
"[",
"group",
"]",
"]",
"]",
".",
"mean",
"(",
"axis",
"=",
"0",
")",
"yhat_ts",
"[",
"group",
"]",
"=",
"np",
".",
"r_",
"[",
"[",
"model",
".",
"predict",
"(",
"X",
")",
"for",
"model",
"in",
"self",
".",
"models_mu_t",
"[",
"group",
"]",
"]",
"]",
".",
"mean",
"(",
"axis",
"=",
"0",
")",
"if",
"(",
"y",
"is",
"not",
"None",
")",
"and",
"(",
"treatment",
"is",
"not",
"None",
")",
"and",
"verbose",
":",
"mask",
"=",
"(",
"treatment",
"==",
"group",
")",
"|",
"(",
"treatment",
"==",
"self",
".",
"control_name",
")",
"treatment_filt",
"=",
"treatment",
"[",
"mask",
"]",
"X_filt",
"=",
"X",
"[",
"mask",
"]",
"y_filt",
"=",
"y",
"[",
"mask",
"]",
"w",
"=",
"(",
"treatment_filt",
"==",
"group",
")",
".",
"astype",
"(",
"int",
")",
"yhat",
"=",
"np",
".",
"zeros_like",
"(",
"y_filt",
",",
"dtype",
"=",
"float",
")",
"yhat",
"[",
"w",
"==",
"0",
"]",
"=",
"yhat_cs",
"[",
"group",
"]",
"[",
"mask",
"]",
"[",
"w",
"==",
"0",
"]",
"yhat",
"[",
"w",
"==",
"1",
"]",
"=",
"yhat_ts",
"[",
"group",
"]",
"[",
"mask",
"]",
"[",
"w",
"==",
"1",
"]",
"logger",
".",
"info",
"(",
"\"Error metrics for group {}\"",
".",
"format",
"(",
"group",
")",
")",
"regression_metrics",
"(",
"y_filt",
",",
"yhat",
",",
"w",
")",
"if",
"not",
"return_components",
":",
"return",
"te",
"else",
":",
"return",
"te",
",",
"yhat_cs",
",",
"yhat_ts"
] | [
268,
4
] | [
314,
39
] | python | en | ['fr', 'en', 'en'] | True |
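A short sketch of calling predict on the fitted instance from the previous example. Passing treatment and y is optional and only triggers the per-group error logging; the return value is the CATE matrix unless return_components is set.

cate = driv.predict(X)  # shape: (n_samples, n_treatment_groups)

# Optionally also retrieve the cross-fit-averaged outcome-model predictions:
cate, yhat_cs, yhat_ts = driv.predict(X, treatment=w, y=y, return_components=True)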
BaseDRIVLearner.fit_predict | (
self,
X,
assignment,
treatment,
y,
p=None,
pZ=None,
return_ci=False,
n_bootstraps=1000,
bootstrap_size=10000,
return_components=False,
verbose=True,
seed=None,
calibrate=True,
) | Fit the treatment effect and outcome models of the DRIV learner and predict treatment effects.
Args:
X (np.matrix or np.array or pd.Dataframe): a feature matrix
assignment (np.array or pd.Series): a (0,1)-valued assignment vector. The assignment is the
instrumental variable that does not depend on unknown confounders. The assignment status
influences treatment in a monotonic way, i.e. one can only be more likely to take the
treatment if assigned.
treatment (np.array or pd.Series): a treatment vector
y (np.array or pd.Series): an outcome vector
p (2-tuple of np.ndarray or pd.Series or dict, optional): The first (second) element corresponds to
unassigned (assigned) units. Each is an array of propensity scores of float (0,1) in the single-treatment
case; or, a dictionary of treatment groups that map to propensity vectors of float (0,1). If None will run
ElasticNetPropensityModel() to generate the propensity scores.
pZ (np.array or pd.Series, optional): an array of assignment probability of float (0,1); if None
will run ElasticNetPropensityModel() to generate the assignment probability score.
return_ci (bool): whether to return confidence intervals
n_bootstraps (int): number of bootstrap iterations
bootstrap_size (int): number of samples per bootstrap
return_components (bool, optional): whether to return outcome for treatment and control separately
verbose (bool): whether to output progress logs
seed (int): random seed for cross-fitting
calibrate (bool, optional): whether to calibrate the assignment probability score; default True
Returns:
(numpy.ndarray): Predictions of treatment effects for compliers, i.e. those individuals
who take the treatment only if they are assigned. Output dim: [n_samples, n_treatment]
If return_ci, returns CATE [n_samples, n_treatment], LB [n_samples, n_treatment],
UB [n_samples, n_treatment]
| Fit the treatment effect and outcome models of the DRIV learner and predict treatment effects. | def fit_predict(
self,
X,
assignment,
treatment,
y,
p=None,
pZ=None,
return_ci=False,
n_bootstraps=1000,
bootstrap_size=10000,
return_components=False,
verbose=True,
seed=None,
calibrate=True,
):
"""Fit the treatment effect and outcome models of the R learner and predict treatment effects.
Args:
X (np.matrix or np.array or pd.Dataframe): a feature matrix
assignment (np.array or pd.Series): a (0,1)-valued assignment vector. The assignment is the
instrumental variable that does not depend on unknown confounders. The assignment status
influences treatment in a monotonic way, i.e. one can only be more likely to take the
treatment if assigned.
treatment (np.array or pd.Series): a treatment vector
y (np.array or pd.Series): an outcome vector
p (2-tuple of np.ndarray or pd.Series or dict, optional): The first (second) element corresponds to
unassigned (assigned) units. Each is an array of propensity scores of float (0,1) in the single-treatment
case; or, a dictionary of treatment groups that map to propensity vectors of float (0,1). If None will run
ElasticNetPropensityModel() to generate the propensity scores.
pZ (np.array or pd.Series, optional): an array of assignment probability of float (0,1); if None
will run ElasticNetPropensityModel() to generate the assignment probability score.
return_ci (bool): whether to return confidence intervals
n_bootstraps (int): number of bootstrap iterations
bootstrap_size (int): number of samples per bootstrap
return_components (bool, optional): whether to return outcome for treatment and control separately
verbose (bool): whether to output progress logs
seed (int): random seed for cross-fitting
calibrate (bool, optional): whether to calibrate the assignment probability score; default True
Returns:
(numpy.ndarray): Predictions of treatment effects for compliers, i.e. those individuals
who take the treatment only if they are assigned. Output dim: [n_samples, n_treatment]
If return_ci, returns CATE [n_samples, n_treatment], LB [n_samples, n_treatment],
UB [n_samples, n_treatment]
"""
X, assignment, treatment, y = convert_pd_to_np(X, assignment, treatment, y)
self.fit(X, assignment, treatment, y, p, pZ, seed, calibrate)
if p is None:
p = (self.propensity_0, self.propensity_1)
else:
check_p_conditions(p[0], self.t_groups)
check_p_conditions(p[1], self.t_groups)
if isinstance(p[0], (np.ndarray, pd.Series)):
treatment_name = self.t_groups[0]
p = (
{treatment_name: convert_pd_to_np(p[0])},
{treatment_name: convert_pd_to_np(p[1])},
)
elif isinstance(p[0], dict):
p = (
{
treatment_name: convert_pd_to_np(_p)
for treatment_name, _p in p[0].items()
},
{
treatment_name: convert_pd_to_np(_p)
for treatment_name, _p in p[1].items()
},
)
if pZ is None:
pZ = self.propensity_assign
te = self.predict(
X, treatment=treatment, y=y, return_components=return_components
)
if not return_ci:
return te
else:
t_groups_global = self.t_groups
_classes_global = self._classes
models_mu_c_global = deepcopy(self.models_mu_c)
models_mu_t_global = deepcopy(self.models_mu_t)
models_tau_global = deepcopy(self.models_tau)
te_bootstraps = np.zeros(
shape=(X.shape[0], self.t_groups.shape[0], n_bootstraps)
)
logger.info("Bootstrap Confidence Intervals")
for i in tqdm(range(n_bootstraps)):
te_b = self.bootstrap(
X, assignment, treatment, y, p, pZ, size=bootstrap_size, seed=seed
)
te_bootstraps[:, :, i] = te_b
te_lower = np.percentile(te_bootstraps, (self.ate_alpha / 2) * 100, axis=2)
te_upper = np.percentile(
te_bootstraps, (1 - self.ate_alpha / 2) * 100, axis=2
)
# set member variables back to global (currently last bootstrapped outcome)
self.t_groups = t_groups_global
self._classes = _classes_global
self.models_mu_c = deepcopy(models_mu_c_global)
self.models_mu_t = deepcopy(models_mu_t_global)
self.models_tau = deepcopy(models_tau_global)
return (te, te_lower, te_upper) | [
"def",
"fit_predict",
"(",
"self",
",",
"X",
",",
"assignment",
",",
"treatment",
",",
"y",
",",
"p",
"=",
"None",
",",
"pZ",
"=",
"None",
",",
"return_ci",
"=",
"False",
",",
"n_bootstraps",
"=",
"1000",
",",
"bootstrap_size",
"=",
"10000",
",",
"return_components",
"=",
"False",
",",
"verbose",
"=",
"True",
",",
"seed",
"=",
"None",
",",
"calibrate",
"=",
"True",
",",
")",
":",
"X",
",",
"assignment",
",",
"treatment",
",",
"y",
"=",
"convert_pd_to_np",
"(",
"X",
",",
"assignment",
",",
"treatment",
",",
"y",
")",
"self",
".",
"fit",
"(",
"X",
",",
"assignment",
",",
"treatment",
",",
"y",
",",
"p",
",",
"seed",
",",
"calibrate",
")",
"if",
"p",
"is",
"None",
":",
"p",
"=",
"(",
"self",
".",
"propensity_0",
",",
"self",
".",
"propensity_1",
")",
"else",
":",
"check_p_conditions",
"(",
"p",
"[",
"0",
"]",
",",
"self",
".",
"t_groups",
")",
"check_p_conditions",
"(",
"p",
"[",
"1",
"]",
",",
"self",
".",
"t_groups",
")",
"if",
"isinstance",
"(",
"p",
"[",
"0",
"]",
",",
"(",
"np",
".",
"ndarray",
",",
"pd",
".",
"Series",
")",
")",
":",
"treatment_name",
"=",
"self",
".",
"t_groups",
"[",
"0",
"]",
"p",
"=",
"(",
"{",
"treatment_name",
":",
"convert_pd_to_np",
"(",
"p",
"[",
"0",
"]",
")",
"}",
",",
"{",
"treatment_name",
":",
"convert_pd_to_np",
"(",
"p",
"[",
"1",
"]",
")",
"}",
",",
")",
"elif",
"isinstance",
"(",
"p",
"[",
"0",
"]",
",",
"dict",
")",
":",
"p",
"=",
"(",
"{",
"treatment_name",
":",
"convert_pd_to_np",
"(",
"_p",
")",
"for",
"treatment_name",
",",
"_p",
"in",
"p",
"[",
"0",
"]",
".",
"items",
"(",
")",
"}",
",",
"{",
"treatment_name",
":",
"convert_pd_to_np",
"(",
"_p",
")",
"for",
"treatment_name",
",",
"_p",
"in",
"p",
"[",
"1",
"]",
".",
"items",
"(",
")",
"}",
",",
")",
"if",
"pZ",
"is",
"None",
":",
"pZ",
"=",
"self",
".",
"propensity_assign",
"te",
"=",
"self",
".",
"predict",
"(",
"X",
",",
"treatment",
"=",
"treatment",
",",
"y",
"=",
"y",
",",
"return_components",
"=",
"return_components",
")",
"if",
"not",
"return_ci",
":",
"return",
"te",
"else",
":",
"t_groups_global",
"=",
"self",
".",
"t_groups",
"_classes_global",
"=",
"self",
".",
"_classes",
"models_mu_c_global",
"=",
"deepcopy",
"(",
"self",
".",
"models_mu_c",
")",
"models_mu_t_global",
"=",
"deepcopy",
"(",
"self",
".",
"models_mu_t",
")",
"models_tau_global",
"=",
"deepcopy",
"(",
"self",
".",
"models_tau",
")",
"te_bootstraps",
"=",
"np",
".",
"zeros",
"(",
"shape",
"=",
"(",
"X",
".",
"shape",
"[",
"0",
"]",
",",
"self",
".",
"t_groups",
".",
"shape",
"[",
"0",
"]",
",",
"n_bootstraps",
")",
")",
"logger",
".",
"info",
"(",
"\"Bootstrap Confidence Intervals\"",
")",
"for",
"i",
"in",
"tqdm",
"(",
"range",
"(",
"n_bootstraps",
")",
")",
":",
"te_b",
"=",
"self",
".",
"bootstrap",
"(",
"X",
",",
"assignment",
",",
"treatment",
",",
"y",
",",
"p",
",",
"pZ",
",",
"size",
"=",
"bootstrap_size",
",",
"seed",
"=",
"seed",
")",
"te_bootstraps",
"[",
":",
",",
":",
",",
"i",
"]",
"=",
"te_b",
"te_lower",
"=",
"np",
".",
"percentile",
"(",
"te_bootstraps",
",",
"(",
"self",
".",
"ate_alpha",
"/",
"2",
")",
"*",
"100",
",",
"axis",
"=",
"2",
")",
"te_upper",
"=",
"np",
".",
"percentile",
"(",
"te_bootstraps",
",",
"(",
"1",
"-",
"self",
".",
"ate_alpha",
"/",
"2",
")",
"*",
"100",
",",
"axis",
"=",
"2",
")",
"# set member variables back to global (currently last bootstrapped outcome)",
"self",
".",
"t_groups",
"=",
"t_groups_global",
"self",
".",
"_classes",
"=",
"_classes_global",
"self",
".",
"models_mu_c",
"=",
"deepcopy",
"(",
"models_mu_c_global",
")",
"self",
".",
"models_mu_t",
"=",
"deepcopy",
"(",
"models_mu_t_global",
")",
"self",
".",
"models_tau",
"=",
"deepcopy",
"(",
"models_tau_global",
")",
"return",
"(",
"te",
",",
"te_lower",
",",
"te_upper",
")"
] | [
316,
4
] | [
425,
43
] | python | en | ['en', 'en', 'en'] | True |
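A sketch of the one-call variant with bootstrap confidence intervals, again on the synthetic data above. The bootstrap settings are deliberately smaller than the defaults (1000 iterations of size 10000) to keep the example fast; they are illustrative, not recommended values.

te, te_lb, te_ub = driv.fit_predict(
    X, Z, w, y,
    return_ci=True, n_bootstraps=100, bootstrap_size=1000, seed=0,
)  # each array has shape (n_samples, n_treatment_groups)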
BaseDRIVLearner.estimate_ate | (
self,
X,
assignment,
treatment,
y,
p=None,
pZ=None,
bootstrap_ci=False,
n_bootstraps=1000,
bootstrap_size=10000,
seed=None,
calibrate=True,
) | Estimate the Average Treatment Effect (ATE) for compliers.
Args:
X (np.matrix or np.array or pd.Dataframe): a feature matrix
assignment (np.array or pd.Series): an assignment vector. The assignment is the
instrumental variable that does not depend on unknown confounders. The assignment status
influences treatment in a monotonic way, i.e. one can only be more likely to take the
treatment if assigned.
treatment (np.array or pd.Series): a treatment vector
y (np.array or pd.Series): an outcome vector
p (2-tuple of np.ndarray or pd.Series or dict, optional): The first (second) element corresponds to
unassigned (assigned) units. Each is an array of propensity scores of float (0,1) in the single-treatment
case; or, a dictionary of treatment groups that map to propensity vectors of float (0,1). If None will run
ElasticNetPropensityModel() to generate the propensity scores.
pZ (np.array or pd.Series, optional): an array of assignment probability of float (0,1); if None
will run ElasticNetPropensityModel() to generate the assignment probability score.
bootstrap_ci (bool): whether to run bootstrap for confidence intervals
n_bootstraps (int): number of bootstrap iterations
bootstrap_size (int): number of samples per bootstrap
seed (int): random seed for cross-fitting
calibrate (bool, optional): whether to calibrate the assignment probability score; default True
Returns:
The mean and confidence interval (LB, UB) of the ATE estimate.
| Estimate the Average Treatment Effect (ATE) for compliers. | def estimate_ate(
self,
X,
assignment,
treatment,
y,
p=None,
pZ=None,
bootstrap_ci=False,
n_bootstraps=1000,
bootstrap_size=10000,
seed=None,
calibrate=True,
):
"""Estimate the Average Treatment Effect (ATE) for compliers.
Args:
X (np.matrix or np.array or pd.Dataframe): a feature matrix
assignment (np.array or pd.Series): an assignment vector. The assignment is the
instrumental variable that does not depend on unknown confounders. The assignment status
influences treatment in a monotonic way, i.e. one can only be more likely to take the
treatment if assigned.
treatment (np.array or pd.Series): a treatment vector
y (np.array or pd.Series): an outcome vector
p (2-tuple of np.ndarray or pd.Series or dict, optional): The first (second) element corresponds to
unassigned (assigned) units. Each is an array of propensity scores of float (0,1) in the single-treatment
case; or, a dictionary of treatment groups that map to propensity vectors of float (0,1). If None will run
ElasticNetPropensityModel() to generate the propensity scores.
pZ (np.array or pd.Series, optional): an array of assignment probability of float (0,1); if None
will run ElasticNetPropensityModel() to generate the assignment probability score.
bootstrap_ci (bool): whether to run bootstrap for confidence intervals
n_bootstraps (int): number of bootstrap iterations
bootstrap_size (int): number of samples per bootstrap
seed (int): random seed for cross-fitting
calibrate (bool, optional): whether to calibrate the assignment probability score; default True
Returns:
The mean and confidence interval (LB, UB) of the ATE estimate.
"""
te, yhat_cs, yhat_ts = self.fit_predict(
X,
assignment,
treatment,
y,
p,
pZ,
return_components=True,
seed=seed,
calibrate=calibrate,
)
X, assignment, treatment, y = convert_pd_to_np(X, assignment, treatment, y)
if p is None:
p = (self.propensity_0, self.propensity_1)
else:
check_p_conditions(p[0], self.t_groups)
check_p_conditions(p[1], self.t_groups)
if isinstance(p[0], (np.ndarray, pd.Series)):
treatment_name = self.t_groups[0]
p = (
{treatment_name: convert_pd_to_np(p[0])},
{treatment_name: convert_pd_to_np(p[1])},
)
elif isinstance(p[0], dict):
p = (
{
treatment_name: convert_pd_to_np(_p)
for treatment_name, _p in p[0].items()
},
{
treatment_name: convert_pd_to_np(_p)
for treatment_name, _p in p[1].items()
},
)
if pZ is None:
pZ = self.propensity_assign
ate = np.zeros(self.t_groups.shape[0])
ate_lb = np.zeros(self.t_groups.shape[0])
ate_ub = np.zeros(self.t_groups.shape[0])
for i, group in enumerate(self.t_groups):
_ate = te[:, i].mean()
mask = (treatment == group) | (treatment == self.control_name)
mask_1, mask_0 = mask & (assignment == 1), mask & (assignment == 0)
Gamma = (treatment[mask_1] == group).mean() - (
treatment[mask_0] == group
).mean()
y_filt_1, y_filt_0 = y[mask_1], y[mask_0]
yhat_0 = yhat_cs[group][mask_0]
yhat_1 = yhat_ts[group][mask_1]
treatment_filt_1, treatment_filt_0 = treatment[mask_1], treatment[mask_0]
prob_treatment_1, prob_treatment_0 = (
p[1][group][mask_1],
p[0][group][mask_0],
)
w = (assignment[mask]).mean()
part_1 = (
(y_filt_1 - yhat_1).var()
+ _ate ** 2 * (treatment_filt_1 - prob_treatment_1).var()
- 2
* _ate
* (y_filt_1 * treatment_filt_1 - yhat_1 * prob_treatment_1).mean()
)
part_0 = (
(y_filt_0 - yhat_0).var()
+ _ate ** 2 * (treatment_filt_0 - prob_treatment_0).var()
- 2
* _ate
* (y_filt_0 * treatment_filt_0 - yhat_0 * prob_treatment_0).mean()
)
part_2 = np.mean(
(
yhat_ts[group][mask]
- yhat_cs[group][mask]
- _ate * (p[1][group][mask] - p[0][group][mask])
)
** 2
)
# SE formula is based on the lower bound formula (9) from Frölich, Markus. 2006.
# "Nonparametric IV estimation of local average treatment effects wth covariates."
# Journal of Econometrics.
se = np.sqrt((part_1 / w + part_0 / (1 - w)) + part_2) / Gamma
_ate_lb = _ate - se * norm.ppf(1 - self.ate_alpha / 2)
_ate_ub = _ate + se * norm.ppf(1 - self.ate_alpha / 2)
ate[i] = _ate
ate_lb[i] = _ate_lb
ate_ub[i] = _ate_ub
if not bootstrap_ci:
return ate, ate_lb, ate_ub
else:
t_groups_global = self.t_groups
_classes_global = self._classes
models_mu_c_global = deepcopy(self.models_mu_c)
models_mu_t_global = deepcopy(self.models_mu_t)
models_tau_global = deepcopy(self.models_tau)
logger.info("Bootstrap Confidence Intervals for ATE")
ate_bootstraps = np.zeros(shape=(self.t_groups.shape[0], n_bootstraps))
for n in tqdm(range(n_bootstraps)):
cate_b = self.bootstrap(
X, assignment, treatment, y, p, pZ, size=bootstrap_size, seed=seed
)
ate_bootstraps[:, n] = cate_b.mean(axis=0)
ate_lower = np.percentile(
ate_bootstraps, (self.ate_alpha / 2) * 100, axis=1
)
ate_upper = np.percentile(
ate_bootstraps, (1 - self.ate_alpha / 2) * 100, axis=1
)
# set member variables back to global (currently last bootstrapped outcome)
self.t_groups = t_groups_global
self._classes = _classes_global
self.models_mu_c = deepcopy(models_mu_c_global)
self.models_mu_t = deepcopy(models_mu_t_global)
self.models_tau = deepcopy(models_tau_global)
return ate, ate_lower, ate_upper | [
"def",
"estimate_ate",
"(",
"self",
",",
"X",
",",
"assignment",
",",
"treatment",
",",
"y",
",",
"p",
"=",
"None",
",",
"pz",
"=",
"None",
",",
"bootstrap_ci",
"=",
"False",
",",
"n_bootstraps",
"=",
"1000",
",",
"bootstrap_size",
"=",
"10000",
",",
"seed",
"=",
"None",
",",
"calibrate",
"=",
"True",
",",
")",
":",
"te",
",",
"yhat_cs",
",",
"yhat_ts",
"=",
"self",
".",
"fit_predict",
"(",
"X",
",",
"assignment",
",",
"treatment",
",",
"y",
",",
"p",
",",
"return_components",
"=",
"True",
",",
"seed",
"=",
"seed",
",",
"calibrate",
"=",
"calibrate",
",",
")",
"X",
",",
"assignment",
",",
"treatment",
",",
"y",
"=",
"convert_pd_to_np",
"(",
"X",
",",
"assignment",
",",
"treatment",
",",
"y",
")",
"if",
"p",
"is",
"None",
":",
"p",
"=",
"(",
"self",
".",
"propensity_0",
",",
"self",
".",
"propensity_1",
")",
"else",
":",
"check_p_conditions",
"(",
"p",
"[",
"0",
"]",
",",
"self",
".",
"t_groups",
")",
"check_p_conditions",
"(",
"p",
"[",
"1",
"]",
",",
"self",
".",
"t_groups",
")",
"if",
"isinstance",
"(",
"p",
"[",
"0",
"]",
",",
"(",
"np",
".",
"ndarray",
",",
"pd",
".",
"Series",
")",
")",
":",
"treatment_name",
"=",
"self",
".",
"t_groups",
"[",
"0",
"]",
"p",
"=",
"(",
"{",
"treatment_name",
":",
"convert_pd_to_np",
"(",
"p",
"[",
"0",
"]",
")",
"}",
",",
"{",
"treatment_name",
":",
"convert_pd_to_np",
"(",
"p",
"[",
"1",
"]",
")",
"}",
",",
")",
"elif",
"isinstance",
"(",
"p",
"[",
"0",
"]",
",",
"dict",
")",
":",
"p",
"=",
"(",
"{",
"treatment_name",
":",
"convert_pd_to_np",
"(",
"_p",
")",
"for",
"treatment_name",
",",
"_p",
"in",
"p",
"[",
"0",
"]",
".",
"items",
"(",
")",
"}",
",",
"{",
"treatment_name",
":",
"convert_pd_to_np",
"(",
"_p",
")",
"for",
"treatment_name",
",",
"_p",
"in",
"p",
"[",
"1",
"]",
".",
"items",
"(",
")",
"}",
",",
")",
"ate",
"=",
"np",
".",
"zeros",
"(",
"self",
".",
"t_groups",
".",
"shape",
"[",
"0",
"]",
")",
"ate_lb",
"=",
"np",
".",
"zeros",
"(",
"self",
".",
"t_groups",
".",
"shape",
"[",
"0",
"]",
")",
"ate_ub",
"=",
"np",
".",
"zeros",
"(",
"self",
".",
"t_groups",
".",
"shape",
"[",
"0",
"]",
")",
"for",
"i",
",",
"group",
"in",
"enumerate",
"(",
"self",
".",
"t_groups",
")",
":",
"_ate",
"=",
"te",
"[",
":",
",",
"i",
"]",
".",
"mean",
"(",
")",
"mask",
"=",
"(",
"treatment",
"==",
"group",
")",
"|",
"(",
"treatment",
"==",
"self",
".",
"control_name",
")",
"mask_1",
",",
"mask_0",
"=",
"mask",
"&",
"(",
"assignment",
"==",
"1",
")",
",",
"mask",
"&",
"(",
"assignment",
"==",
"0",
")",
"Gamma",
"=",
"(",
"treatment",
"[",
"mask_1",
"]",
"==",
"group",
")",
".",
"mean",
"(",
")",
"-",
"(",
"treatment",
"[",
"mask_0",
"]",
"==",
"group",
")",
".",
"mean",
"(",
")",
"y_filt_1",
",",
"y_filt_0",
"=",
"y",
"[",
"mask_1",
"]",
",",
"y",
"[",
"mask_0",
"]",
"yhat_0",
"=",
"yhat_cs",
"[",
"group",
"]",
"[",
"mask_0",
"]",
"yhat_1",
"=",
"yhat_ts",
"[",
"group",
"]",
"[",
"mask_1",
"]",
"treatment_filt_1",
",",
"treatment_filt_0",
"=",
"treatment",
"[",
"mask_1",
"]",
",",
"treatment",
"[",
"mask_0",
"]",
"prob_treatment_1",
",",
"prob_treatment_0",
"=",
"(",
"p",
"[",
"1",
"]",
"[",
"group",
"]",
"[",
"mask_1",
"]",
",",
"p",
"[",
"0",
"]",
"[",
"group",
"]",
"[",
"mask_0",
"]",
",",
")",
"w",
"=",
"(",
"assignment",
"[",
"mask",
"]",
")",
".",
"mean",
"(",
")",
"part_1",
"=",
"(",
"(",
"y_filt_1",
"-",
"yhat_1",
")",
".",
"var",
"(",
")",
"+",
"_ate",
"**",
"2",
"*",
"(",
"treatment_filt_1",
"-",
"prob_treatment_1",
")",
".",
"var",
"(",
")",
"-",
"2",
"*",
"_ate",
"*",
"(",
"y_filt_1",
"*",
"treatment_filt_1",
"-",
"yhat_1",
"*",
"prob_treatment_1",
")",
".",
"mean",
"(",
")",
")",
"part_0",
"=",
"(",
"(",
"y_filt_0",
"-",
"yhat_0",
")",
".",
"var",
"(",
")",
"+",
"_ate",
"**",
"2",
"*",
"(",
"treatment_filt_0",
"-",
"prob_treatment_0",
")",
".",
"var",
"(",
")",
"-",
"2",
"*",
"_ate",
"*",
"(",
"y_filt_0",
"*",
"treatment_filt_0",
"-",
"yhat_0",
"*",
"prob_treatment_0",
")",
".",
"mean",
"(",
")",
")",
"part_2",
"=",
"np",
".",
"mean",
"(",
"(",
"yhat_ts",
"[",
"group",
"]",
"[",
"mask",
"]",
"-",
"yhat_cs",
"[",
"group",
"]",
"[",
"mask",
"]",
"-",
"_ate",
"*",
"(",
"p",
"[",
"1",
"]",
"[",
"group",
"]",
"[",
"mask",
"]",
"-",
"p",
"[",
"0",
"]",
"[",
"group",
"]",
"[",
"mask",
"]",
")",
")",
"**",
"2",
")",
"# SE formula is based on the lower bound formula (9) from Frölich, Markus. 2006.",
"# \"Nonparametric IV estimation of local average treatment effects wth covariates.\"",
"# Journal of Econometrics.",
"se",
"=",
"np",
".",
"sqrt",
"(",
"(",
"part_1",
"/",
"w",
"+",
"part_2",
"/",
"(",
"1",
"-",
"w",
")",
")",
"+",
"part_2",
")",
"/",
"Gamma",
"_ate_lb",
"=",
"_ate",
"-",
"se",
"*",
"norm",
".",
"ppf",
"(",
"1",
"-",
"self",
".",
"ate_alpha",
"/",
"2",
")",
"_ate_ub",
"=",
"_ate",
"+",
"se",
"*",
"norm",
".",
"ppf",
"(",
"1",
"-",
"self",
".",
"ate_alpha",
"/",
"2",
")",
"ate",
"[",
"i",
"]",
"=",
"_ate",
"ate_lb",
"[",
"i",
"]",
"=",
"_ate_lb",
"ate_ub",
"[",
"i",
"]",
"=",
"_ate_ub",
"if",
"not",
"bootstrap_ci",
":",
"return",
"ate",
",",
"ate_lb",
",",
"ate_ub",
"else",
":",
"t_groups_global",
"=",
"self",
".",
"t_groups",
"_classes_global",
"=",
"self",
".",
"_classes",
"models_mu_c_global",
"=",
"deepcopy",
"(",
"self",
".",
"models_mu_c",
")",
"models_mu_t_global",
"=",
"deepcopy",
"(",
"self",
".",
"models_mu_t",
")",
"models_tau_global",
"=",
"deepcopy",
"(",
"self",
".",
"models_tau",
")",
"logger",
".",
"info",
"(",
"\"Bootstrap Confidence Intervals for ATE\"",
")",
"ate_bootstraps",
"=",
"np",
".",
"zeros",
"(",
"shape",
"=",
"(",
"self",
".",
"t_groups",
".",
"shape",
"[",
"0",
"]",
",",
"n_bootstraps",
")",
")",
"for",
"n",
"in",
"tqdm",
"(",
"range",
"(",
"n_bootstraps",
")",
")",
":",
"cate_b",
"=",
"self",
".",
"bootstrap",
"(",
"X",
",",
"assignment",
",",
"treatment",
",",
"y",
",",
"p",
",",
"pZ",
",",
"size",
"=",
"bootstrap_size",
",",
"seed",
"=",
"seed",
")",
"ate_bootstraps",
"[",
":",
",",
"n",
"]",
"=",
"cate_b",
".",
"mean",
"(",
")",
"ate_lower",
"=",
"np",
".",
"percentile",
"(",
"ate_bootstraps",
",",
"(",
"self",
".",
"ate_alpha",
"/",
"2",
")",
"*",
"100",
",",
"axis",
"=",
"1",
")",
"ate_upper",
"=",
"np",
".",
"percentile",
"(",
"ate_bootstraps",
",",
"(",
"1",
"-",
"self",
".",
"ate_alpha",
"/",
"2",
")",
"*",
"100",
",",
"axis",
"=",
"1",
")",
"# set member variables back to global (currently last bootstrapped outcome)",
"self",
".",
"t_groups",
"=",
"t_groups_global",
"self",
".",
"_classes",
"=",
"_classes_global",
"self",
".",
"models_mu_c",
"=",
"deepcopy",
"(",
"models_mu_c_global",
")",
"self",
".",
"models_mu_t",
"=",
"deepcopy",
"(",
"models_mu_t_global",
")",
"self",
".",
"models_tau",
"=",
"deepcopy",
"(",
"models_tau_global",
")",
"return",
"ate",
",",
"ate_lower",
",",
"ate_upper"
] | [
427,
4
] | [
589,
44
] | python | en | ['en', 'en', 'en'] | True |
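A sketch of the ATE interface on the same synthetic data. With bootstrap_ci=False the bounds come from the analytic standard error above; with bootstrap_ci=True they come from the percentile bootstrap.

ate, ate_lb, ate_ub = driv.estimate_ate(X, Z, w, y, seed=0)
print(ate, ate_lb, ate_ub)  # arrays of length n_treatment_groups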
BaseDRIVLearner.bootstrap | (self, X, assignment, treatment, y, p, pZ, size=10000, seed=None) | Runs a single bootstrap. Fits on bootstrapped sample, then predicts on whole population. | Runs a single bootstrap. Fits on bootstrapped sample, then predicts on whole population. | def bootstrap(self, X, assignment, treatment, y, p, pZ, size=10000, seed=None):
"""Runs a single bootstrap. Fits on bootstrapped sample, then predicts on whole population."""
idxs = np.random.choice(np.arange(0, X.shape[0]), size=size)
X_b = X[idxs]
if isinstance(p[0], (np.ndarray, pd.Series)):
p0_b = {self.t_groups[0]: convert_pd_to_np(p[0][idxs])}
else:
p0_b = {g: prop[idxs] for g, prop in p[0].items()}
if isinstance(p[1], (np.ndarray, pd.Series)):
p1_b = {self.t_groups[0]: convert_pd_to_np(p[1][idxs])}
else:
p1_b = {g: prop[idxs] for g, prop in p[1].items()}
pZ_b = pZ[idxs]
assignment_b = assignment[idxs]
treatment_b = treatment[idxs]
y_b = y[idxs]
self.fit(
X=X_b,
assignment=assignment_b,
treatment=treatment_b,
y=y_b,
p=(p0_b, p1_b),
pZ=pZ_b,
seed=seed,
)
te_b = self.predict(X=X)
return te_b | [
"def",
"bootstrap",
"(",
"self",
",",
"X",
",",
"assignment",
",",
"treatment",
",",
"y",
",",
"p",
",",
"pZ",
",",
"size",
"=",
"10000",
",",
"seed",
"=",
"None",
")",
":",
"idxs",
"=",
"np",
".",
"random",
".",
"choice",
"(",
"np",
".",
"arange",
"(",
"0",
",",
"X",
".",
"shape",
"[",
"0",
"]",
")",
",",
"size",
"=",
"size",
")",
"X_b",
"=",
"X",
"[",
"idxs",
"]",
"if",
"isinstance",
"(",
"p",
"[",
"0",
"]",
",",
"(",
"np",
".",
"ndarray",
",",
"pd",
".",
"Series",
")",
")",
":",
"p0_b",
"=",
"{",
"self",
".",
"t_groups",
"[",
"0",
"]",
":",
"convert_pd_to_np",
"(",
"p",
"[",
"0",
"]",
"[",
"idxs",
"]",
")",
"}",
"else",
":",
"p0_b",
"=",
"{",
"g",
":",
"prop",
"[",
"idxs",
"]",
"for",
"g",
",",
"prop",
"in",
"p",
"[",
"0",
"]",
".",
"items",
"(",
")",
"}",
"if",
"isinstance",
"(",
"p",
"[",
"1",
"]",
",",
"(",
"np",
".",
"ndarray",
",",
"pd",
".",
"Series",
")",
")",
":",
"p1_b",
"=",
"{",
"self",
".",
"t_groups",
"[",
"0",
"]",
":",
"convert_pd_to_np",
"(",
"p",
"[",
"1",
"]",
"[",
"idxs",
"]",
")",
"}",
"else",
":",
"p1_b",
"=",
"{",
"g",
":",
"prop",
"[",
"idxs",
"]",
"for",
"g",
",",
"prop",
"in",
"p",
"[",
"1",
"]",
".",
"items",
"(",
")",
"}",
"pZ_b",
"=",
"pZ",
"[",
"idxs",
"]",
"assignment_b",
"=",
"assignment",
"[",
"idxs",
"]",
"treatment_b",
"=",
"treatment",
"[",
"idxs",
"]",
"y_b",
"=",
"y",
"[",
"idxs",
"]",
"self",
".",
"fit",
"(",
"X",
"=",
"X_b",
",",
"assignment",
"=",
"assignment_b",
",",
"treatment",
"=",
"treatment_b",
",",
"y",
"=",
"y_b",
",",
"p",
"=",
"(",
"p0_b",
",",
"p1_b",
")",
",",
"pZ",
"=",
"pZ_b",
",",
"seed",
"=",
"seed",
",",
")",
"te_b",
"=",
"self",
".",
"predict",
"(",
"X",
"=",
"X",
")",
"return",
"te_b"
] | [
591,
4
] | [
619,
19
] | python | en | ['en', 'en', 'en'] | True |
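bootstrap is mostly an internal helper, but it can be called directly after a fit with p=None, when the propensity attributes are populated. Note that it refits the member models on the resample, which is why the public callers snapshot and restore them with deepcopy.

p = (driv.propensity_0, driv.propensity_1)  # dicts keyed by treatment group
te_b = driv.bootstrap(X, Z, w, y, p, driv.propensity_assign, size=1000, seed=0)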
BaseDRIVLearner.get_importance | (
self,
X=None,
tau=None,
model_tau_feature=None,
features=None,
method="auto",
normalize=True,
test_size=0.3,
random_state=None,
) |
Builds a model (using X to predict estimated/actual tau), and then calculates feature importances
based on a specified method.
Currently supported methods are:
- auto (calculates importance based on estimator's default implementation of feature importance;
estimator must be tree-based)
Note: if none provided, it uses lightgbm's LGBMRegressor as estimator, and "gain" as
importance type
- permutation (calculates importance based on mean decrease in accuracy when a feature column is permuted;
estimator can be any form)
Hint: for permutation, downsample data for better performance especially if X.shape[1] is large
Args:
X (np.matrix or np.array or pd.Dataframe): a feature matrix
tau (np.array): a treatment effect vector (estimated/actual)
model_tau_feature (sklearn/lightgbm/xgboost model object): an unfitted model object
features (np.array): list/array of feature names. If None, an enumerated list will be used
method (str): auto, permutation
normalize (bool): normalize by sum of importances if method=auto (defaults to True)
test_size (float/int): if float, represents the proportion of the dataset to include in the test split.
If int, represents the absolute number of test samples (used for estimating
permutation importance)
random_state (int/RandomState instance/None): random state used in permutation importance estimation
|
Builds a model (using X to predict estimated/actual tau), and then calculates feature importances
based on a specified method. | def get_importance(
self,
X=None,
tau=None,
model_tau_feature=None,
features=None,
method="auto",
normalize=True,
test_size=0.3,
random_state=None,
):
"""
Builds a model (using X to predict estimated/actual tau), and then calculates feature importances
based on a specified method.
Currently supported methods are:
- auto (calculates importance based on estimator's default implementation of feature importance;
estimator must be tree-based)
Note: if none provided, it uses lightgbm's LGBMRegressor as estimator, and "gain" as
importance type
- permutation (calculates importance based on mean decrease in accuracy when a feature column is permuted;
estimator can be any form)
Hint: for permutation, downsample data for better performance especially if X.shape[1] is large
Args:
X (np.matrix or np.array or pd.Dataframe): a feature matrix
tau (np.array): a treatment effect vector (estimated/actual)
model_tau_feature (sklearn/lightgbm/xgboost model object): an unfitted model object
features (np.array): list/array of feature names. If None, an enumerated list will be used
method (str): auto, permutation
normalize (bool): normalize by sum of importances if method=auto (defaults to True)
test_size (float/int): if float, represents the proportion of the dataset to include in the test split.
If int, represents the absolute number of test samples (used for estimating
permutation importance)
random_state (int/RandomState instance/None): random state used in permutation importance estimation
"""
explainer = Explainer(
method=method,
control_name=self.control_name,
X=X,
tau=tau,
model_tau=model_tau_feature,
features=features,
classes=self._classes,
normalize=normalize,
test_size=test_size,
random_state=random_state,
)
return explainer.get_importance() | [
"def",
"get_importance",
"(",
"self",
",",
"X",
"=",
"None",
",",
"tau",
"=",
"None",
",",
"model_tau_feature",
"=",
"None",
",",
"features",
"=",
"None",
",",
"method",
"=",
"\"auto\"",
",",
"normalize",
"=",
"True",
",",
"test_size",
"=",
"0.3",
",",
"random_state",
"=",
"None",
",",
")",
":",
"explainer",
"=",
"Explainer",
"(",
"method",
"=",
"method",
",",
"control_name",
"=",
"self",
".",
"control_name",
",",
"X",
"=",
"X",
",",
"tau",
"=",
"tau",
",",
"model_tau",
"=",
"model_tau_feature",
",",
"features",
"=",
"features",
",",
"classes",
"=",
"self",
".",
"_classes",
",",
"normalize",
"=",
"normalize",
",",
"test_size",
"=",
"test_size",
",",
"random_state",
"=",
"random_state",
",",
")",
"return",
"explainer",
".",
"get_importance",
"(",
")"
] | [
621,
4
] | [
669,
41
] | python | en | ['en', 'error', 'th'] | False |
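A usage sketch for feature importance, reusing the cate estimates from the predict example. The feature names are made up; the return type is whatever Explainer.get_importance yields (typically a dict of pandas Series keyed by treatment group, but treat that as an assumption).

importance = driv.get_importance(
    X=X,
    tau=cate,
    method="permutation",
    features=[f"x{i}" for i in range(X.shape[1])],
)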
BaseDRIVLearner.get_shap_values | (self, X=None, model_tau_feature=None, tau=None, features=None) |
Builds a model (using X to predict estimated/actual tau), and then calculates shapley values.
Args:
X (np.matrix or np.array or pd.Dataframe): a feature matrix
tau (np.array): a treatment effect vector (estimated/actual)
model_tau_feature (sklearn/lightgbm/xgboost model object): an unfitted model object
features (optional, np.array): list/array of feature names. If None, an enumerated list will be used.
|
Builds a model (using X to predict estimated/actual tau), and then calculates shapley values.
Args:
X (np.matrix or np.array or pd.Dataframe): a feature matrix
tau (np.array): a treatment effect vector (estimated/actual)
model_tau_feature (sklearn/lightgbm/xgboost model object): an unfitted model object
features (optional, np.array): list/array of feature names. If None, an enumerated list will be used.
| def get_shap_values(self, X=None, model_tau_feature=None, tau=None, features=None):
"""
Builds a model (using X to predict estimated/actual tau), and then calculates shapley values.
Args:
X (np.matrix or np.array or pd.Dataframe): a feature matrix
tau (np.array): a treatment effect vector (estimated/actual)
model_tau_feature (sklearn/lightgbm/xgboost model object): an unfitted model object
features (optional, np.array): list/array of feature names. If None, an enumerated list will be used.
"""
explainer = Explainer(
method="shapley",
control_name=self.control_name,
X=X,
tau=tau,
model_tau=model_tau_feature,
features=features,
classes=self._classes,
)
return explainer.get_shap_values() | [
"def",
"get_shap_values",
"(",
"self",
",",
"X",
"=",
"None",
",",
"model_tau_feature",
"=",
"None",
",",
"tau",
"=",
"None",
",",
"features",
"=",
"None",
")",
":",
"explainer",
"=",
"Explainer",
"(",
"method",
"=",
"\"shapley\"",
",",
"control_name",
"=",
"self",
".",
"control_name",
",",
"X",
"=",
"X",
",",
"tau",
"=",
"tau",
",",
"model_tau",
"=",
"model_tau_feature",
",",
"features",
"=",
"features",
",",
"classes",
"=",
"self",
".",
"_classes",
",",
")",
"return",
"explainer",
".",
"get_shap_values",
"(",
")"
] | [
671,
4
] | [
689,
42
] | python | en | ['en', 'error', 'th'] | False |
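A one-line sketch for retrieving shapley values; X and tau are the same illustrative arrays as above.

shap_dict = driv.get_shap_values(X=X, tau=cate)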
BaseDRIVLearner.plot_importance | (
self,
X=None,
tau=None,
model_tau_feature=None,
features=None,
method="auto",
normalize=True,
test_size=0.3,
random_state=None,
) |
Builds a model (using X to predict estimated/actual tau), and then plots feature importances
based on a specified method.
Currently supported methods are:
- auto (calculates importance based on estimator's default implementation of feature importance;
estimator must be tree-based)
Note: if none provided, it uses lightgbm's LGBMRegressor as estimator, and "gain" as
importance type
- permutation (calculates importance based on mean decrease in accuracy when a feature column is permuted;
estimator can be any form)
Hint: for permutation, downsample data for better performance especially if X.shape[1] is large
Args:
X (np.matrix or np.array or pd.Dataframe): a feature matrix
tau (np.array): a treatment effect vector (estimated/actual)
model_tau_feature (sklearn/lightgbm/xgboost model object): an unfitted model object
features (optional, np.array): list/array of feature names. If None, an enumerated list will be used
method (str): auto, permutation
normalize (bool): normalize by sum of importances if method=auto (defaults to True)
test_size (float/int): if float, represents the proportion of the dataset to include in the test split.
If int, represents the absolute number of test samples (used for estimating
permutation importance)
random_state (int/RandomState instance/None): random state used in permutation importance estimation
|
Builds a model (using X to predict estimated/actual tau), and then plots feature importances
based on a specified method. | def plot_importance(
self,
X=None,
tau=None,
model_tau_feature=None,
features=None,
method="auto",
normalize=True,
test_size=0.3,
random_state=None,
):
"""
Builds a model (using X to predict estimated/actual tau), and then plots feature importances
based on a specified method.
Currently supported methods are:
- auto (calculates importance based on estimator's default implementation of feature importance;
estimator must be tree-based)
Note: if none provided, it uses lightgbm's LGBMRegressor as estimator, and "gain" as
importance type
- permutation (calculates importance based on mean decrease in accuracy when a feature column is permuted;
estimator can be any form)
Hint: for permutation, downsample data for better performance especially if X.shape[1] is large
Args:
X (np.matrix or np.array or pd.Dataframe): a feature matrix
tau (np.array): a treatment effect vector (estimated/actual)
model_tau_feature (sklearn/lightgbm/xgboost model object): an unfitted model object
features (optional, np.array): list/array of feature names. If None, an enumerated list will be used
method (str): auto, permutation
normalize (bool): normalize by sum of importances if method=auto (defaults to True)
test_size (float/int): if float, represents the proportion of the dataset to include in the test split.
If int, represents the absolute number of test samples (used for estimating
permutation importance)
random_state (int/RandomState instance/None): random state used in permutation importance estimation
"""
explainer = Explainer(
method=method,
control_name=self.control_name,
X=X,
tau=tau,
model_tau=model_tau_feature,
features=features,
classes=self._classes,
normalize=normalize,
test_size=test_size,
random_state=random_state,
)
explainer.plot_importance() | [
"def",
"plot_importance",
"(",
"self",
",",
"X",
"=",
"None",
",",
"tau",
"=",
"None",
",",
"model_tau_feature",
"=",
"None",
",",
"features",
"=",
"None",
",",
"method",
"=",
"\"auto\"",
",",
"normalize",
"=",
"True",
",",
"test_size",
"=",
"0.3",
",",
"random_state",
"=",
"None",
",",
")",
":",
"explainer",
"=",
"Explainer",
"(",
"method",
"=",
"method",
",",
"control_name",
"=",
"self",
".",
"control_name",
",",
"X",
"=",
"X",
",",
"tau",
"=",
"tau",
",",
"model_tau",
"=",
"model_tau_feature",
",",
"features",
"=",
"features",
",",
"classes",
"=",
"self",
".",
"_classes",
",",
"normalize",
"=",
"normalize",
",",
"test_size",
"=",
"test_size",
",",
"random_state",
"=",
"random_state",
",",
")",
"explainer",
".",
"plot_importance",
"(",
")"
] | [
691,
4
] | [
739,
35
] | python | en | ['en', 'error', 'th'] | False |
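The plotting variant follows the same calling convention as get_importance; a one-line sketch:

driv.plot_importance(X=X, tau=cate, method="auto")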
BaseDRIVLearner.plot_shap_values | (
self,
X=None,
tau=None,
model_tau_feature=None,
features=None,
shap_dict=None,
**kwargs
) |
Plots distribution of shapley values.
If shapley values have been pre-computed, pass it through the shap_dict parameter.
If shap_dict is not provided, this builds a new model (using X to predict estimated/actual tau),
and then calculates shapley values.
Args:
X (np.matrix or np.array or pd.Dataframe): a feature matrix. Required if shap_dict is None.
tau (np.array): a treatment effect vector (estimated/actual)
model_tau_feature (sklearn/lightgbm/xgboost model object): an unfitted model object
features (optional, np.array): list/array of feature names. If None, an enumerated list will be used.
shap_dict (optional, dict): a dict of shapley value matrices. If None, shap_dict will be computed.
|
Plots distribution of shapley values. | def plot_shap_values(
self,
X=None,
tau=None,
model_tau_feature=None,
features=None,
shap_dict=None,
**kwargs
):
"""
Plots distribution of shapley values.
If shapley values have been pre-computed, pass it through the shap_dict parameter.
If shap_dict is not provided, this builds a new model (using X to predict estimated/actual tau),
and then calculates shapley values.
Args:
X (np.matrix or np.array or pd.Dataframe): a feature matrix. Required if shap_dict is None.
tau (np.array): a treatment effect vector (estimated/actual)
model_tau_feature (sklearn/lightgbm/xgboost model object): an unfitted model object
features (optional, np.array): list/array of feature names. If None, an enumerated list will be used.
shap_dict (optional, dict): a dict of shapley value matrices. If None, shap_dict will be computed.
"""
override_checks = shap_dict is not None
explainer = Explainer(
method="shapley",
control_name=self.control_name,
X=X,
tau=tau,
model_tau=model_tau_feature,
features=features,
override_checks=override_checks,
classes=self._classes,
)
explainer.plot_shap_values(shap_dict=shap_dict) | [
"def",
"plot_shap_values",
"(",
"self",
",",
"X",
"=",
"None",
",",
"tau",
"=",
"None",
",",
"model_tau_feature",
"=",
"None",
",",
"features",
"=",
"None",
",",
"shap_dict",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"override_checks",
"=",
"False",
"if",
"shap_dict",
"is",
"None",
"else",
"True",
"explainer",
"=",
"Explainer",
"(",
"method",
"=",
"\"shapley\"",
",",
"control_name",
"=",
"self",
".",
"control_name",
",",
"X",
"=",
"X",
",",
"tau",
"=",
"tau",
",",
"model_tau",
"=",
"model_tau_feature",
",",
"features",
"=",
"features",
",",
"override_checks",
"=",
"override_checks",
",",
"classes",
"=",
"self",
".",
"_classes",
",",
")",
"explainer",
".",
"plot_shap_values",
"(",
"shap_dict",
"=",
"shap_dict",
")"
] | [
741,
4
] | [
775,
55
] | python | en | ['en', 'error', 'th'] | False |
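A similar hedged sketch for plot_shap_values. As above, `driv`, `X`, and `tau` are assumed to exist already; the commented call shows the pre-computed path described in the docstring:

    from lightgbm import LGBMRegressor  # illustrative model_tau_feature choice

    # build a model on X -> tau internally, then plot the Shapley values
    driv.plot_shap_values(X=X, tau=tau, model_tau_feature=LGBMRegressor())

    # or reuse values computed earlier (skips the internal checks and refit):
    # driv.plot_shap_values(shap_dict=precomputed_shap_dict)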
BaseDRIVLearner.plot_shap_dependence | (
self,
treatment_group,
feature_idx,
X,
tau,
model_tau_feature=None,
features=None,
shap_dict=None,
interaction_idx="auto",
**kwargs
) |
Plots dependency of shapley values for a specified feature, colored by an interaction feature.
If shapley values have been pre-computed, pass them through the shap_dict parameter.
If shap_dict is not provided, this builds a new model (using X to predict estimated/actual tau),
and then calculates shapley values.
This plots the value of the feature on the x-axis and the SHAP value of the same feature
on the y-axis. This shows how the model depends on the given feature, and is like a
richer extension of the classical partial dependence plots. Vertical dispersion of the
data points represents interaction effects.
Args:
treatment_group (str or int): name of treatment group to create dependency plot on
feature_idx (str or int): feature index / name to create dependency plot on
X (np.matrix or np.array or pd.Dataframe): a feature matrix
tau (np.array): a treatment effect vector (estimated/actual)
model_tau_feature (sklearn/lightgbm/xgboost model object): an unfitted model object
features (optional, np.array): list/array of feature names. If None, an enumerated list will be used.
shap_dict (optional, dict): a dict of shapley value matrices. If None, shap_dict will be computed.
interaction_idx (optional, str or int): feature index / name used in coloring scheme as interaction feature.
If "auto" then shap.common.approximate_interactions is used to pick what seems to be the
strongest interaction (note that to find the true strongest interaction you need to compute
the SHAP interaction values).
|
Plots dependency of shapley values for a specified feature, colored by an interaction feature. | def plot_shap_dependence(
self,
treatment_group,
feature_idx,
X,
tau,
model_tau_feature=None,
features=None,
shap_dict=None,
interaction_idx="auto",
**kwargs
):
"""
Plots dependency of shapley values for a specified feature, colored by an interaction feature.
If shapley values have been pre-computed, pass them through the shap_dict parameter.
If shap_dict is not provided, this builds a new model (using X to predict estimated/actual tau),
and then calculates shapley values.
This plots the value of the feature on the x-axis and the SHAP value of the same feature
on the y-axis. This shows how the model depends on the given feature, and is like a
richer extension of the classical partial dependence plots. Vertical dispersion of the
data points represents interaction effects.
Args:
treatment_group (str or int): name of treatment group to create dependency plot on
feature_idx (str or int): feature index / name to create dependency plot on
X (np.matrix or np.array or pd.Dataframe): a feature matrix
tau (np.array): a treatment effect vector (estimated/actual)
model_tau_feature (sklearn/lightgbm/xgboost model object): an unfitted model object
features (optional, np.array): list/array of feature names. If None, an enumerated list will be used.
shap_dict (optional, dict): a dict of shapley value matrices. If None, shap_dict will be computed.
interaction_idx (optional, str or int): feature index / name used in coloring scheme as interaction feature.
If "auto" then shap.common.approximate_interactions is used to pick what seems to be the
strongest interaction (note that to find the true strongest interaction you need to compute
the SHAP interaction values).
"""
override_checks = False if shap_dict is None else True
explainer = Explainer(
method="shapley",
control_name=self.control_name,
X=X,
tau=tau,
model_tau=model_tau_feature,
features=features,
override_checks=override_checks,
classes=self._classes,
)
explainer.plot_shap_dependence(
treatment_group=treatment_group,
feature_idx=feature_idx,
shap_dict=shap_dict,
interaction_idx=interaction_idx,
**kwargs
) | [
"def",
"plot_shap_dependence",
"(",
"self",
",",
"treatment_group",
",",
"feature_idx",
",",
"X",
",",
"tau",
",",
"model_tau_feature",
"=",
"None",
",",
"features",
"=",
"None",
",",
"shap_dict",
"=",
"None",
",",
"interaction_idx",
"=",
"\"auto\"",
",",
"*",
"*",
"kwargs",
")",
":",
"override_checks",
"=",
"False",
"if",
"shap_dict",
"is",
"None",
"else",
"True",
"explainer",
"=",
"Explainer",
"(",
"method",
"=",
"\"shapley\"",
",",
"control_name",
"=",
"self",
".",
"control_name",
",",
"X",
"=",
"X",
",",
"tau",
"=",
"tau",
",",
"model_tau",
"=",
"model_tau_feature",
",",
"features",
"=",
"features",
",",
"override_checks",
"=",
"override_checks",
",",
"classes",
"=",
"self",
".",
"_classes",
",",
")",
"explainer",
".",
"plot_shap_dependence",
"(",
"treatment_group",
"=",
"treatment_group",
",",
"feature_idx",
"=",
"feature_idx",
",",
"shap_dict",
"=",
"shap_dict",
",",
"interaction_idx",
"=",
"interaction_idx",
",",
"*",
"*",
"kwargs",
")"
] | [
777,
4
] | [
831,
9
] | python | en | ['en', 'error', 'th'] | False |
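A usage sketch for the dependence plot; the treatment-group label and feature name are hypothetical, and `driv`, `X`, `tau` are placeholders as in the sketches above:

    driv.plot_shap_dependence(
        treatment_group=1,       # hypothetical treatment arm label
        feature_idx="age",       # hypothetical feature on the x-axis
        X=X,
        tau=tau,
        interaction_idx="auto",  # let shap approximate the strongest interaction
    )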
BaseDRIVRegressor.__init__ | (
self,
learner=None,
control_outcome_learner=None,
treatment_outcome_learner=None,
treatment_effect_learner=None,
ate_alpha=0.05,
control_name=0,
) | Initialize a DRIV-learner regressor.
Args:
learner (optional): a model to estimate outcomes and treatment effects in both the control and treatment
groups
control_outcome_learner (optional): a model to estimate outcomes in the control group
treatment_outcome_learner (optional): a model to estimate outcomes in the treatment group
treatment_effect_learner (optional): a model to estimate treatment effects in the treatment group. It needs
to take `sample_weight` as an input argument in `fit()`.
ate_alpha (float, optional): the confidence level alpha of the ATE estimate
control_name (str or int, optional): name of control group
| Initialize a DRIV-learner regressor. | def __init__(
self,
learner=None,
control_outcome_learner=None,
treatment_outcome_learner=None,
treatment_effect_learner=None,
ate_alpha=0.05,
control_name=0,
):
"""Initialize a DRIV-learner regressor.
Args:
learner (optional): a model to estimate outcomes and treatment effects in both the control and treatment
groups
control_outcome_learner (optional): a model to estimate outcomes in the control group
treatment_outcome_learner (optional): a model to estimate outcomes in the treatment group
treatment_effect_learner (optional): a model to estimate treatment effects in the treatment group. It needs
to take `sample_weight` as an input argument in `fit()`.
ate_alpha (float, optional): the confidence level alpha of the ATE estimate
control_name (str or int, optional): name of control group
"""
super().__init__(
learner=learner,
control_outcome_learner=control_outcome_learner,
treatment_outcome_learner=treatment_outcome_learner,
treatment_effect_learner=treatment_effect_learner,
ate_alpha=ate_alpha,
control_name=control_name,
) | [
"def",
"__init__",
"(",
"self",
",",
"learner",
"=",
"None",
",",
"control_outcome_learner",
"=",
"None",
",",
"treatment_outcome_learner",
"=",
"None",
",",
"treatment_effect_learner",
"=",
"None",
",",
"ate_alpha",
"=",
"0.05",
",",
"control_name",
"=",
"0",
",",
")",
":",
"super",
"(",
")",
".",
"__init__",
"(",
"learner",
"=",
"learner",
",",
"control_outcome_learner",
"=",
"control_outcome_learner",
",",
"treatment_outcome_learner",
"=",
"treatment_outcome_learner",
",",
"treatment_effect_learner",
"=",
"treatment_effect_learner",
",",
"ate_alpha",
"=",
"ate_alpha",
",",
"control_name",
"=",
"control_name",
",",
")"
] | [
839,
4
] | [
868,
9
] | python | co | ['en', 'co', 'it'] | False |
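A construction sketch, assuming BaseDRIVRegressor is importable (in causalml it lives under the IV inference module); passing a single learner lets the class reuse it for the outcome and effect models:

    from xgboost import XGBRegressor

    # import path assumed: from causalml.inference.iv import BaseDRIVRegressor
    driv = BaseDRIVRegressor(
        learner=XGBRegressor(),  # shared model for outcomes and effects
        ate_alpha=0.05,
        control_name=0,
    )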
XGBDRIVRegressor.__init__ | (self, ate_alpha=0.05, control_name=0, *args, **kwargs) | Initialize a DRIV-learner with two XGBoost models. | Initialize a DRIV-learner with two XGBoost models. | def __init__(self, ate_alpha=0.05, control_name=0, *args, **kwargs):
"""Initialize a DRIV-learner with two XGBoost models."""
super().__init__(
learner=XGBRegressor(*args, **kwargs),
ate_alpha=ate_alpha,
control_name=control_name,
) | [
"def",
"__init__",
"(",
"self",
",",
"ate_alpha",
"=",
"0.05",
",",
"control_name",
"=",
"0",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"super",
"(",
")",
".",
"__init__",
"(",
"learner",
"=",
"XGBRegressor",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
",",
"ate_alpha",
"=",
"ate_alpha",
",",
"control_name",
"=",
"control_name",
",",
")"
] | [
872,
4
] | [
878,
9
] | python | en | ['en', 'en', 'en'] | True |
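Since *args/**kwargs are forwarded straight to XGBRegressor, any XGBoost hyperparameter can be passed at construction time; a sketch:

    # hyperparameters are forwarded to the underlying XGBRegressor
    xgb_driv = XGBDRIVRegressor(
        ate_alpha=0.05,
        control_name=0,
        n_estimators=200,
        max_depth=4,
        learning_rate=0.05,
    )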
open_ssl_over_tcp_stream | (
host,
port,
*,
https_compatible=False,
ssl_context=None,
# No trailing comma b/c bpo-9232 (fixed in py36)
happy_eyeballs_delay=DEFAULT_DELAY,
) | Make a TLS-encrypted connection to the given host and port over TCP.
This is a convenience wrapper that calls :func:`open_tcp_stream` and
wraps the result in an :class:`~trio.SSLStream`.
This function does not perform the TLS handshake; you can do it
manually by calling :meth:`~trio.SSLStream.do_handshake`, or else
it will be performed automatically the first time you send or receive
data.
Args:
host (bytes or str): The host to connect to. We require the server
to have a TLS certificate valid for this hostname.
port (int): The port to connect to.
https_compatible (bool): Set this to True if you're connecting to a web
server. See :class:`~trio.SSLStream` for details. Default:
False.
ssl_context (:class:`~ssl.SSLContext` or None): The SSL context to
use. If None (the default), :func:`ssl.create_default_context`
will be called to create a context.
happy_eyeballs_delay (float): See :func:`open_tcp_stream`.
Returns:
trio.SSLStream: the encrypted connection to the server.
| Make a TLS-encrypted connection to the given host and port over TCP. | async def open_ssl_over_tcp_stream(
host,
port,
*,
https_compatible=False,
ssl_context=None,
# No trailing comma b/c bpo-9232 (fixed in py36)
happy_eyeballs_delay=DEFAULT_DELAY,
):
"""Make a TLS-encrypted Connection to the given host and port over TCP.
This is a convenience wrapper that calls :func:`open_tcp_stream` and
wraps the result in an :class:`~trio.SSLStream`.
This function does not perform the TLS handshake; you can do it
manually by calling :meth:`~trio.SSLStream.do_handshake`, or else
it will be performed automatically the first time you send or receive
data.
Args:
host (bytes or str): The host to connect to. We require the server
to have a TLS certificate valid for this hostname.
port (int): The port to connect to.
https_compatible (bool): Set this to True if you're connecting to a web
server. See :class:`~trio.SSLStream` for details. Default:
False.
ssl_context (:class:`~ssl.SSLContext` or None): The SSL context to
use. If None (the default), :func:`ssl.create_default_context`
will be called to create a context.
happy_eyeballs_delay (float): See :func:`open_tcp_stream`.
Returns:
trio.SSLStream: the encrypted connection to the server.
"""
tcp_stream = await trio.open_tcp_stream(
host, port, happy_eyeballs_delay=happy_eyeballs_delay
)
if ssl_context is None:
ssl_context = ssl.create_default_context()
return trio.SSLStream(
tcp_stream, ssl_context, server_hostname=host, https_compatible=https_compatible
) | [
"async",
"def",
"open_ssl_over_tcp_stream",
"(",
"host",
",",
"port",
",",
"*",
",",
"https_compatible",
"=",
"False",
",",
"ssl_context",
"=",
"None",
",",
"# No trailing comma b/c bpo-9232 (fixed in py36)",
"happy_eyeballs_delay",
"=",
"DEFAULT_DELAY",
",",
")",
":",
"tcp_stream",
"=",
"await",
"trio",
".",
"open_tcp_stream",
"(",
"host",
",",
"port",
",",
"happy_eyeballs_delay",
"=",
"happy_eyeballs_delay",
")",
"if",
"ssl_context",
"is",
"None",
":",
"ssl_context",
"=",
"ssl",
".",
"create_default_context",
"(",
")",
"return",
"trio",
".",
"SSLStream",
"(",
"tcp_stream",
",",
"ssl_context",
",",
"server_hostname",
"=",
"host",
",",
"https_compatible",
"=",
"https_compatible",
")"
] | [
15,
0
] | [
57,
5
] | python | en | ['en', 'en', 'en'] | True |
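A runnable client sketch for the function above (the host name is illustrative; any TLS server with a valid certificate works):

    import trio

    async def main():
        stream = await trio.open_ssl_over_tcp_stream("example.com", 443)
        await stream.do_handshake()  # optional; happens on first send/receive anyway
        await stream.send_all(
            b"GET / HTTP/1.1\r\nHost: example.com\r\nConnection: close\r\n\r\n"
        )
        print(await stream.receive_some(4096))
        await stream.aclose()

    trio.run(main)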
open_ssl_over_tcp_listeners | (
port, ssl_context, *, host=None, https_compatible=False, backlog=None
) | Start listening for SSL/TLS-encrypted TCP connections to the given port.
Args:
port (int): The port to listen on. See :func:`open_tcp_listeners`.
ssl_context (~ssl.SSLContext): The SSL context to use for all incoming
connections.
host (str, bytes, or None): The address to bind to; use ``None`` to bind
to the wildcard address. See :func:`open_tcp_listeners`.
https_compatible (bool): See :class:`~trio.SSLStream` for details.
backlog (int or None): See :func:`open_tcp_listeners` for details.
| Start listening for SSL/TLS-encrypted TCP connections to the given port. | async def open_ssl_over_tcp_listeners(
port, ssl_context, *, host=None, https_compatible=False, backlog=None
):
"""Start listening for SSL/TLS-encrypted TCP connections to the given port.
Args:
port (int): The port to listen on. See :func:`open_tcp_listeners`.
ssl_context (~ssl.SSLContext): The SSL context to use for all incoming
connections.
host (str, bytes, or None): The address to bind to; use ``None`` to bind
to the wildcard address. See :func:`open_tcp_listeners`.
https_compatible (bool): See :class:`~trio.SSLStream` for details.
backlog (int or None): See :func:`open_tcp_listeners` for details.
"""
tcp_listeners = await trio.open_tcp_listeners(port, host=host, backlog=backlog)
ssl_listeners = [
trio.SSLListener(tcp_listener, ssl_context, https_compatible=https_compatible)
for tcp_listener in tcp_listeners
]
return ssl_listeners | [
"async",
"def",
"open_ssl_over_tcp_listeners",
"(",
"port",
",",
"ssl_context",
",",
"*",
",",
"host",
"=",
"None",
",",
"https_compatible",
"=",
"False",
",",
"backlog",
"=",
"None",
")",
":",
"tcp_listeners",
"=",
"await",
"trio",
".",
"open_tcp_listeners",
"(",
"port",
",",
"host",
"=",
"host",
",",
"backlog",
"=",
"backlog",
")",
"ssl_listeners",
"=",
"[",
"trio",
".",
"SSLListener",
"(",
"tcp_listener",
",",
"ssl_context",
",",
"https_compatible",
"=",
"https_compatible",
")",
"for",
"tcp_listener",
"in",
"tcp_listeners",
"]",
"return",
"ssl_listeners"
] | [
60,
0
] | [
80,
24
] | python | en | ['en', 'en', 'en'] | True |
serve_ssl_over_tcp | (
handler,
port,
ssl_context,
*,
host=None,
https_compatible=False,
backlog=None,
handler_nursery=None,
task_status=trio.TASK_STATUS_IGNORED,
) | Listen for incoming TCP connections, and for each one start a task
running ``handler(stream)``.
This is a thin convenience wrapper around
:func:`open_ssl_over_tcp_listeners` and :func:`serve_listeners` – see them
for full details.
.. warning::
If ``handler`` raises an exception, then this function doesn't do
anything special to catch it – so by default the exception will
propagate out and crash your server. If you don't want this, then catch
exceptions inside your ``handler``, or use a ``handler_nursery`` object
that responds to exceptions in some other way.
When used with ``nursery.start`` you get back the newly opened listeners.
See the documentation for :func:`serve_tcp` for an example where this is
useful.
Args:
handler: The handler to start for each incoming connection. Passed to
:func:`serve_listeners`.
port (int): The port to listen on. Use 0 to let the kernel pick
an open port. Ultimately passed to :func:`open_tcp_listeners`.
ssl_context (~ssl.SSLContext): The SSL context to use for all incoming
connections. Passed to :func:`open_ssl_over_tcp_listeners`.
host (str, bytes, or None): The address to bind to; use ``None`` to bind
to the wildcard address. Ultimately passed to
:func:`open_tcp_listeners`.
https_compatible (bool): Set this to True if you want to use
"HTTPS-style" TLS. See :class:`~trio.SSLStream` for details.
backlog (int or None): See :func:`open_tcp_listeners` for details.
handler_nursery: The nursery to start handlers in, or None to use an
internal nursery. Passed to :func:`serve_listeners`.
task_status: This function can be used with ``nursery.start``.
Returns:
This function only returns when cancelled.
| Listen for incoming TCP connections, and for each one start a task
running ``handler(stream)``. | async def serve_ssl_over_tcp(
handler,
port,
ssl_context,
*,
host=None,
https_compatible=False,
backlog=None,
handler_nursery=None,
task_status=trio.TASK_STATUS_IGNORED,
):
"""Listen for incoming TCP connections, and for each one start a task
running ``handler(stream)``.
This is a thin convenience wrapper around
:func:`open_ssl_over_tcp_listeners` and :func:`serve_listeners` – see them
for full details.
.. warning::
If ``handler`` raises an exception, then this function doesn't do
anything special to catch it – so by default the exception will
propagate out and crash your server. If you don't want this, then catch
exceptions inside your ``handler``, or use a ``handler_nursery`` object
that responds to exceptions in some other way.
When used with ``nursery.start`` you get back the newly opened listeners.
See the documentation for :func:`serve_tcp` for an example where this is
useful.
Args:
handler: The handler to start for each incoming connection. Passed to
:func:`serve_listeners`.
port (int): The port to listen on. Use 0 to let the kernel pick
an open port. Ultimately passed to :func:`open_tcp_listeners`.
ssl_context (~ssl.SSLContext): The SSL context to use for all incoming
connections. Passed to :func:`open_ssl_over_tcp_listeners`.
host (str, bytes, or None): The address to bind to; use ``None`` to bind
to the wildcard address. Ultimately passed to
:func:`open_tcp_listeners`.
https_compatible (bool): Set this to True if you want to use
"HTTPS-style" TLS. See :class:`~trio.SSLStream` for details.
backlog (int or None): See :func:`open_tcp_listeners` for details.
handler_nursery: The nursery to start handlers in, or None to use an
internal nursery. Passed to :func:`serve_listeners`.
task_status: This function can be used with ``nursery.start``.
Returns:
This function only returns when cancelled.
"""
listeners = await trio.open_ssl_over_tcp_listeners(
port,
ssl_context,
host=host,
https_compatible=https_compatible,
backlog=backlog,
)
await trio.serve_listeners(
handler, listeners, handler_nursery=handler_nursery, task_status=task_status
) | [
"async",
"def",
"serve_ssl_over_tcp",
"(",
"handler",
",",
"port",
",",
"ssl_context",
",",
"*",
",",
"host",
"=",
"None",
",",
"https_compatible",
"=",
"False",
",",
"backlog",
"=",
"None",
",",
"handler_nursery",
"=",
"None",
",",
"task_status",
"=",
"trio",
".",
"TASK_STATUS_IGNORED",
",",
")",
":",
"listeners",
"=",
"await",
"trio",
".",
"open_ssl_over_tcp_listeners",
"(",
"port",
",",
"ssl_context",
",",
"host",
"=",
"host",
",",
"https_compatible",
"=",
"https_compatible",
",",
"backlog",
"=",
"backlog",
",",
")",
"await",
"trio",
".",
"serve_listeners",
"(",
"handler",
",",
"listeners",
",",
"handler_nursery",
"=",
"handler_nursery",
",",
"task_status",
"=",
"task_status",
")"
] | [
83,
0
] | [
150,
5
] | python | en | ['en', 'en', 'en'] | True |
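A server-side sketch; the certificate paths are placeholders, and the echo handler deliberately stays simple (remember the warning above: an exception escaping the handler will crash the server):

    import ssl
    import trio

    async def echo_handler(stream):
        async for chunk in stream:  # iterate until the peer closes
            await stream.send_all(chunk)

    async def main():
        ctx = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
        ctx.load_cert_chain("cert.pem", "key.pem")  # placeholder paths
        await trio.serve_ssl_over_tcp(echo_handler, 8443, ctx)

    trio.run(main)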
JsonSchemaProfiler._create_boolean_expectation | (
self, key: str, details: dict
) | https://json-schema.org/understanding-json-schema/reference/boolean.html | https://json-schema.org/understanding-json-schema/reference/boolean.html | def _create_boolean_expectation(
self, key: str, details: dict
) -> Optional[ExpectationConfiguration]:
"""https://json-schema.org/understanding-json-schema/reference/boolean.html"""
object_types = self._get_object_types(details=details)
if JsonSchemaTypes.BOOLEAN.value not in object_types:
return None
# TODO map JSONSchema types to which type backend? Pandas? Should this value set be parameterized per back end?
kwargs = {"column": key, "value_set": [True, False]}
return ExpectationConfiguration("expect_column_values_to_be_in_set", kwargs) | [
"def",
"_create_boolean_expectation",
"(",
"self",
",",
"key",
":",
"str",
",",
"details",
":",
"dict",
")",
"->",
"Optional",
"[",
"ExpectationConfiguration",
"]",
":",
"object_types",
"=",
"self",
".",
"_get_object_types",
"(",
"details",
"=",
"details",
")",
"if",
"JsonSchemaTypes",
".",
"BOOLEAN",
".",
"value",
"not",
"in",
"object_types",
":",
"return",
"None",
"# TODO map JSONSchema types to which type backend? Pandas? Should this value set be parameterized per back end?",
"kwargs",
"=",
"{",
"\"column\"",
":",
"key",
",",
"\"value_set\"",
":",
"[",
"True",
",",
"False",
"]",
"}",
"return",
"ExpectationConfiguration",
"(",
"\"expect_column_values_to_be_in_set\"",
",",
"kwargs",
")"
] | [
196,
4
] | [
207,
84
] | python | de | ['de', 'de', 'nl'] | False |
JsonSchemaProfiler._create_range_expectation | (
self, key: str, details: dict
) | https://json-schema.org/understanding-json-schema/reference/numeric.html#range | https://json-schema.org/understanding-json-schema/reference/numeric.html#range | def _create_range_expectation(
self, key: str, details: dict
) -> Optional[ExpectationConfiguration]:
"""https://json-schema.org/understanding-json-schema/reference/numeric.html#range"""
object_types = self._get_object_types(details=details)
object_types = filter(
lambda object_type: object_type != JsonSchemaTypes.NULL.value, object_types
)
range_types = [JsonSchemaTypes.INTEGER.value, JsonSchemaTypes.NUMBER.value]
if set(object_types).issubset(set(range_types)) is False:
return None
type_ = details.get("type", None)
any_of = details.get("anyOf", None)
if not type_ and not any_of:
return None
minimum = None
maximum = None
exclusive_minimum = None
exclusive_maximum = None
if type_:
minimum = details.get("minimum", None)
maximum = details.get("maximum", None)
exclusive_minimum = details.get("exclusiveMinimum", None)
exclusive_maximum = details.get("exclusiveMaximum", None)
elif any_of:
for item in any_of:
item_type = item.get("type", None)
if item_type in range_types:
minimum = item.get("minimum", None)
maximum = item.get("maximum", None)
exclusive_minimum = item.get("exclusiveMinimum", None)
exclusive_maximum = item.get("exclusiveMaximum", None)
break
if (
minimum is None
and maximum is None
and exclusive_minimum is None
and exclusive_maximum is None
):
return None
kwargs: Dict[str, Any] = {"column": key}
if minimum is not None:
kwargs["min_value"] = minimum
if maximum is not None:
kwargs["max_value"] = maximum
if exclusive_minimum is not None:
kwargs["min_value"] = exclusive_minimum
kwargs["strict_min"] = True
if exclusive_maximum is not None:
kwargs["max_value"] = exclusive_maximum
kwargs["strict_max"] = True
return ExpectationConfiguration("expect_column_values_to_be_between", kwargs) | [
"def",
"_create_range_expectation",
"(",
"self",
",",
"key",
":",
"str",
",",
"details",
":",
"dict",
")",
"->",
"Optional",
"[",
"ExpectationConfiguration",
"]",
":",
"object_types",
"=",
"self",
".",
"_get_object_types",
"(",
"details",
"=",
"details",
")",
"object_types",
"=",
"filter",
"(",
"lambda",
"object_type",
":",
"object_type",
"!=",
"JsonSchemaTypes",
".",
"NULL",
".",
"value",
",",
"object_types",
")",
"range_types",
"=",
"[",
"JsonSchemaTypes",
".",
"INTEGER",
".",
"value",
",",
"JsonSchemaTypes",
".",
"NUMBER",
".",
"value",
"]",
"if",
"set",
"(",
"object_types",
")",
".",
"issubset",
"(",
"set",
"(",
"range_types",
")",
")",
"is",
"False",
":",
"return",
"None",
"type_",
"=",
"details",
".",
"get",
"(",
"\"type\"",
",",
"None",
")",
"any_of",
"=",
"details",
".",
"get",
"(",
"\"anyOf\"",
",",
"None",
")",
"if",
"not",
"type_",
"and",
"not",
"any_of",
":",
"return",
"None",
"minimum",
"=",
"None",
"maximum",
"=",
"None",
"exclusive_minimum",
"=",
"None",
"exclusive_maximum",
"=",
"None",
"if",
"type_",
":",
"minimum",
"=",
"details",
".",
"get",
"(",
"\"minimum\"",
",",
"None",
")",
"maximum",
"=",
"details",
".",
"get",
"(",
"\"maximum\"",
",",
"None",
")",
"exclusive_minimum",
"=",
"details",
".",
"get",
"(",
"\"exclusiveMinimum\"",
",",
"None",
")",
"exclusive_maximum",
"=",
"details",
".",
"get",
"(",
"\"exclusiveMaximum\"",
",",
"None",
")",
"elif",
"any_of",
":",
"for",
"item",
"in",
"any_of",
":",
"item_type",
"=",
"item",
".",
"get",
"(",
"\"type\"",
",",
"None",
")",
"if",
"item_type",
"in",
"range_types",
":",
"minimum",
"=",
"item",
".",
"get",
"(",
"\"minimum\"",
",",
"None",
")",
"maximum",
"=",
"item",
".",
"get",
"(",
"\"maximum\"",
",",
"None",
")",
"exclusive_minimum",
"=",
"item",
".",
"get",
"(",
"\"exclusiveMinimum\"",
",",
"None",
")",
"exclusive_maximum",
"=",
"item",
".",
"get",
"(",
"\"exclusiveMaximum\"",
",",
"None",
")",
"break",
"if",
"(",
"minimum",
"is",
"None",
"and",
"maximum",
"is",
"None",
"and",
"exclusive_minimum",
"is",
"None",
"and",
"exclusive_maximum",
"is",
"None",
")",
":",
"return",
"None",
"kwargs",
":",
"Dict",
"[",
"str",
",",
"Any",
"]",
"=",
"{",
"\"column\"",
":",
"key",
"}",
"if",
"minimum",
"is",
"not",
"None",
":",
"kwargs",
"[",
"\"min_value\"",
"]",
"=",
"minimum",
"if",
"maximum",
"is",
"not",
"None",
":",
"kwargs",
"[",
"\"max_value\"",
"]",
"=",
"maximum",
"if",
"exclusive_minimum",
"is",
"not",
"None",
":",
"kwargs",
"[",
"\"min_value\"",
"]",
"=",
"exclusive_minimum",
"kwargs",
"[",
"\"strict_min\"",
"]",
"=",
"True",
"if",
"exclusive_maximum",
"is",
"not",
"None",
":",
"kwargs",
"[",
"\"max_value\"",
"]",
"=",
"exclusive_maximum",
"kwargs",
"[",
"\"strict_max\"",
"]",
"=",
"True",
"return",
"ExpectationConfiguration",
"(",
"\"expect_column_values_to_be_between\"",
",",
"kwargs",
")"
] | [
209,
4
] | [
268,
85
] | python | de | ['de', 'de', 'sw'] | False |
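The branching above reduces to a small kwargs-building rule; a standalone sketch with a hypothetical "age" column (note that an exclusive bound overrides an inclusive one and sets the strict flag):

    # standalone sketch of the kwargs built by _create_range_expectation
    details = {"type": "integer", "minimum": 0, "exclusiveMaximum": 120}

    kwargs = {"column": "age"}  # hypothetical column name
    if details.get("minimum") is not None:
        kwargs["min_value"] = details["minimum"]
    if details.get("maximum") is not None:
        kwargs["max_value"] = details["maximum"]
    if details.get("exclusiveMinimum") is not None:
        kwargs["min_value"] = details["exclusiveMinimum"]
        kwargs["strict_min"] = True
    if details.get("exclusiveMaximum") is not None:
        kwargs["max_value"] = details["exclusiveMaximum"]
        kwargs["strict_max"] = True

    print(kwargs)
    # {'column': 'age', 'min_value': 0, 'max_value': 120, 'strict_max': True}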
JsonSchemaProfiler._create_string_length_expectation | (
self, key: str, details: dict
) | https://json-schema.org/understanding-json-schema/reference/string.html#length | https://json-schema.org/understanding-json-schema/reference/string.html#length | def _create_string_length_expectation(
self, key: str, details: dict
) -> Optional[ExpectationConfiguration]:
"""https://json-schema.org/understanding-json-schema/reference/string.html#length"""
object_types = self._get_object_types(details=details)
if JsonSchemaTypes.STRING.value not in object_types:
return None
type_ = details.get("type", None)
any_of = details.get("anyOf", None)
if not type_ and not any_of:
return None
if type_:
minimum = details.get("minLength", None)
maximum = details.get("maxLength", None)
elif any_of:
for item in any_of:
item_type = item.get("type", None)
if item_type == JsonSchemaTypes.STRING.value:
minimum = item.get("minLength", None)
maximum = item.get("maxLength", None)
break
if minimum is None and maximum is None:
return None
kwargs = {
"column": key,
}
if minimum == maximum:
kwargs["value"] = minimum
return ExpectationConfiguration(
"expect_column_value_lengths_to_equal", kwargs
)
if minimum is not None:
kwargs["min_value"] = minimum
if maximum is not None:
kwargs["max_value"] = maximum
return ExpectationConfiguration(
"expect_column_value_lengths_to_be_between", kwargs
) | [
"def",
"_create_string_length_expectation",
"(",
"self",
",",
"key",
":",
"str",
",",
"details",
":",
"dict",
")",
"->",
"Optional",
"[",
"ExpectationConfiguration",
"]",
":",
"object_types",
"=",
"self",
".",
"_get_object_types",
"(",
"details",
"=",
"details",
")",
"if",
"JsonSchemaTypes",
".",
"STRING",
".",
"value",
"not",
"in",
"object_types",
":",
"return",
"None",
"type_",
"=",
"details",
".",
"get",
"(",
"\"type\"",
",",
"None",
")",
"any_of",
"=",
"details",
".",
"get",
"(",
"\"anyOf\"",
",",
"None",
")",
"if",
"not",
"type_",
"and",
"not",
"any_of",
":",
"return",
"None",
"if",
"type_",
":",
"minimum",
"=",
"details",
".",
"get",
"(",
"\"minLength\"",
",",
"None",
")",
"maximum",
"=",
"details",
".",
"get",
"(",
"\"maxLength\"",
",",
"None",
")",
"elif",
"any_of",
":",
"for",
"item",
"in",
"any_of",
":",
"item_type",
"=",
"item",
".",
"get",
"(",
"\"type\"",
",",
"None",
")",
"if",
"item_type",
"==",
"JsonSchemaTypes",
".",
"STRING",
".",
"value",
":",
"minimum",
"=",
"item",
".",
"get",
"(",
"\"minLength\"",
",",
"None",
")",
"maximum",
"=",
"item",
".",
"get",
"(",
"\"maxLength\"",
",",
"None",
")",
"break",
"if",
"minimum",
"is",
"None",
"and",
"maximum",
"is",
"None",
":",
"return",
"None",
"kwargs",
"=",
"{",
"\"column\"",
":",
"key",
",",
"}",
"if",
"minimum",
"==",
"maximum",
":",
"kwargs",
"[",
"\"value\"",
"]",
"=",
"minimum",
"return",
"ExpectationConfiguration",
"(",
"\"expect_column_value_lengths_to_equal\"",
",",
"kwargs",
")",
"if",
"minimum",
"is",
"not",
"None",
":",
"kwargs",
"[",
"\"min_value\"",
"]",
"=",
"minimum",
"if",
"maximum",
"is",
"not",
"None",
":",
"kwargs",
"[",
"\"max_value\"",
"]",
"=",
"maximum",
"return",
"ExpectationConfiguration",
"(",
"\"expect_column_value_lengths_to_be_between\"",
",",
"kwargs",
")"
] | [
270,
4
] | [
314,
9
] | python | de | ['de', 'de', 'en'] | False |
JsonSchemaProfiler._create_set_expectation | (
self, key: str, details: dict
) | https://json-schema.org/understanding-json-schema/reference/generic.html#enumerated-values | https://json-schema.org/understanding-json-schema/reference/generic.html#enumerated-values | def _create_set_expectation(
self, key: str, details: dict
) -> Optional[ExpectationConfiguration]:
"""https://json-schema.org/understanding-json-schema/reference/generic.html#enumerated-values"""
enum_list = self._get_enum_list(details=details)
if not enum_list:
return None
enum_list = list(
filter(lambda item: item is not JsonSchemaTypes.NULL.value, enum_list)
)
kwargs = {"column": key, "value_set": enum_list}
return ExpectationConfiguration("expect_column_values_to_be_in_set", kwargs) | [
"def",
"_create_set_expectation",
"(",
"self",
",",
"key",
":",
"str",
",",
"details",
":",
"dict",
")",
"->",
"Optional",
"[",
"ExpectationConfiguration",
"]",
":",
"enum_list",
"=",
"self",
".",
"_get_enum_list",
"(",
"details",
"=",
"details",
")",
"if",
"not",
"enum_list",
":",
"return",
"None",
"enum_list",
"=",
"list",
"(",
"filter",
"(",
"lambda",
"item",
":",
"item",
"is",
"not",
"JsonSchemaTypes",
".",
"NULL",
".",
"value",
",",
"enum_list",
")",
")",
"kwargs",
"=",
"{",
"\"column\"",
":",
"key",
",",
"\"value_set\"",
":",
"enum_list",
"}",
"return",
"ExpectationConfiguration",
"(",
"\"expect_column_values_to_be_in_set\"",
",",
"kwargs",
")"
] | [
316,
4
] | [
330,
84
] | python | de | ['de', 'de', 'en'] | False |
JsonSchemaProfiler._create_null_or_not_null_column_expectation | (
self, key: str, details: dict
) | https://json-schema.org/understanding-json-schema/reference/null.html | https://json-schema.org/understanding-json-schema/reference/null.html | def _create_null_or_not_null_column_expectation(
self, key: str, details: dict
) -> Optional[ExpectationConfiguration]:
"""https://json-schema.org/understanding-json-schema/reference/null.html"""
object_types = self._get_object_types(details=details)
enum_list = self._get_enum_list(details=details)
kwargs = {"column": key}
if enum_list:
object_types = set(enum_list).union(set(object_types))
if JsonSchemaTypes.NULL.value not in object_types:
return ExpectationConfiguration(
"expect_column_values_to_not_be_null", kwargs
)
if len(object_types) == 1:
return ExpectationConfiguration("expect_column_values_to_be_null", kwargs)
return None | [
"def",
"_create_null_or_not_null_column_expectation",
"(",
"self",
",",
"key",
":",
"str",
",",
"details",
":",
"dict",
")",
"->",
"Optional",
"[",
"ExpectationConfiguration",
"]",
":",
"object_types",
"=",
"self",
".",
"_get_object_types",
"(",
"details",
"=",
"details",
")",
"enum_list",
"=",
"self",
".",
"_get_enum_list",
"(",
"details",
"=",
"details",
")",
"kwargs",
"=",
"{",
"\"column\"",
":",
"key",
"}",
"if",
"enum_list",
":",
"object_types",
"=",
"set",
"(",
"enum_list",
")",
".",
"union",
"(",
"set",
"(",
"object_types",
")",
")",
"if",
"JsonSchemaTypes",
".",
"NULL",
".",
"value",
"not",
"in",
"object_types",
":",
"return",
"ExpectationConfiguration",
"(",
"\"expect_column_values_to_not_be_null\"",
",",
"kwargs",
")",
"if",
"len",
"(",
"object_types",
")",
"==",
"1",
":",
"return",
"ExpectationConfiguration",
"(",
"\"expect_column_values_to_be_null\"",
",",
"kwargs",
")",
"return",
"None"
] | [
332,
4
] | [
351,
19
] | python | de | ['de', 'de', 'en'] | False |
deltas | (errors, epsilon, mean, std) | Compute mean and std deltas.
delta_mean = mean(errors) - mean(all errors below epsilon)
delta_std = std(errors) - std(all errors below epsilon)
Args:
errors (ndarray):
Array of errors.
epsilon (ndarray):
Threshold value.
mean (float):
Mean of errors.
std (float):
Standard deviation of errors.
Returns:
float, float:
* delta_mean.
* delta_std.
| Compute mean and std deltas. | def deltas(errors, epsilon, mean, std):
"""Compute mean and std deltas.
delta_mean = mean(errors) - mean(all errors below epsilon)
delta_std = std(errors) - std(all errors below epsilon)
Args:
errors (ndarray):
Array of errors.
epsilon (ndarray):
Threshold value.
mean (float):
Mean of errors.
std (float):
Standard deviation of errors.
Returns:
float, float:
* delta_mean.
* delta_std.
"""
below = errors[errors <= epsilon]
if not len(below):
return 0, 0
return mean - below.mean(), std - below.std() | [
"def",
"deltas",
"(",
"errors",
",",
"epsilon",
",",
"mean",
",",
"std",
")",
":",
"below",
"=",
"errors",
"[",
"errors",
"<=",
"epsilon",
"]",
"if",
"not",
"len",
"(",
"below",
")",
":",
"return",
"0",
",",
"0",
"return",
"mean",
"-",
"below",
".",
"mean",
"(",
")",
",",
"std",
"-",
"below",
".",
"std",
"(",
")"
] | [
10,
0
] | [
35,
49
] | python | en | ['en', 'es', 'en'] | True |
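A quick numeric check, assuming the deltas function above is in scope; a single large error inflates both statistics, so both deltas come out positive:

    import numpy as np

    errors = np.array([0.1, 0.2, 0.3, 2.0, 0.15])
    delta_mean, delta_std = deltas(
        errors, epsilon=1.0, mean=errors.mean(), std=errors.std()
    )
    print(delta_mean, delta_std)  # both > 0: removing 2.0 shrinks mean and std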
count_above | (errors, epsilon) | Count number of errors and continuous sequences above epsilon.
Continuous sequences are counted by shifting and counting the number
of positions where there was a change and the original value was true,
which means that a sequence started at that position.
Args:
errors (ndarray):
Array of errors.
epsilon (ndarray):
Threshold value.
Returns:
int, int:
* Number of errors above epsilon.
* Number of continuous sequences above epsilon.
| Count number of errors and continuous sequences above epsilon. | def count_above(errors, epsilon):
"""Count number of errors and continuous sequences above epsilon.
Continuous sequences are counted by shifting and counting the number
of positions where there was a change and the original value was true,
which means that a sequence started at that position.
Args:
errors (ndarray):
Array of errors.
epsilon (ndarray):
Threshold value.
Returns:
int, int:
* Number of errors above epsilon.
* Number of continuous sequences above epsilon.
"""
above = errors > epsilon
total_above = len(errors[above])
above = pd.Series(above)
shift = above.shift(1)
change = above != shift
total_consecutive = sum(above & change)
return total_above, total_consecutive | [
"def",
"count_above",
"(",
"errors",
",",
"epsilon",
")",
":",
"above",
"=",
"errors",
">",
"epsilon",
"total_above",
"=",
"len",
"(",
"errors",
"[",
"above",
"]",
")",
"above",
"=",
"pd",
".",
"Series",
"(",
"above",
")",
"shift",
"=",
"above",
".",
"shift",
"(",
"1",
")",
"change",
"=",
"above",
"!=",
"shift",
"total_consecutive",
"=",
"sum",
"(",
"above",
"&",
"change",
")",
"return",
"total_above",
",",
"total_consecutive"
] | [
38,
0
] | [
65,
41
] | python | en | ['en', 'en', 'en'] | True |
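A small check of the run-counting logic, assuming count_above above is in scope:

    import numpy as np

    errors = np.array([0.1, 2.0, 2.5, 0.2, 3.0])
    total_above, total_consecutive = count_above(errors, epsilon=1.0)
    print(total_above, total_consecutive)  # 3 values above, in 2 separate runs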
z_cost | (z, errors, mean, std) | Compute how bad a z value is.
The original formula is::
(delta_mean/mean) + (delta_std/std)
------------------------------------------------------
number of errors above + (number of sequences above)^2
which computes the "goodness" of `z`, meaning that the higher the value
the better the `z`.
In this case, we return this value inverted (we make it negative), to convert
it into a cost function, as later on we will use scipy.fmin to minimize it.
Args:
z (ndarray):
Value for which a cost score is calculated.
errors (ndarray):
Array of errors.
mean (float):
Mean of errors.
std (float):
Standard deviation of errors.
Returns:
float:
Cost of z.
| Compute how bad a z value is. | def z_cost(z, errors, mean, std):
"""Compute how bad a z value is.
The original formula is::
(delta_mean/mean) + (delta_std/std)
------------------------------------------------------
number of errors above + (number of sequences above)^2
which computes the "goodness" of `z`, meaning that the higher the value
the better the `z`.
In this case, we return this value inverted (we make it negative), to convert
it into a cost function, as later on we will use scipy.fmin to minimize it.
Args:
z (ndarray):
Value for which a cost score is calculated.
errors (ndarray):
Array of errors.
mean (float):
Mean of errors.
std (float):
Standard deviation of errors.
Returns:
float:
Cost of z.
"""
epsilon = mean + z * std
delta_mean, delta_std = deltas(errors, epsilon, mean, std)
above, consecutive = count_above(errors, epsilon)
numerator = -(delta_mean / mean + delta_std / std)
denominator = above + consecutive ** 2
if denominator == 0:
return np.inf
return numerator / denominator | [
"def",
"z_cost",
"(",
"z",
",",
"errors",
",",
"mean",
",",
"std",
")",
":",
"epsilon",
"=",
"mean",
"+",
"z",
"*",
"std",
"delta_mean",
",",
"delta_std",
"=",
"deltas",
"(",
"errors",
",",
"epsilon",
",",
"mean",
",",
"std",
")",
"above",
",",
"consecutive",
"=",
"count_above",
"(",
"errors",
",",
"epsilon",
")",
"numerator",
"=",
"-",
"(",
"delta_mean",
"/",
"mean",
"+",
"delta_std",
"/",
"std",
")",
"denominator",
"=",
"above",
"+",
"consecutive",
"**",
"2",
"if",
"denominator",
"==",
"0",
":",
"return",
"np",
".",
"inf",
"return",
"numerator",
"/",
"denominator"
] | [
68,
0
] | [
108,
34
] | python | en | ['en', 'en', 'en'] | True |
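Evaluating the cost at a few candidate z values makes the minimization target concrete (assuming z_cost and its helpers above are in scope):

    import numpy as np

    rng = np.random.RandomState(0)
    errors = np.abs(rng.normal(size=500))
    for z in (1.0, 2.0, 3.0):
        print(z, z_cost(z, errors, errors.mean(), errors.std()))
    # the most negative cost marks the best threshold mean + z * std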
_find_threshold | (errors, z_range) | Find the ideal threshold.
The ideal threshold is the one that minimizes the z_cost function. Scipy.fmin is used
to find the minimum, using the values from z_range as starting points.
Args:
errors (ndarray):
Array of errors.
z_range (list):
List of two values denoting the range out of which the start points for the
scipy.fmin function are chosen.
Returns:
float:
Calculated threshold value.
| Find the ideal threshold. | def _find_threshold(errors, z_range):
"""Find the ideal threshold.
The ideal threshold is the one that minimizes the z_cost function. Scipy.fmin is used
to find the minimum, using the values from z_range as starting points.
Args:
errors (ndarray):
Array of errors.
z_range (list):
List of two values denoting the range out of which the start points for the
scipy.fmin function are chosen.
Returns:
float:
Calculated threshold value.
"""
mean = errors.mean()
std = errors.std()
min_z, max_z = z_range
best_z = min_z
best_cost = np.inf
for z in range(min_z, max_z):
best = fmin(z_cost, z, args=(errors, mean, std), full_output=True, disp=False)
z, cost = best[0:2]
if cost < best_cost:
best_z = z[0]
best_cost = cost
return mean + best_z * std | [
"def",
"_find_threshold",
"(",
"errors",
",",
"z_range",
")",
":",
"mean",
"=",
"errors",
".",
"mean",
"(",
")",
"std",
"=",
"errors",
".",
"std",
"(",
")",
"min_z",
",",
"max_z",
"=",
"z_range",
"best_z",
"=",
"min_z",
"best_cost",
"=",
"np",
".",
"inf",
"for",
"z",
"in",
"range",
"(",
"min_z",
",",
"max_z",
")",
":",
"best",
"=",
"fmin",
"(",
"z_cost",
",",
"z",
",",
"args",
"=",
"(",
"errors",
",",
"mean",
",",
"std",
")",
",",
"full_output",
"=",
"True",
",",
"disp",
"=",
"False",
")",
"z",
",",
"cost",
"=",
"best",
"[",
"0",
":",
"2",
"]",
"if",
"cost",
"<",
"best_cost",
":",
"best_z",
"=",
"z",
"[",
"0",
"]",
"return",
"mean",
"+",
"best_z",
"*",
"std"
] | [
111,
0
] | [
140,
30
] | python | en | ['en', 'en', 'en'] | True |
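An end-to-end call, assuming _find_threshold and its helpers above are in scope (scipy is required, since the function uses scipy.optimize.fmin); note that the z_range values must be integers because they feed range():

    import numpy as np

    rng = np.random.RandomState(0)
    errors = np.abs(rng.normal(size=500))
    errors[100] = 8.0  # one obvious outlier
    threshold = _find_threshold(errors, z_range=(1, 10))
    print(threshold, (errors > threshold).sum())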
_fixed_threshold | (errors, k=4) | Calculate the threshold.
The fixed threshold is defined as k standard deviations away from the mean.
Args:
errors (ndarray):
Array of errors.
k (int):
Number of standard deviations above the mean that defines the threshold. Defaults to 4.
Returns:
float:
Calculated threshold value.
| Calculate the threshold. | def _fixed_threshold(errors, k=4):
"""Calculate the threshold.
The fixed threshold is defined as k standard deviations away from the mean.
Args:
errors (ndarray):
Array of errors.
k (int):
Number of standard deviations above the mean that defines the threshold. Defaults to 4.
Returns:
float:
Calculated threshold value.
"""
mean = errors.mean()
std = errors.std()
return mean + k * std | [
"def",
"_fixed_threshold",
"(",
"errors",
",",
"k",
"=",
"4",
")",
":",
"mean",
"=",
"errors",
".",
"mean",
"(",
")",
"std",
"=",
"errors",
".",
"std",
"(",
")",
"return",
"mean",
"+",
"k",
"*",
"std"
] | [
143,
0
] | [
159,
25
] | python | en | ['en', 'en', 'en'] | True |
_find_sequences | (errors, epsilon, anomaly_padding) | Find sequences of values that are above epsilon.
This is done by following these steps:
* create a boolean mask that indicates which values are above epsilon.
* mark a certain range of errors around each True value with a True as well.
* shift this mask by one place, filling the empty gap with a False.
* compare the shifted mask with the original one to see if there are changes.
* consider as a sequence start any point which is True and has changed.
* consider as a sequence end any point which is False and has changed.
Args:
errors (ndarray):
Array of errors.
epsilon (float):
Threshold value. All errors above epsilon are considered an anomaly.
anomaly_padding (int):
Number of errors before and after a found anomaly that are added to the
anomalous sequence.
Returns:
ndarray, float:
* Array containing start, end of each found anomalous sequence.
* Maximum error value that was not considered an anomaly.
| Find sequences of values that are above epsilon. | def _find_sequences(errors, epsilon, anomaly_padding):
"""Find sequences of values that are above epsilon.
This is done by following these steps:
* create a boolean mask that indicates which values are above epsilon.
* mark a certain range of errors around each True value with a True as well.
* shift this mask by one place, filling the empty gap with a False.
* compare the shifted mask with the original one to see if there are changes.
* consider as a sequence start any point which is True and has changed.
* consider as a sequence end any point which is False and has changed.
Args:
errors (ndarray):
Array of errors.
epsilon (float):
Threshold value. All errors above epsilon are considered an anomaly.
anomaly_padding (int):
Number of errors before and after a found anomaly that are added to the
anomalous sequence.
Returns:
ndarray, float:
* Array containing start, end of each found anomalous sequence.
* Maximum error value that was not considered an anomaly.
"""
above = pd.Series(errors > epsilon)
index_above = np.argwhere(above.values)
for idx in index_above.flatten():
above[max(0, idx - anomaly_padding):min(idx + anomaly_padding + 1, len(above))] = True
shift = above.shift(1).fillna(False)
change = above != shift
if above.all():
max_below = 0
else:
max_below = max(errors[~above])
index = above.index
starts = index[above & change].tolist()
ends = (index[~above & change] - 1).tolist()
if len(ends) == len(starts) - 1:
ends.append(len(above) - 1)
return np.array([starts, ends]).T, max_below | [
"def",
"_find_sequences",
"(",
"errors",
",",
"epsilon",
",",
"anomaly_padding",
")",
":",
"above",
"=",
"pd",
".",
"Series",
"(",
"errors",
">",
"epsilon",
")",
"index_above",
"=",
"np",
".",
"argwhere",
"(",
"above",
".",
"values",
")",
"for",
"idx",
"in",
"index_above",
".",
"flatten",
"(",
")",
":",
"above",
"[",
"max",
"(",
"0",
",",
"idx",
"-",
"anomaly_padding",
")",
":",
"min",
"(",
"idx",
"+",
"anomaly_padding",
"+",
"1",
",",
"len",
"(",
"above",
")",
")",
"]",
"=",
"True",
"shift",
"=",
"above",
".",
"shift",
"(",
"1",
")",
".",
"fillna",
"(",
"False",
")",
"change",
"=",
"above",
"!=",
"shift",
"if",
"above",
".",
"all",
"(",
")",
":",
"max_below",
"=",
"0",
"else",
":",
"max_below",
"=",
"max",
"(",
"errors",
"[",
"~",
"above",
"]",
")",
"index",
"=",
"above",
".",
"index",
"starts",
"=",
"index",
"[",
"above",
"&",
"change",
"]",
".",
"tolist",
"(",
")",
"ends",
"=",
"(",
"index",
"[",
"~",
"above",
"&",
"change",
"]",
"-",
"1",
")",
".",
"tolist",
"(",
")",
"if",
"len",
"(",
"ends",
")",
"==",
"len",
"(",
"starts",
")",
"-",
"1",
":",
"ends",
".",
"append",
"(",
"len",
"(",
"above",
")",
"-",
"1",
")",
"return",
"np",
".",
"array",
"(",
"[",
"starts",
",",
"ends",
"]",
")",
".",
"T",
",",
"max_below"
] | [
162,
0
] | [
209,
48
] | python | en | ['en', 'en', 'en'] | True |
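A padding check, assuming _find_sequences above is in scope; a three-point run with anomaly_padding=2 grows by two indexes on each side:

    import numpy as np

    errors = np.zeros(100)
    errors[40:43] = 5.0
    sequences, max_below = _find_sequences(errors, epsilon=1.0, anomaly_padding=2)
    print(sequences)   # [[38 44]]: the run 40..42 plus 2 padded errors per side
    print(max_below)   # 0.0, the largest error that was not flagged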
_get_max_errors | (errors, sequences, max_below) | Get the maximum error for each anomalous sequence.
Also add a row with the max error which was not considered anomalous.
The returned table contains a ``max_error`` column with the maximum error of
each sequence, and ``start`` and ``stop`` columns with the corresponding start
and stop indexes, sorted in descending order by the maximum error.
Args:
errors (ndarray):
Array of errors.
sequences (ndarray):
Array containing start, end of anomalous sequences
max_below (float):
Maximum error value that was not considered an anomaly.
Returns:
pandas.DataFrame:
DataFrame object containing columns ``start``, ``stop`` and ``max_error``.
| Get the maximum error for each anomalous sequence. | def _get_max_errors(errors, sequences, max_below):
"""Get the maximum error for each anomalous sequence.
Also add a row with the max error which was not considered anomalous.
The returned table contains a ``max_error`` column with the maximum error of
each sequence, and ``start`` and ``stop`` columns with the corresponding start
and stop indexes, sorted in descending order by the maximum error.
Args:
errors (ndarray):
Array of errors.
sequences (ndarray):
Array containing start, end of anomalous sequences
max_below (float):
Maximum error value that was not considered an anomaly.
Returns:
pandas.DataFrame:
DataFrame object containing columns ``start``, ``stop`` and ``max_error``.
"""
max_errors = [{
'max_error': max_below,
'start': -1,
'stop': -1
}]
for sequence in sequences:
start, stop = sequence
sequence_errors = errors[start: stop + 1]
max_errors.append({
'start': start,
'stop': stop,
'max_error': max(sequence_errors)
})
max_errors = pd.DataFrame(max_errors).sort_values('max_error', ascending=False)
return max_errors.reset_index(drop=True) | [
"def",
"_get_max_errors",
"(",
"errors",
",",
"sequences",
",",
"max_below",
")",
":",
"max_errors",
"=",
"[",
"{",
"'max_error'",
":",
"max_below",
",",
"'start'",
":",
"-",
"1",
",",
"'stop'",
":",
"-",
"1",
"}",
"]",
"for",
"sequence",
"in",
"sequences",
":",
"start",
",",
"stop",
"=",
"sequence",
"sequence_errors",
"=",
"errors",
"[",
"start",
":",
"stop",
"+",
"1",
"]",
"max_errors",
".",
"append",
"(",
"{",
"'start'",
":",
"start",
",",
"'stop'",
":",
"stop",
",",
"'max_error'",
":",
"max",
"(",
"sequence_errors",
")",
"}",
")",
"max_errors",
"=",
"pd",
".",
"DataFrame",
"(",
"max_errors",
")",
".",
"sort_values",
"(",
"'max_error'",
",",
"ascending",
"=",
"False",
")",
"return",
"max_errors",
".",
"reset_index",
"(",
"drop",
"=",
"True",
")"
] | [
212,
0
] | [
249,
44
] | python | en | ['en', 'en', 'en'] | True |
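A minimal call, assuming _get_max_errors above is in scope; the non-anomalous maximum is appended with start/stop of -1 and the table comes back sorted by max_error:

    import numpy as np

    errors = np.array([0.1, 0.2, 5.0, 5.5, 0.3])
    sequences = np.array([[2, 3]])
    print(_get_max_errors(errors, sequences, max_below=0.3))
    #    max_error  start  stop
    # 0        5.5      2     3
    # 1        0.3     -1    -1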
_prune_anomalies | (max_errors, min_percent) | Prune anomalies to mitigate false positives.
This is done by following these steps:
* Shift the errors 1 negative step to compare each value with the next one.
* Drop the last row, which we do not want to compare.
* Calculate the percentage increase for each row.
* Find rows which are below ``min_percent``.
* Find the index of the latest of such rows.
* Get the values of all the sequences above that index.
Args:
max_errors (pandas.DataFrame):
DataFrame object containing columns ``start``, ``stop`` and ``max_error``.
min_percent (float):
Percentage of separation the anomalies need to meet between themselves and the
highest non-anomalous error in the window sequence.
Returns:
ndarray:
Array containing start, end, max_error of the pruned anomalies.
| Prune anomalies to mitigate false positives. | def _prune_anomalies(max_errors, min_percent):
"""Prune anomalies to mitigate false positives.
This is done by following these steps:
* Shift the errors 1 negative step to compare each value with the next one.
* Drop the last row, which we do not want to compare.
* Calculate the percentage increase for each row.
* Find rows which are below ``min_percent``.
* Find the index of the latest of such rows.
* Get the values of all the sequences above that index.
Args:
max_errors (pandas.DataFrame):
DataFrame object containing columns ``start``, ``stop`` and ``max_error``.
min_percent (float):
Percentage of separation the anomalies need to meet between themselves and the
highest non-anomalous error in the window sequence.
Returns:
ndarray:
Array containing start, end, max_error of the pruned anomalies.
"""
next_error = max_errors['max_error'].shift(-1).iloc[:-1]
max_error = max_errors['max_error'].iloc[:-1]
increase = (max_error - next_error) / max_error
too_small = increase < min_percent
if too_small.all():
last_index = -1
else:
last_index = max_error[~too_small].index[-1]
return max_errors[['start', 'stop', 'max_error']].iloc[0: last_index + 1].values | [
"def",
"_prune_anomalies",
"(",
"max_errors",
",",
"min_percent",
")",
":",
"next_error",
"=",
"max_errors",
"[",
"'max_error'",
"]",
".",
"shift",
"(",
"-",
"1",
")",
".",
"iloc",
"[",
":",
"-",
"1",
"]",
"max_error",
"=",
"max_errors",
"[",
"'max_error'",
"]",
".",
"iloc",
"[",
":",
"-",
"1",
"]",
"increase",
"=",
"(",
"max_error",
"-",
"next_error",
")",
"/",
"max_error",
"too_small",
"=",
"increase",
"<",
"min_percent",
"if",
"too_small",
".",
"all",
"(",
")",
":",
"last_index",
"=",
"-",
"1",
"else",
":",
"last_index",
"=",
"max_error",
"[",
"~",
"too_small",
"]",
".",
"index",
"[",
"-",
"1",
"]",
"return",
"max_errors",
"[",
"[",
"'start'",
",",
"'stop'",
",",
"'max_error'",
"]",
"]",
".",
"iloc",
"[",
"0",
":",
"last_index",
"+",
"1",
"]",
".",
"values"
] | [
252,
0
] | [
286,
84
] | python | en | ['en', 'en', 'de'] | True |
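A pruning sketch, assuming _prune_anomalies above is in scope; the input must already be sorted by max_error in descending order, with the non-anomalous maximum as the last row:

    import pandas as pd

    max_errors = pd.DataFrame([
        {"start": 10, "stop": 12, "max_error": 5.0},
        {"start": 30, "stop": 31, "max_error": 4.8},
        {"start": -1, "stop": -1, "max_error": 1.0},  # best non-anomalous error
    ])
    print(_prune_anomalies(max_errors, min_percent=0.1))
    # both sequences survive: the drop from 4.8 to 1.0 is far above 10%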
_compute_scores | (pruned_anomalies, errors, threshold, window_start) | Compute the score of the anomalies.
Calculate the score of the anomalies proportional to the maximum error in the sequence
and add window_start timestamp to make the index absolute.
Args:
pruned_anomalies (ndarray):
Array of anomalies containing the start, end and max_error for all anomalies in
the window.
errors (ndarray):
Array of errors.
threshold (float):
Threshold value.
window_start (int):
Index of the first error value in the window.
Returns:
list:
List of anomalies containing start-index, end-index, score for each anomaly.
| Compute the score of the anomalies. | def _compute_scores(pruned_anomalies, errors, threshold, window_start):
"""Compute the score of the anomalies.
Calculate the score of the anomalies proportional to the maximum error in the sequence
and add window_start timestamp to make the index absolute.
Args:
pruned_anomalies (ndarray):
Array of anomalies containing the start, end and max_error for all anomalies in
the window.
errors (ndarray):
Array of errors.
threshold (float):
Threshold value.
window_start (int):
Index of the first error value in the window.
Returns:
list:
List of anomalies containing start-index, end-index, score for each anomaly.
"""
anomalies = list()
denominator = errors.mean() + errors.std()
for row in pruned_anomalies:
max_error = row[2]
score = (max_error - threshold) / denominator
anomalies.append([row[0] + window_start, row[1] + window_start, score])
return anomalies | [
"def",
"_compute_scores",
"(",
"pruned_anomalies",
",",
"errors",
",",
"threshold",
",",
"window_start",
")",
":",
"anomalies",
"=",
"list",
"(",
")",
"denominator",
"=",
"errors",
".",
"mean",
"(",
")",
"+",
"errors",
".",
"std",
"(",
")",
"for",
"row",
"in",
"pruned_anomalies",
":",
"max_error",
"=",
"row",
"[",
"2",
"]",
"score",
"=",
"(",
"max_error",
"-",
"threshold",
")",
"/",
"denominator",
"anomalies",
".",
"append",
"(",
"[",
"row",
"[",
"0",
"]",
"+",
"window_start",
",",
"row",
"[",
"1",
"]",
"+",
"window_start",
",",
"score",
"]",
")",
"return",
"anomalies"
] | [
289,
0
] | [
318,
20
] | python | en | ['en', 'en', 'en'] | True |
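A scoring sketch, assuming _compute_scores above is in scope; indexes are shifted by window_start and the score is the excess over the threshold, scaled by mean + std of the window errors:

    import numpy as np

    errors = np.array([0.1, 0.2, 5.0, 5.5, 0.3])
    pruned = np.array([[2, 3, 5.5]])
    print(_compute_scores(pruned, errors, threshold=1.0, window_start=100))
    # [[102.0, 103.0, score]] with score = (5.5 - 1.0) / (mean + std)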
_merge_sequences | (sequences) | Merge consecutive and overlapping sequences.
We iterate over a list of start, end, score triples and merge together
overlapping or consecutive sequences.
The score of a merged sequence is the average of the single scores,
weighted by the length of the corresponding sequences.
Args:
sequences (list):
List of anomalies, containing start-index, end-index, score for each anomaly.
Returns:
ndarray:
Array containing start-index, end-index, score for each anomaly after merging.
| Merge consecutive and overlapping sequences. | def _merge_sequences(sequences):
"""Merge consecutive and overlapping sequences.
We iterate over a list of start, end, score triples and merge together
overlapping or consecutive sequences.
The score of a merged sequence is the average of the single scores,
weighted by the length of the corresponding sequences.
Args:
sequences (list):
List of anomalies, containing start-index, end-index, score for each anomaly.
Returns:
ndarray:
Array containing start-index, end-index, score for each anomaly after merging.
"""
if len(sequences) == 0:
return np.array([])
sorted_sequences = sorted(sequences, key=lambda entry: entry[0])
new_sequences = [sorted_sequences[0]]
score = [sorted_sequences[0][2]]
weights = [sorted_sequences[0][1] - sorted_sequences[0][0]]
for sequence in sorted_sequences[1:]:
prev_sequence = new_sequences[-1]
if sequence[0] <= prev_sequence[1] + 1:
score.append(sequence[2])
weights.append(sequence[1] - sequence[0])
weighted_average = np.average(score, weights=weights)
new_sequences[-1] = (prev_sequence[0], max(prev_sequence[1], sequence[1]),
weighted_average)
else:
score = [sequence[2]]
weights = [sequence[1] - sequence[0]]
new_sequences.append(sequence)
return np.array(new_sequences) | [
"def",
"_merge_sequences",
"(",
"sequences",
")",
":",
"if",
"len",
"(",
"sequences",
")",
"==",
"0",
":",
"return",
"np",
".",
"array",
"(",
"[",
"]",
")",
"sorted_sequences",
"=",
"sorted",
"(",
"sequences",
",",
"key",
"=",
"lambda",
"entry",
":",
"entry",
"[",
"0",
"]",
")",
"new_sequences",
"=",
"[",
"sorted_sequences",
"[",
"0",
"]",
"]",
"score",
"=",
"[",
"sorted_sequences",
"[",
"0",
"]",
"[",
"2",
"]",
"]",
"weights",
"=",
"[",
"sorted_sequences",
"[",
"0",
"]",
"[",
"1",
"]",
"-",
"sorted_sequences",
"[",
"0",
"]",
"[",
"0",
"]",
"]",
"for",
"sequence",
"in",
"sorted_sequences",
"[",
"1",
":",
"]",
":",
"prev_sequence",
"=",
"new_sequences",
"[",
"-",
"1",
"]",
"if",
"sequence",
"[",
"0",
"]",
"<=",
"prev_sequence",
"[",
"1",
"]",
"+",
"1",
":",
"score",
".",
"append",
"(",
"sequence",
"[",
"2",
"]",
")",
"weights",
".",
"append",
"(",
"sequence",
"[",
"1",
"]",
"-",
"sequence",
"[",
"0",
"]",
")",
"weighted_average",
"=",
"np",
".",
"average",
"(",
"score",
",",
"weights",
"=",
"weights",
")",
"new_sequences",
"[",
"-",
"1",
"]",
"=",
"(",
"prev_sequence",
"[",
"0",
"]",
",",
"max",
"(",
"prev_sequence",
"[",
"1",
"]",
",",
"sequence",
"[",
"1",
"]",
")",
",",
"weighted_average",
")",
"else",
":",
"score",
"=",
"[",
"sequence",
"[",
"2",
"]",
"]",
"weights",
"=",
"[",
"sequence",
"[",
"1",
"]",
"-",
"sequence",
"[",
"0",
"]",
"]",
"new_sequences",
".",
"append",
"(",
"sequence",
")",
"return",
"np",
".",
"array",
"(",
"new_sequences",
")"
] | [
321,
0
] | [
359,
34
] | python | en | ['en', 'en', 'en'] | True |
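A merging sketch, assuming _merge_sequences above is in scope; overlapping or touching sequences collapse into one, with a length-weighted score:

    sequences = [(0, 5, 0.8), (4, 10, 0.6), (20, 25, 0.9)]
    print(_merge_sequences(sequences))
    # the first two overlap and merge into (0, 10) with score
    # (0.8 * 5 + 0.6 * 6) / 11 ~= 0.69; (20, 25, 0.9) stays separate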
_find_window_sequences | (window, z_range, anomaly_padding, min_percent, window_start,
fixed_threshold) | Find sequences of values that are anomalous.
We first find the threshold for the window, then find all sequences above that threshold.
After that, we get the max errors of the sequences and prune the anomalies. Lastly, the
score of the anomalies is computed.
Args:
window (ndarray):
Array of errors in the window that is analyzed.
z_range (list):
List of two values denoting the range out of which the start points for the
dynamic find_threshold function are chosen.
anomaly_padding (int):
Number of errors before and after a found anomaly that are added to the anomalous
sequence.
min_percent (float):
Percentage of separation the anomalies need to meet between themselves and the
highest non-anomalous error in the window sequence.
window_start (int):
Index of the first error value in the window.
fixed_threshold (bool):
Indicates whether to use fixed threshold or dynamic threshold.
Returns:
ndarray:
Array containing the start-index, end-index, score for each anomalous sequence
that was found in the window.
| Find sequences of values that are anomalous. | def _find_window_sequences(window, z_range, anomaly_padding, min_percent, window_start,
fixed_threshold):
"""Find sequences of values that are anomalous.
We first find the threshold for the window, then find all sequences above that threshold.
After that, we get the max errors of the sequences and prune the anomalies. Lastly, the
score of the anomalies is computed.
Args:
window (ndarray):
Array of errors in the window that is analyzed.
z_range (list):
List of two values denoting the range out of which the start points for the
dynamic find_threshold function are chosen.
anomaly_padding (int):
Number of errors before and after a found anomaly that are added to the anomalous
sequence.
min_percent (float):
Percentage of separation the anomalies need to meet between themselves and the
highest non-anomalous error in the window sequence.
window_start (int):
Index of the first error value in the window.
fixed_threshold (bool):
Indicates whether to use fixed threshold or dynamic threshold.
Returns:
ndarray:
Array containing the start-index, end-index, score for each anomalous sequence
that was found in the window.
"""
if fixed_threshold:
threshold = _fixed_threshold(window)
else:
threshold = _find_threshold(window, z_range)
window_sequences, max_below = _find_sequences(window, threshold, anomaly_padding)
max_errors = _get_max_errors(window, window_sequences, max_below)
pruned_anomalies = _prune_anomalies(max_errors, min_percent)
window_sequences = _compute_scores(pruned_anomalies, window, threshold, window_start)
return window_sequences | [
"def",
"_find_window_sequences",
"(",
"window",
",",
"z_range",
",",
"anomaly_padding",
",",
"min_percent",
",",
"window_start",
",",
"fixed_threshold",
")",
":",
"if",
"fixed_threshold",
":",
"threshold",
"=",
"_fixed_threshold",
"(",
"window",
")",
"else",
":",
"threshold",
"=",
"_find_threshold",
"(",
"window",
",",
"z_range",
")",
"window_sequences",
",",
"max_below",
"=",
"_find_sequences",
"(",
"window",
",",
"threshold",
",",
"anomaly_padding",
")",
"max_errors",
"=",
"_get_max_errors",
"(",
"window",
",",
"window_sequences",
",",
"max_below",
")",
"pruned_anomalies",
"=",
"_prune_anomalies",
"(",
"max_errors",
",",
"min_percent",
")",
"window_sequences",
"=",
"_compute_scores",
"(",
"pruned_anomalies",
",",
"window",
",",
"threshold",
",",
"window_start",
")",
"return",
"window_sequences"
] | [
362,
0
] | [
403,
27
] | python | en | ['en', 'en', 'en'] | True |
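`_find_window_sequences` chains a threshold step with run detection above that threshold, followed by pruning and scoring. Its helpers (`_fixed_threshold`, `_find_sequences`, `_get_max_errors`, `_prune_anomalies`, `_compute_scores`) are not shown in this section, so the sketch below uses hypothetical stand-ins for the first two steps only: a mean-plus-k-sigma threshold and a padded run finder. The real implementations may differ.

```python
import numpy as np

def fixed_threshold(window, k=1):
    # hypothetical stand-in for _fixed_threshold; k is an assumed parameter
    return window.mean() + k * window.std()

def find_sequences(window, threshold, anomaly_padding):
    # hypothetical stand-in for _find_sequences: locate contiguous runs of
    # errors above the threshold and pad each run on both sides
    above = np.pad(window > threshold, 1)  # False sentinels on both ends
    starts = np.flatnonzero(~above[:-1] & above[1:])
    ends = np.flatnonzero(above[:-1] & ~above[1:]) - 1
    return [(max(0, s - anomaly_padding),
             min(len(window) - 1, e + anomaly_padding))
            for s, e in zip(starts, ends)]

window = np.array([0.1, 0.1, 0.2, 5.0, 5.1, 0.1, 0.2, 0.1])
print(find_sequences(window, fixed_threshold(window), anomaly_padding=1))
# [(2, 5)] -- the spike at indices 3-4, padded by one error on each side
```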
find_anomalies | (errors, index, z_range=(0, 10), window_size=None, window_size_portion=None,
window_step_size=None, window_step_size_portion=None, min_percent=0.1,
anomaly_padding=50, lower_threshold=False, fixed_threshold=None) | Find sequences of error values that are anomalous.
We first define the window of errors that we want to analyze. We then find the anomalous
sequences in that window and store the start/stop index pairs that correspond to each
sequence, along with its score. Optionally, we can flip the error sequence around the mean
and apply the same procedure, allowing us to find unusually low error sequences.
We then move the window and repeat the procedure.
Lastly, we combine overlapping or consecutive sequences.
Args:
errors (ndarray):
Array of errors.
index (ndarray):
Array of indices of the errors.
z_range (list):
Optional. List of two values denoting the range out of which the start points for
the scipy.fmin function are chosen. If not given, (0, 10) is used.
window_size (int):
Optional. Size of the window for which a threshold is calculated. If not given,
`None` is used, which finds one threshold for the entire sequence of errors.
window_size_portion (float):
Optional. Specify the size of the window to be a portion of the sequence of errors.
If not given, `None` is used, and window size is used as is.
window_step_size (int):
Optional. Number of steps the window is moved before another threshold is
calculated for the new window.
window_step_size_portion (float):
Optional. Specify the number of steps to be a portion of the window size. If not given,
`None` is used, and window step size is used as is.
min_percent (float):
Optional. Percentage of separation the anomalies need to meet between themselves and
the highest non-anomalous error in the window sequence. If not given, 0.1 is used.
anomaly_padding (int):
Optional. Number of errors before and after a found anomaly that are added to the
anomalous sequence. If not given, 50 is used.
lower_threshold (bool):
Optional. Indicates whether to apply a lower threshold to find unusually low errors.
If not given, `False` is used.
fixed_threshold (bool):
Optional. Indicates whether to use fixed threshold or dynamic threshold. If not
given, `False` is used.
Returns:
ndarray:
Array containing start-index, end-index, score for each anomalous sequence that
was found.
| Find sequences of error values that are anomalous. | def find_anomalies(errors, index, z_range=(0, 10), window_size=None, window_size_portion=None,
window_step_size=None, window_step_size_portion=None, min_percent=0.1,
anomaly_padding=50, lower_threshold=False, fixed_threshold=None):
"""Find sequences of error values that are anomalous.
We first define the window of errors that we want to analyze. We then find the anomalous
sequences in that window and store the start/stop index pairs that correspond to each
sequence, along with its score. Optionally, we can flip the error sequence around the mean
and apply the same procedure, allowing us to find unusually low error sequences.
We then move the window and repeat the procedure.
Lastly, we combine overlapping or consecutive sequences.
Args:
errors (ndarray):
Array of errors.
index (ndarray):
Array of indices of the errors.
z_range (list):
Optional. List of two values denoting the range out of which the start points for
the scipy.fmin function are chosen. If not given, (0, 10) is used.
window_size (int):
Optional. Size of the window for which a threshold is calculated. If not given,
`None` is used, which finds one threshold for the entire sequence of errors.
window_size_portion (float):
Optional. Specify the size of the window to be a portion of the sequence of errors.
If not given, `None` is used, and window size is used as is.
window_step_size (int):
Optional. Number of steps the window is moved before another threshold is
calculated for the new window.
window_step_size_portion (float):
Optional. Specify the number of steps to be a portion of the window size. If not given,
`None` is used, and window step size is used as is.
min_percent (float):
Optional. Percentage of separation the anomalies need to meet between themselves and
the highest non-anomalous error in the window sequence. If not given, 0.1 is used.
anomaly_padding (int):
Optional. Number of errors before and after a found anomaly that are added to the
anomalous sequence. If not given, 50 is used.
lower_threshold (bool):
Optional. Indicates whether to apply a lower threshold to find unusually low errors.
If not given, `False` is used.
fixed_threshold (bool):
Optional. Indicates whether to use fixed threshold or dynamic threshold. If not
given, `False` is used.
Returns:
ndarray:
Array containing start-index, end-index, score for each anomalous sequence that
was found.
"""
window_size = window_size or len(errors)
if window_size_portion:
window_size = np.ceil(len(errors) * window_size_portion).astype('int')
window_step_size = window_step_size or window_size
if window_step_size_portion:
window_step_size = np.ceil(window_size * window_step_size_portion).astype('int')
window_start = 0
window_end = 0
sequences = list()
while window_end < len(errors):
window_end = window_start + window_size
window = errors[window_start:window_end]
window_sequences = _find_window_sequences(window, z_range, anomaly_padding, min_percent,
window_start, fixed_threshold)
sequences.extend(window_sequences)
if lower_threshold:
# Flip errors sequence around mean
mean = window.mean()
inverted_window = mean - (window - mean)
inverted_window_sequences = _find_window_sequences(inverted_window, z_range,
anomaly_padding, min_percent,
window_start, fixed_threshold)
sequences.extend(inverted_window_sequences)
window_start = window_start + window_step_size
sequences = _merge_sequences(sequences)
anomalies = list()
for start, stop, score in sequences:
anomalies.append([index[int(start)], index[int(stop)], score])
return np.asarray(anomalies) | [
"def",
"find_anomalies",
"(",
"errors",
",",
"index",
",",
"z_range",
"=",
"(",
"0",
",",
"10",
")",
",",
"window_size",
"=",
"None",
",",
"window_size_portion",
"=",
"None",
",",
"window_step_size",
"=",
"None",
",",
"window_step_size_portion",
"=",
"None",
",",
"min_percent",
"=",
"0.1",
",",
"anomaly_padding",
"=",
"50",
",",
"lower_threshold",
"=",
"False",
",",
"fixed_threshold",
"=",
"None",
")",
":",
"window_size",
"=",
"window_size",
"or",
"len",
"(",
"errors",
")",
"if",
"window_size_portion",
":",
"window_size",
"=",
"np",
".",
"ceil",
"(",
"len",
"(",
"errors",
")",
"*",
"window_size_portion",
")",
".",
"astype",
"(",
"'int'",
")",
"window_step_size",
"=",
"window_step_size",
"or",
"window_size",
"if",
"window_step_size_portion",
":",
"window_step_size",
"=",
"np",
".",
"ceil",
"(",
"window_size",
"*",
"window_step_size_portion",
")",
".",
"astype",
"(",
"'int'",
")",
"window_start",
"=",
"0",
"window_end",
"=",
"0",
"sequences",
"=",
"list",
"(",
")",
"while",
"window_end",
"<",
"len",
"(",
"errors",
")",
":",
"window_end",
"=",
"window_start",
"+",
"window_size",
"window",
"=",
"errors",
"[",
"window_start",
":",
"window_end",
"]",
"window_sequences",
"=",
"_find_window_sequences",
"(",
"window",
",",
"z_range",
",",
"anomaly_padding",
",",
"min_percent",
",",
"window_start",
",",
"fixed_threshold",
")",
"sequences",
".",
"extend",
"(",
"window_sequences",
")",
"if",
"lower_threshold",
":",
"# Flip errors sequence around mean",
"mean",
"=",
"window",
".",
"mean",
"(",
")",
"inverted_window",
"=",
"mean",
"-",
"(",
"window",
"-",
"mean",
")",
"inverted_window_sequences",
"=",
"_find_window_sequences",
"(",
"inverted_window",
",",
"z_range",
",",
"anomaly_padding",
",",
"min_percent",
",",
"window_start",
",",
"fixed_threshold",
")",
"sequences",
".",
"extend",
"(",
"inverted_window_sequences",
")",
"window_start",
"=",
"window_start",
"+",
"window_step_size",
"sequences",
"=",
"_merge_sequences",
"(",
"sequences",
")",
"anomalies",
"=",
"list",
"(",
")",
"for",
"start",
",",
"stop",
",",
"score",
"in",
"sequences",
":",
"anomalies",
".",
"append",
"(",
"[",
"index",
"[",
"int",
"(",
"start",
")",
"]",
",",
"index",
"[",
"int",
"(",
"stop",
")",
"]",
",",
"score",
"]",
")",
"return",
"np",
".",
"asarray",
"(",
"anomalies",
")"
] | [
406,
0
] | [
493,
32
] | python | en | ['en', 'en', 'en'] | True |
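One non-obvious step in `find_anomalies` is the `lower_threshold` branch: reflecting the window around its mean turns unusually low errors into spikes, so the same upper-threshold machinery catches both directions. A quick check of that identity:

```python
import numpy as np

window = np.array([1.0, 1.1, 0.9, 0.0, 1.0])
mean = window.mean()                      # 0.8
inverted_window = mean - (window - mean)  # equivalently 2 * mean - window
print(inverted_window)                    # [0.6 0.5 0.7 1.6 0.6]
# The dip at index 3 (0.0) becomes the largest value (1.6) in the
# inverted window, so an upper-threshold pass will flag it.
```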
multibatch_generic_csv_generator | () |
Construct a series of csv files with many data types for use in multibatch testing
|
Construct a series of csv files with many data types for use in multibatch testing
| def multibatch_generic_csv_generator():
"""
Construct a series of csv files with many data types for use in multibatch testing
"""
def _multibatch_generic_csv_generator(
data_path: str,
start_date: Optional[datetime.datetime] = None,
num_event_batches: Optional[int] = 20,
num_events_per_batch: Optional[int] = 5,
) -> List[str]:
if start_date is None:
start_date = datetime.datetime(2000, 1, 1)
file_list = []
category_strings = {
0: "category0",
1: "category1",
2: "category2",
3: "category3",
4: "category4",
5: "category5",
6: "category6",
}
for batch_num in range(num_event_batches):
# generate a dataframe with multiple column types
batch_start_date = start_date + datetime.timedelta(
days=(batch_num * num_events_per_batch)
)
# TODO: AJB 20210416 Add more column types
df = pd.DataFrame(
{
"event_date": [
(batch_start_date + datetime.timedelta(days=i)).strftime(
"%Y-%m-%d"
)
for i in range(num_events_per_batch)
],
"batch_num": [batch_num + 1 for _ in range(num_events_per_batch)],
"string_cardinality_3": [
category_strings[i % 3] for i in range(num_events_per_batch)
],
}
)
filename = f"csv_batch_{batch_num + 1:03}_of_{num_event_batches:03}.csv"
file_list.append(filename)
df.to_csv(
os.path.join(data_path, filename),
index_label="intra_batch_index",
)
return file_list
return _multibatch_generic_csv_generator | [
"def",
"multibatch_generic_csv_generator",
"(",
")",
":",
"def",
"_multibatch_generic_csv_generator",
"(",
"data_path",
":",
"str",
",",
"start_date",
":",
"Optional",
"[",
"datetime",
".",
"datetime",
"]",
"=",
"None",
",",
"num_event_batches",
":",
"Optional",
"[",
"int",
"]",
"=",
"20",
",",
"num_events_per_batch",
":",
"Optional",
"[",
"int",
"]",
"=",
"5",
",",
")",
"->",
"List",
"[",
"str",
"]",
":",
"if",
"start_date",
"is",
"None",
":",
"start_date",
"=",
"datetime",
".",
"datetime",
"(",
"2000",
",",
"1",
",",
"1",
")",
"file_list",
"=",
"[",
"]",
"category_strings",
"=",
"{",
"0",
":",
"\"category0\"",
",",
"1",
":",
"\"category1\"",
",",
"2",
":",
"\"category2\"",
",",
"3",
":",
"\"category3\"",
",",
"4",
":",
"\"category4\"",
",",
"5",
":",
"\"category5\"",
",",
"6",
":",
"\"category6\"",
",",
"}",
"for",
"batch_num",
"in",
"range",
"(",
"num_event_batches",
")",
":",
"# generate a dataframe with multiple column types",
"batch_start_date",
"=",
"start_date",
"+",
"datetime",
".",
"timedelta",
"(",
"days",
"=",
"(",
"batch_num",
"*",
"num_events_per_batch",
")",
")",
"# TODO: AJB 20210416 Add more column types",
"df",
"=",
"pd",
".",
"DataFrame",
"(",
"{",
"\"event_date\"",
":",
"[",
"(",
"batch_start_date",
"+",
"datetime",
".",
"timedelta",
"(",
"days",
"=",
"i",
")",
")",
".",
"strftime",
"(",
"\"%Y-%m-%d\"",
")",
"for",
"i",
"in",
"range",
"(",
"num_events_per_batch",
")",
"]",
",",
"\"batch_num\"",
":",
"[",
"batch_num",
"+",
"1",
"for",
"_",
"in",
"range",
"(",
"num_events_per_batch",
")",
"]",
",",
"\"string_cardinality_3\"",
":",
"[",
"category_strings",
"[",
"i",
"%",
"3",
"]",
"for",
"i",
"in",
"range",
"(",
"num_events_per_batch",
")",
"]",
",",
"}",
")",
"filename",
"=",
"f\"csv_batch_{batch_num + 1:03}_of_{num_event_batches:03}.csv\"",
"file_list",
".",
"append",
"(",
"filename",
")",
"df",
".",
"to_csv",
"(",
"os",
".",
"path",
".",
"join",
"(",
"data_path",
",",
"filename",
")",
",",
"index_label",
"=",
"\"intra_batch_index\"",
",",
")",
"return",
"file_list",
"return",
"_multibatch_generic_csv_generator"
] | [
19,
0
] | [
73,
44
] | python | en | ['en', 'error', 'th'] | False |
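A usage sketch for the factory above, assuming pandas is installed and the function is in scope. In the original test suite it is registered as a pytest fixture, so `generate` would normally be injected as a test argument rather than called directly:

```python
import tempfile

generate = multibatch_generic_csv_generator()
with tempfile.TemporaryDirectory() as data_path:
    files = generate(data_path=data_path, num_event_batches=3,
                     num_events_per_batch=2)
    print(files)
    # ['csv_batch_001_of_003.csv', 'csv_batch_002_of_003.csv',
    #  'csv_batch_003_of_003.csv']
```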
test_batches_are_accessible | (
monkeypatch,
multibatch_generic_csv_generator,
multibatch_generic_csv_generator_context,
) |
What does this test and why?
Batches created in the multibatch_generic_csv_generator fixture should be available using the
multibatch_generic_csv_generator_context
This test most likely duplicates tests elsewhere, but it is more of a test of the configurable fixture.
|
What does this test and why?
Batches created in the multibatch_generic_csv_generator fixture should be available using the
multibatch_generic_csv_generator_context
This test most likely duplicates tests elsewhere, but it is more of a test of the configurable fixture.
| def test_batches_are_accessible(
monkeypatch,
multibatch_generic_csv_generator,
multibatch_generic_csv_generator_context,
):
"""
What does this test and why?
Batches created in the multibatch_generic_csv_generator fixture should be available using the
multibatch_generic_csv_generator_context
This test most likely duplicates tests elsewhere, but it is more of a test of the configurable fixture.
"""
context: DataContext = multibatch_generic_csv_generator_context
data_relative_path = "../data"
data_path = os.path.join(context.root_directory, data_relative_path)
datasource_name = "generic_csv_generator"
data_connector_name = "daily_data_connector"
asset_name = "daily_data_asset"
datasource = context.datasources[datasource_name]
data_connector = datasource.data_connectors[data_connector_name]
total_batches: int = 20
file_list = multibatch_generic_csv_generator(
data_path=data_path, num_event_batches=total_batches
)
assert (
data_connector._get_data_reference_list_from_cache_by_data_asset_name(
data_asset_name=asset_name
)
== file_list
)
batch_request_1 = BatchRequest(
datasource_name="generic_csv_generator",
data_connector_name="daily_data_connector",
data_asset_name="daily_data_asset",
data_connector_query={
"index": -1,
},
)
# Should give most recent batch
validator_1 = context.get_validator(
batch_request=batch_request_1,
create_expectation_suite_with_name="my_expectation_suite_name_1",
)
metric_max = validator_1.get_metric(
MetricConfiguration("column.max", metric_domain_kwargs={"column": "batch_num"})
)
assert metric_max == total_batches
metric_value_set = validator_1.get_metric(
MetricConfiguration(
"column.distinct_values",
metric_domain_kwargs={"column": "string_cardinality_3"},
)
)
assert metric_value_set == {"category0", "category1", "category2"}
batch_request_2 = BatchRequest(
datasource_name="generic_csv_generator",
data_connector_name="daily_data_connector",
data_asset_name="daily_data_asset",
data_connector_query={
"index": -2,
},
)
validator_2 = context.get_validator(
batch_request=batch_request_2,
create_expectation_suite_with_name="my_expectation_suite_name_2",
)
metric_max = validator_2.get_metric(
MetricConfiguration("column.max", metric_domain_kwargs={"column": "batch_num"})
)
assert metric_max == total_batches - 1
metric_value_set = validator_2.get_metric(
MetricConfiguration(
"column.distinct_values",
metric_domain_kwargs={"column": "string_cardinality_3"},
)
)
assert metric_value_set == {"category0", "category1", "category2"}
for batch_num in range(1, total_batches + 1):
batch_request = BatchRequest(
datasource_name="generic_csv_generator",
data_connector_name="daily_data_connector",
data_asset_name="daily_data_asset",
data_connector_query={
"index": -batch_num,
},
)
validator = context.get_validator(
batch_request=batch_request,
create_expectation_suite_with_name=f"my_expectation_suite_name__{batch_num}",
)
metric_max = validator.get_metric(
MetricConfiguration(
"column.max", metric_domain_kwargs={"column": "batch_num"}
)
)
assert metric_max == (total_batches + 1) - batch_num
metric_value_set = validator.get_metric(
MetricConfiguration(
"column.distinct_values",
metric_domain_kwargs={"column": "string_cardinality_3"},
)
)
assert metric_value_set == {"category0", "category1", "category2"} | [
"def",
"test_batches_are_accessible",
"(",
"monkeypatch",
",",
"multibatch_generic_csv_generator",
",",
"multibatch_generic_csv_generator_context",
",",
")",
":",
"context",
":",
"DataContext",
"=",
"multibatch_generic_csv_generator_context",
"data_relative_path",
"=",
"\"../data\"",
"data_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"context",
".",
"root_directory",
",",
"data_relative_path",
")",
"datasource_name",
"=",
"\"generic_csv_generator\"",
"data_connector_name",
"=",
"\"daily_data_connector\"",
"asset_name",
"=",
"\"daily_data_asset\"",
"datasource",
"=",
"context",
".",
"datasources",
"[",
"datasource_name",
"]",
"data_connector",
"=",
"datasource",
".",
"data_connectors",
"[",
"data_connector_name",
"]",
"total_batches",
":",
"int",
"=",
"20",
"file_list",
"=",
"multibatch_generic_csv_generator",
"(",
"data_path",
"=",
"data_path",
",",
"num_event_batches",
"=",
"total_batches",
")",
"assert",
"(",
"data_connector",
".",
"_get_data_reference_list_from_cache_by_data_asset_name",
"(",
"data_asset_name",
"=",
"asset_name",
")",
"==",
"file_list",
")",
"batch_request_1",
"=",
"BatchRequest",
"(",
"datasource_name",
"=",
"\"generic_csv_generator\"",
",",
"data_connector_name",
"=",
"\"daily_data_connector\"",
",",
"data_asset_name",
"=",
"\"daily_data_asset\"",
",",
"data_connector_query",
"=",
"{",
"\"index\"",
":",
"-",
"1",
",",
"}",
",",
")",
"# Should give most recent batch",
"validator_1",
"=",
"context",
".",
"get_validator",
"(",
"batch_request",
"=",
"batch_request_1",
",",
"create_expectation_suite_with_name",
"=",
"\"my_expectation_suite_name_1\"",
",",
")",
"metric_max",
"=",
"validator_1",
".",
"get_metric",
"(",
"MetricConfiguration",
"(",
"\"column.max\"",
",",
"metric_domain_kwargs",
"=",
"{",
"\"column\"",
":",
"\"batch_num\"",
"}",
")",
")",
"assert",
"metric_max",
"==",
"total_batches",
"metric_value_set",
"=",
"validator_1",
".",
"get_metric",
"(",
"MetricConfiguration",
"(",
"\"column.distinct_values\"",
",",
"metric_domain_kwargs",
"=",
"{",
"\"column\"",
":",
"\"string_cardinality_3\"",
"}",
",",
")",
")",
"assert",
"metric_value_set",
"==",
"{",
"\"category0\"",
",",
"\"category1\"",
",",
"\"category2\"",
"}",
"batch_request_2",
"=",
"BatchRequest",
"(",
"datasource_name",
"=",
"\"generic_csv_generator\"",
",",
"data_connector_name",
"=",
"\"daily_data_connector\"",
",",
"data_asset_name",
"=",
"\"daily_data_asset\"",
",",
"data_connector_query",
"=",
"{",
"\"index\"",
":",
"-",
"2",
",",
"}",
",",
")",
"validator_2",
"=",
"context",
".",
"get_validator",
"(",
"batch_request",
"=",
"batch_request_2",
",",
"create_expectation_suite_with_name",
"=",
"\"my_expectation_suite_name_2\"",
",",
")",
"metric_max",
"=",
"validator_2",
".",
"get_metric",
"(",
"MetricConfiguration",
"(",
"\"column.max\"",
",",
"metric_domain_kwargs",
"=",
"{",
"\"column\"",
":",
"\"batch_num\"",
"}",
")",
")",
"assert",
"metric_max",
"==",
"total_batches",
"-",
"1",
"metric_value_set",
"=",
"validator_2",
".",
"get_metric",
"(",
"MetricConfiguration",
"(",
"\"column.distinct_values\"",
",",
"metric_domain_kwargs",
"=",
"{",
"\"column\"",
":",
"\"string_cardinality_3\"",
"}",
",",
")",
")",
"assert",
"metric_value_set",
"==",
"{",
"\"category0\"",
",",
"\"category1\"",
",",
"\"category2\"",
"}",
"for",
"batch_num",
"in",
"range",
"(",
"1",
",",
"total_batches",
"+",
"1",
")",
":",
"batch_request",
"=",
"BatchRequest",
"(",
"datasource_name",
"=",
"\"generic_csv_generator\"",
",",
"data_connector_name",
"=",
"\"daily_data_connector\"",
",",
"data_asset_name",
"=",
"\"daily_data_asset\"",
",",
"data_connector_query",
"=",
"{",
"\"index\"",
":",
"-",
"batch_num",
",",
"}",
",",
")",
"validator",
"=",
"context",
".",
"get_validator",
"(",
"batch_request",
"=",
"batch_request",
",",
"create_expectation_suite_with_name",
"=",
"f\"my_expectation_suite_name__{batch_num}\"",
",",
")",
"metric_max",
"=",
"validator",
".",
"get_metric",
"(",
"MetricConfiguration",
"(",
"\"column.max\"",
",",
"metric_domain_kwargs",
"=",
"{",
"\"column\"",
":",
"\"batch_num\"",
"}",
")",
")",
"assert",
"metric_max",
"==",
"(",
"total_batches",
"+",
"1",
")",
"-",
"batch_num",
"metric_value_set",
"=",
"validator",
".",
"get_metric",
"(",
"MetricConfiguration",
"(",
"\"column.distinct_values\"",
",",
"metric_domain_kwargs",
"=",
"{",
"\"column\"",
":",
"\"string_cardinality_3\"",
"}",
",",
")",
")",
"assert",
"metric_value_set",
"==",
"{",
"\"category0\"",
",",
"\"category1\"",
",",
"\"category2\"",
"}"
] | [
149,
0
] | [
258,
74
] | python | en | ['en', 'error', 'th'] | False |
dircmp.phase3 | (self) |
Find out differences between common files.
Ensure we are using content comparison with shallow=False.
|
Find out differences between common files.
Ensure we are using content comparison with shallow=False.
| def phase3(self):
"""
Find out differences between common files.
Ensure we are using content comparison with shallow=False.
"""
fcomp = filecmp.cmpfiles(self.left, self.right, self.common_files, shallow = False)
self.same_files, self.diff_files, self.funny_files = fcomp | [
"def",
"phase3",
"(",
"self",
")",
":",
"fcomp",
"=",
"filecmp",
".",
"cmpfiles",
"(",
"self",
".",
"left",
",",
"self",
".",
"right",
",",
"self",
".",
"common_files",
",",
"shallow",
"=",
"False",
")",
"self",
".",
"same_files",
",",
"self",
".",
"diff_files",
",",
"self",
".",
"funny_files",
"=",
"fcomp"
] | [
9,
4
] | [
15,
66
] | python | en | ['en', 'error', 'th'] | False |
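The `shallow=False` in `phase3` forces a byte-for-byte comparison. The stdlib default (`shallow=True`) trusts `os.stat` signatures, so two files with identical size and mtime but different contents slip through, which is exactly the case the override guards against:

```python
import filecmp
import os
import tempfile

with tempfile.TemporaryDirectory() as left, tempfile.TemporaryDirectory() as right:
    for directory, text in ((left, "aaaa"), (right, "bbbb")):
        with open(os.path.join(directory, "data.txt"), "w") as f:
            f.write(text)  # same size, different bytes
    # force identical mtimes so the stat signatures match
    mtime = os.stat(os.path.join(left, "data.txt")).st_mtime
    os.utime(os.path.join(right, "data.txt"), (mtime, mtime))
    print(filecmp.cmpfiles(left, right, ["data.txt"], shallow=True))
    # (['data.txt'], [], [])   -- falsely reported as equal
    print(filecmp.cmpfiles(left, right, ["data.txt"], shallow=False))
    # ([], ['data.txt'], [])   -- content compare catches the difference
```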
Crawler.__init__ | (self, output_dir: str, urls: Optional[List[str]] = None, crawler_depth: int = 1,
filter_urls: Optional[List] = None, overwrite_existing_files=True) |
Init object with basic params for crawling (can be overwritten later).
:param output_dir: Path for the directory to store files
:param urls: List of http(s) address(es) (can also be supplied later when calling crawl())
:param crawler_depth: How many sublinks to follow from the initial list of URLs. Current options:
0: Only initial list of urls
1: Follow links found on the initial URLs (but no further)
:param filter_urls: Optional list of regular expressions that the crawled URLs must comply with.
All URLs not matching at least one of the regular expressions will be dropped.
:param overwrite_existing_files: Whether to overwrite existing files in output_dir with new content
|
Init object with basic params for crawling (can be overwritten later). | def __init__(self, output_dir: str, urls: Optional[List[str]] = None, crawler_depth: int = 1,
filter_urls: Optional[List] = None, overwrite_existing_files=True):
"""
Init object with basic params for crawling (can be overwritten later).
:param output_dir: Path for the directory to store files
:param urls: List of http(s) address(es) (can also be supplied later when calling crawl())
:param crawler_depth: How many sublinks to follow from the initial list of URLs. Current options:
0: Only initial list of urls
1: Follow links found on the initial URLs (but no further)
:param filter_urls: Optional list of regular expressions that the crawled URLs must comply with.
All URLs not matching at least one of the regular expressions will be dropped.
:param overwrite_existing_files: Whether to overwrite existing files in output_dir with new content
"""
try:
from webdriver_manager.chrome import ChromeDriverManager
except ImportError:
raise ImportError("Can't find package `webdriver-manager` \n"
"You can install it via `pip install webdriver-manager`")
try:
from selenium import webdriver
except ImportError:
raise ImportError("Can't find package `selenium` \n"
"You can install it via `pip install selenium`")
options = webdriver.chrome.options.Options()
options.add_argument('--headless')
self.driver = webdriver.Chrome(ChromeDriverManager().install(), options=options)
self.urls = urls
self.output_dir = output_dir
self.crawler_depth = crawler_depth
self.filter_urls = filter_urls
self.overwrite_existing_files = overwrite_existing_files | [
"def",
"__init__",
"(",
"self",
",",
"output_dir",
":",
"str",
",",
"urls",
":",
"Optional",
"[",
"List",
"[",
"str",
"]",
"]",
"=",
"None",
",",
"crawler_depth",
":",
"int",
"=",
"1",
",",
"filter_urls",
":",
"Optional",
"[",
"List",
"]",
"=",
"None",
",",
"overwrite_existing_files",
"=",
"True",
")",
":",
"try",
":",
"from",
"webdriver_manager",
".",
"chrome",
"import",
"ChromeDriverManager",
"except",
"ImportError",
":",
"raise",
"ImportError",
"(",
"\"Can't find package `webdriver-manager` \\n\"",
"\"You can install it via `pip install webdriver-manager`\"",
")",
"try",
":",
"from",
"selenium",
"import",
"webdriver",
"except",
"ImportError",
":",
"raise",
"ImportError",
"(",
"\"Can't find package `selenium` \\n\"",
"\"You can install it via `pip install selenium`\"",
")",
"options",
"=",
"webdriver",
".",
"chrome",
".",
"options",
".",
"Options",
"(",
")",
"options",
".",
"add_argument",
"(",
"'--headless'",
")",
"self",
".",
"driver",
"=",
"webdriver",
".",
"Chrome",
"(",
"ChromeDriverManager",
"(",
")",
".",
"install",
"(",
")",
",",
"options",
"=",
"options",
")",
"self",
".",
"urls",
"=",
"urls",
"self",
".",
"output_dir",
"=",
"output_dir",
"self",
".",
"crawler_depth",
"=",
"crawler_depth",
"self",
".",
"filter_urls",
"=",
"filter_urls",
"self",
".",
"overwrite_existing_files",
"=",
"overwrite_existing_files"
] | [
27,
4
] | [
60,
64
] | python | en | ['en', 'error', 'th'] | False |
Crawler.crawl | (self, output_dir: Union[str, Path, None] = None,
urls: Optional[List[str]] = None,
crawler_depth: Optional[int] = None,
filter_urls: Optional[List] = None,
overwrite_existing_files: Optional[bool] = None) |
Crawl URL(s), extract the text from the HTML, create a Haystack Document object out of it and save it (one JSON
file per URL, including text and basic meta data).
You can optionally specify via `filter_urls` to only crawl URLs that match a certain pattern.
All parameters are optional here and only meant to overwrite instance attributes at runtime.
If no parameters are provided to this method, the instance attributes that were passed during __init__ will be used.
:param output_dir: Path for the directory to store files
:param urls: List of http addresses or single http address
:param crawler_depth: How many sublinks to follow from the initial list of URLs. Current options:
0: Only initial list of urls
1: Follow links found on the initial URLs (but no further)
:param filter_urls: Optional list of regular expressions that the crawled URLs must comply with.
All URLs not matching at least one of the regular expressions will be dropped.
:param overwrite_existing_files: Whether to overwrite existing files in output_dir with new content
:return: List of paths where the crawled webpages got stored
|
Crawl URL(s), extract the text from the HTML, create a Haystack Document object out of it and save it (one JSON
file per URL, including text and basic meta data).
You can optionally specify via `filter_urls` to only crawl URLs that match a certain pattern.
All parameters are optional here and only meant to overwrite instance attributes at runtime.
If no parameters are provided to this method, the instance attributes that were passed during __init__ will be used. | def crawl(self, output_dir: Union[str, Path, None] = None,
urls: Optional[List[str]] = None,
crawler_depth: Optional[int] = None,
filter_urls: Optional[List] = None,
overwrite_existing_files: Optional[bool] = None) -> List[Path]:
"""
Crawl URL(s), extract the text from the HTML, create a Haystack Document object out of it and save it (one JSON
file per URL, including text and basic meta data).
You can optionally specify via `filter_urls` to only crawl URLs that match a certain pattern.
All parameters are optional here and only meant to overwrite instance attributes at runtime.
If no parameters are provided to this method, the instance attributes that were passed during __init__ will be used.
:param output_dir: Path for the directory to store files
:param urls: List of http addresses or single http address
:param crawler_depth: How many sublinks to follow from the initial list of URLs. Current options:
0: Only initial list of urls
1: Follow links found on the initial URLs (but no further)
:param filter_urls: Optional list of regular expressions that the crawled URLs must comply with.
All URLs not matching at least one of the regular expressions will be dropped.
:param overwrite_existing_files: Whether to overwrite existing files in output_dir with new content
:return: List of paths where the crawled webpages got stored
"""
# use passed params or fallback to instance attributes
urls = urls or self.urls
if urls is None:
raise ValueError("Got no urls to crawl. Set `urls` to a list of URLs in __init__(), crawl() or run(). `")
output_dir = output_dir or self.output_dir
filter_urls = filter_urls or self.filter_urls
if overwrite_existing_files is None:
overwrite_existing_files = self.overwrite_existing_files
if crawler_depth is None:
crawler_depth = self.crawler_depth
output_dir = Path(output_dir)
if not output_dir.exists():
output_dir.mkdir(parents=True)
is_not_empty = len(list(output_dir.rglob("*"))) > 0
if is_not_empty and not overwrite_existing_files:
logger.info(
f"Found data stored in `{output_dir}`. Delete this first if you really want to fetch new data."
)
return []
else:
logger.info(f"Fetching from {urls} to `{output_dir}`")
filepaths = []
sub_links: Dict[str, List] = {}
# don't go beyond the initial list of urls
if crawler_depth == 0:
filepaths += self._write_to_files(urls, output_dir=output_dir)
# follow one level of sublinks
elif crawler_depth == 1:
for url_ in urls:
existed_links: List = list(sum(list(sub_links.values()), []))
sub_links[url_] = list(self._extract_sublinks_from_url(base_url=url_, filter_urls=filter_urls,
existed_links=existed_links))
for url in sub_links:
filepaths += self._write_to_files(sub_links[url], output_dir=output_dir, base_url=url)
return filepaths | [
"def",
"crawl",
"(",
"self",
",",
"output_dir",
":",
"Union",
"[",
"str",
",",
"Path",
",",
"None",
"]",
"=",
"None",
",",
"urls",
":",
"Optional",
"[",
"List",
"[",
"str",
"]",
"]",
"=",
"None",
",",
"crawler_depth",
":",
"Optional",
"[",
"int",
"]",
"=",
"None",
",",
"filter_urls",
":",
"Optional",
"[",
"List",
"]",
"=",
"None",
",",
"overwrite_existing_files",
":",
"Optional",
"[",
"bool",
"]",
"=",
"None",
")",
"->",
"List",
"[",
"Path",
"]",
":",
"# use passed params or fallback to instance attributes",
"urls",
"=",
"urls",
"or",
"self",
".",
"urls",
"if",
"urls",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"\"Got no urls to crawl. Set `urls` to a list of URLs in __init__(), crawl() or run(). `\"",
")",
"output_dir",
"=",
"output_dir",
"or",
"self",
".",
"output_dir",
"filter_urls",
"=",
"filter_urls",
"or",
"self",
".",
"filter_urls",
"if",
"overwrite_existing_files",
"is",
"None",
":",
"overwrite_existing_files",
"=",
"self",
".",
"overwrite_existing_files",
"if",
"crawler_depth",
"is",
"None",
":",
"crawler_depth",
"=",
"self",
".",
"crawler_depth",
"output_dir",
"=",
"Path",
"(",
"output_dir",
")",
"if",
"not",
"output_dir",
".",
"exists",
"(",
")",
":",
"output_dir",
".",
"mkdir",
"(",
"parents",
"=",
"True",
")",
"is_not_empty",
"=",
"len",
"(",
"list",
"(",
"output_dir",
".",
"rglob",
"(",
"\"*\"",
")",
")",
")",
">",
"0",
"if",
"is_not_empty",
"and",
"not",
"overwrite_existing_files",
":",
"logger",
".",
"info",
"(",
"f\"Found data stored in `{output_dir}`. Delete this first if you really want to fetch new data.\"",
")",
"return",
"[",
"]",
"else",
":",
"logger",
".",
"info",
"(",
"f\"Fetching from {urls} to `{output_dir}`\"",
")",
"filepaths",
"=",
"[",
"]",
"sub_links",
":",
"Dict",
"[",
"str",
",",
"List",
"]",
"=",
"{",
"}",
"# don't go beyond the initial list of urls",
"if",
"crawler_depth",
"==",
"0",
":",
"filepaths",
"+=",
"self",
".",
"_write_to_files",
"(",
"urls",
",",
"output_dir",
"=",
"output_dir",
")",
"# follow one level of sublinks",
"elif",
"crawler_depth",
"==",
"1",
":",
"for",
"url_",
"in",
"urls",
":",
"existed_links",
":",
"List",
"=",
"list",
"(",
"sum",
"(",
"list",
"(",
"sub_links",
".",
"values",
"(",
")",
")",
",",
"[",
"]",
")",
")",
"sub_links",
"[",
"url_",
"]",
"=",
"list",
"(",
"self",
".",
"_extract_sublinks_from_url",
"(",
"base_url",
"=",
"url_",
",",
"filter_urls",
"=",
"filter_urls",
",",
"existed_links",
"=",
"existed_links",
")",
")",
"for",
"url",
"in",
"sub_links",
":",
"filepaths",
"+=",
"self",
".",
"_write_to_files",
"(",
"sub_links",
"[",
"url",
"]",
",",
"output_dir",
"=",
"output_dir",
",",
"base_url",
"=",
"url",
")",
"return",
"filepaths"
] | [
62,
4
] | [
125,
28
] | python | en | ['en', 'error', 'th'] | False |
Crawler.run | (self,
output_dir: Union[str, Path, None] = None,
urls: Optional[List[str]] = None,
crawler_depth: Optional[int] = None,
filter_urls: Optional[List] = None,
overwrite_existing_files: Optional[bool] = None,
**kwargs) |
Method to be executed when the Crawler is used as a Node within a Haystack pipeline.
:param output_dir: Path for the directory to store files
:param urls: List of http addresses or single http address
:param crawler_depth: How many sublinks to follow from the initial list of URLs. Current options:
0: Only initial list of urls
1: Follow links found on the initial URLs (but no further)
:param filter_urls: Optional list of regular expressions that the crawled URLs must comply with.
All URLs not matching at least one of the regular expressions will be dropped.
:param overwrite_existing_files: Whether to overwrite existing files in output_dir with new content
:return: Tuple({"paths": List of filepaths, ...}, Name of output edge)
|
Method to be executed when the Crawler is used as a Node within a Haystack pipeline. | def run(self,
output_dir: Union[str, Path, None] = None,
urls: Optional[List[str]] = None,
crawler_depth: Optional[int] = None,
filter_urls: Optional[List] = None,
overwrite_existing_files: Optional[bool] = None,
**kwargs) -> Tuple[Dict, str]:
"""
Method to be executed when the Crawler is used as a Node within a Haystack pipeline.
:param output_dir: Path for the directory to store files
:param urls: List of http addresses or single http address
:param crawler_depth: How many sublinks to follow from the initial list of URLs. Current options:
0: Only initial list of urls
1: Follow links found on the initial URLs (but no further)
:param filter_urls: Optional list of regular expressions that the crawled URLs must comply with.
All URLs not matching at least one of the regular expressions will be dropped.
:param overwrite_existing_files: Whether to overwrite existing files in output_dir with new content
:return: Tuple({"paths": List of filepaths, ...}, Name of output edge)
"""
filepaths = self.crawl(urls=urls, output_dir=output_dir, crawler_depth=crawler_depth, filter_urls=filter_urls,
overwrite_existing_files=overwrite_existing_files)
results = {"paths": filepaths}
results.update(**kwargs)
return results, "output_1" | [
"def",
"run",
"(",
"self",
",",
"output_dir",
":",
"Union",
"[",
"str",
",",
"Path",
",",
"None",
"]",
"=",
"None",
",",
"urls",
":",
"Optional",
"[",
"List",
"[",
"str",
"]",
"]",
"=",
"None",
",",
"crawler_depth",
":",
"Optional",
"[",
"int",
"]",
"=",
"None",
",",
"filter_urls",
":",
"Optional",
"[",
"List",
"]",
"=",
"None",
",",
"overwrite_existing_files",
":",
"Optional",
"[",
"bool",
"]",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
"->",
"Tuple",
"[",
"Dict",
",",
"str",
"]",
":",
"filepaths",
"=",
"self",
".",
"crawl",
"(",
"urls",
"=",
"urls",
",",
"output_dir",
"=",
"output_dir",
",",
"crawler_depth",
"=",
"crawler_depth",
",",
"filter_urls",
"=",
"filter_urls",
",",
"overwrite_existing_files",
"=",
"overwrite_existing_files",
")",
"results",
"=",
"{",
"\"paths\"",
":",
"filepaths",
"}",
"results",
".",
"update",
"(",
"*",
"*",
"kwargs",
")",
"return",
"results",
",",
"\"output_1\""
] | [
150,
4
] | [
176,
34
] | python | en | ['en', 'error', 'th'] | False |
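A hypothetical end-to-end usage of the `Crawler` class documented in the three entries above. The import path is an assumption (it has moved between Haystack releases), and the URL is only illustrative:

```python
from haystack.connector import Crawler  # assumed location; verify for your version

crawler = Crawler(output_dir="crawled_files", crawler_depth=0)
results, edge = crawler.run(urls=["https://haystack.deepset.ai/overview/intro"])
print(edge)              # "output_1"
print(results["paths"])  # paths of the JSON files written under crawled_files/
```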
xarray_sortby_coord | (dataset, coord) |
Sort an xarray.Dataset by a coordinate. xarray.Dataset.sortby() sometimes fails, so this is an alternative.
Credit to https://stackoverflow.com/a/42600594/5449970.
|
Sort an xarray.Dataset by a coordinate. xarray.Dataset.sortby() sometimes fails, so this is an alternative.
Credit to https://stackoverflow.com/a/42600594/5449970.
| def xarray_sortby_coord(dataset, coord):
"""
Sort an xarray.Dataset by a coordinate. xarray.Dataset.sortby() sometimes fails, so this is an alternative.
Credit to https://stackoverflow.com/a/42600594/5449970.
"""
return dataset.loc[{coord:np.sort(dataset.coords[coord].values)}] | [
"def",
"xarray_sortby_coord",
"(",
"dataset",
",",
"coord",
")",
":",
"return",
"dataset",
".",
"loc",
"[",
"{",
"coord",
":",
"np",
".",
"sort",
"(",
"dataset",
".",
"coords",
"[",
"coord",
"]",
".",
"values",
")",
"}",
"]"
] | [
2,
0
] | [
7,
69
] | python | en | ['en', 'ja', 'th'] | False |
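A minimal check of the sort-by-coordinate workaround, assuming xarray is installed: label-based `.loc` selection with a pre-sorted coordinate array reorders both the coordinate and the data variables:

```python
import numpy as np
import xarray as xr

ds = xr.Dataset({"v": ("time", [30, 10, 20])}, coords={"time": [3, 1, 2]})
sorted_ds = ds.loc[{"time": np.sort(ds.coords["time"].values)}]
print(sorted_ds.time.values)  # [1 2 3]
print(sorted_ds.v.values)     # [10 20 30]
```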
declaration_algs_cache_t.cmp_data | (self) | Data used for comparison between declarations. | Data used for comparison between declarations. | def cmp_data(self):
"""Data used for comparison between declarations."""
return self._cmp_data | [
"def",
"cmp_data",
"(",
"self",
")",
":",
"return",
"self",
".",
"_cmp_data"
] | [
172,
4
] | [
174,
29
] | python | en | ['en', 'en', 'en'] | True |
declaration_algs_cache_t.cmp_data | (self, cmp_data) | Data used for comparison between declarations. | Data used for comparison between declarations. | def cmp_data(self, cmp_data):
"""Data used for comparison between declarations."""
if not self.enabled:
cmp_data = None
self._cmp_data = cmp_data | [
"def",
"cmp_data",
"(",
"self",
",",
"cmp_data",
")",
":",
"if",
"not",
"self",
".",
"enabled",
":",
"cmp_data",
"=",
"None",
"self",
".",
"_cmp_data",
"=",
"cmp_data"
] | [
177,
4
] | [
181,
33
] | python | en | ['en', 'en', 'en'] | True |
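The two `cmp_data` entries above form a property pair in which the setter silently discards the payload when the cache is disabled. A standalone sketch of that pattern (the class skeleton is invented; only the property bodies mirror the entries):

```python
class AlgsCache:
    def __init__(self, enabled=True):
        self.enabled = enabled
        self._cmp_data = None

    @property
    def cmp_data(self):
        """Data used for comparison between declarations."""
        return self._cmp_data

    @cmp_data.setter
    def cmp_data(self, cmp_data):
        if not self.enabled:
            cmp_data = None  # a disabled cache refuses to store anything
        self._cmp_data = cmp_data

cache = AlgsCache(enabled=False)
cache.cmp_data = ("name", "scope")
print(cache.cmp_data)  # None
```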
execute_shell_command | (command: str) |
Wrap subprocess command in a try/except block to provide a convenient method for pip installing dependencies.
:param command: bash command -- as if typed in a shell/Terminal window
:return: status code -- 0 if successful; all other values (1 is the most common) indicate an error
|
Wrap subprocess command in a try/except block to provide a convenient method for pip installing dependencies. | def execute_shell_command(command: str) -> int:
"""
Wrap subprocess command in a try/except block to provide a convenient method for pip installing dependencies.
:param command: bash command -- as if typed in a shell/Terminal window
:return: status code -- 0 if successful; all other values (1 is the most common) indicate an error
"""
cwd: str = os.getcwd()
path_env_var: str = os.pathsep.join([os.environ.get("PATH", os.defpath), cwd])
env: dict = dict(os.environ, PATH=path_env_var)
status_code: int = 0
try:
res: CompletedProcess = run(
args=["bash", "-c", command],
stdin=None,
input=None,
# stdout=None, # commenting out to prevent issues with `subprocess.run` in python <3.7.4
# stderr=None, # commenting out to prevent issues with `subprocess.run` in python <3.7.4
capture_output=True,
shell=False,
cwd=cwd,
timeout=None,
check=True,
encoding=None,
errors=None,
text=None,
env=env,
universal_newlines=True,
)
sh_out: str = res.stdout.strip()
logger.info(sh_out)
except CalledProcessError as cpe:
status_code = cpe.returncode
sys.stderr.write(cpe.output)
sys.stderr.flush()
exception_message: str = "A Sub-Process call Exception occurred.\n"
exception_traceback: str = traceback.format_exc()
exception_message += (
f'{type(cpe).__name__}: "{str(cpe)}". Traceback: "{exception_traceback}".'
)
logger.error(exception_message)
return status_code | [
"def",
"execute_shell_command",
"(",
"command",
":",
"str",
")",
"->",
"int",
":",
"cwd",
":",
"str",
"=",
"os",
".",
"getcwd",
"(",
")",
"path_env_var",
":",
"str",
"=",
"os",
".",
"pathsep",
".",
"join",
"(",
"[",
"os",
".",
"environ",
".",
"get",
"(",
"\"PATH\"",
",",
"os",
".",
"defpath",
")",
",",
"cwd",
"]",
")",
"env",
":",
"dict",
"=",
"dict",
"(",
"os",
".",
"environ",
",",
"PATH",
"=",
"path_env_var",
")",
"status_code",
":",
"int",
"=",
"0",
"try",
":",
"res",
":",
"CompletedProcess",
"=",
"run",
"(",
"args",
"=",
"[",
"\"bash\"",
",",
"\"-c\"",
",",
"command",
"]",
",",
"stdin",
"=",
"None",
",",
"input",
"=",
"None",
",",
"# stdout=None, # commenting out to prevent issues with `subprocess.run` in python <3.7.4",
"# stderr=None, # commenting out to prevent issues with `subprocess.run` in python <3.7.4",
"capture_output",
"=",
"True",
",",
"shell",
"=",
"False",
",",
"cwd",
"=",
"cwd",
",",
"timeout",
"=",
"None",
",",
"check",
"=",
"True",
",",
"encoding",
"=",
"None",
",",
"errors",
"=",
"None",
",",
"text",
"=",
"None",
",",
"env",
"=",
"env",
",",
"universal_newlines",
"=",
"True",
",",
")",
"sh_out",
":",
"str",
"=",
"res",
".",
"stdout",
".",
"strip",
"(",
")",
"logger",
".",
"info",
"(",
"sh_out",
")",
"except",
"CalledProcessError",
"as",
"cpe",
":",
"status_code",
"=",
"cpe",
".",
"returncode",
"sys",
".",
"stderr",
".",
"write",
"(",
"cpe",
".",
"output",
")",
"sys",
".",
"stderr",
".",
"flush",
"(",
")",
"exception_message",
":",
"str",
"=",
"\"A Sub-Process call Exception occurred.\\n\"",
"exception_traceback",
":",
"str",
"=",
"traceback",
".",
"format_exc",
"(",
")",
"exception_message",
"+=",
"(",
"f'{type(cpe).__name__}: \"{str(cpe)}\". Traceback: \"{exception_traceback}\".'",
")",
"logger",
".",
"error",
"(",
"exception_message",
")",
"return",
"status_code"
] | [
22,
0
] | [
66,
22
] | python | en | ['en', 'error', 'th'] | False |
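The call pattern in `execute_shell_command`, stripped to its core: run a bash command with `check=True` so a non-zero exit raises `CalledProcessError`, which is then converted into a status code. (`capture_output=True` requires Python 3.7+.)

```python
from subprocess import CalledProcessError, run

try:
    res = run(["bash", "-c", "echo hello"], capture_output=True,
              universal_newlines=True, check=True)
    status_code = 0
    print(status_code, res.stdout.strip())  # 0 hello
except CalledProcessError as cpe:
    status_code = cpe.returncode
    print(status_code, cpe.output)
```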
get_contrib_requirements | (filepath: str) |
Parse the python file from filepath to identify a "library_metadata" dictionary in any defined classes, and return a requirements_info object that includes a list of pip-installable requirements for each class that defines them.
Note, currently we are handling all dependencies at the module level. To support future expandability and detail, this method also returns per-class requirements in addition to the concatenated list.
Args:
filepath: the path to the file to parse and analyze
Returns:
A dictionary:
{
"requirements": [ all_requirements_found_in_any_library_metadata_in_file ],
class_name: [ requirements ]
}
|
Parse the python file from filepath to identify a "library_metadata" dictionary in any defined classes, and return a requirements_info object that includes a list of pip-installable requirements for each class that defines them. | def get_contrib_requirements(filepath: str) -> Dict:
"""
Parse the python file from filepath to identify a "library_metadata" dictionary in any defined classes, and return a requirements_info object that includes a list of pip-installable requirements for each class that defines them.
Note, currently we are handling all dependencies at the module level. To support future expandability and detail, this method also returns per-class requirements in addition to the concatenated list.
Args:
filepath: the path to the file to parse and analyze
Returns:
A dictionary:
{
"requirements": [ all_requirements_found_in_any_library_metadata_in_file ],
class_name: [ requirements ]
}
"""
with open(filepath) as file:
tree = ast.parse(file.read())
requirements_info = {"requirements": []}
for child in ast.iter_child_nodes(tree):
if not isinstance(child, ast.ClassDef):
continue
current_class = child.name
for node in ast.walk(child):
if isinstance(node, ast.Assign):
try:
target_ids = [target.id for target in node.targets]
except (ValueError, AttributeError):
# some assignment types assign to non-node objects (e.g. Tuple)
target_ids = []
if "library_metadata" in target_ids:
library_metadata = ast.literal_eval(node.value)
requirements = library_metadata.get("requirements", [])
requirements_info[current_class] = requirements
requirements_info["requirements"] += requirements
return requirements_info | [
"def",
"get_contrib_requirements",
"(",
"filepath",
":",
"str",
")",
"->",
"Dict",
":",
"with",
"open",
"(",
"filepath",
")",
"as",
"file",
":",
"tree",
"=",
"ast",
".",
"parse",
"(",
"file",
".",
"read",
"(",
")",
")",
"requirements_info",
"=",
"{",
"\"requirements\"",
":",
"[",
"]",
"}",
"for",
"child",
"in",
"ast",
".",
"iter_child_nodes",
"(",
"tree",
")",
":",
"if",
"not",
"isinstance",
"(",
"child",
",",
"ast",
".",
"ClassDef",
")",
":",
"continue",
"current_class",
"=",
"child",
".",
"name",
"for",
"node",
"in",
"ast",
".",
"walk",
"(",
"child",
")",
":",
"if",
"isinstance",
"(",
"node",
",",
"ast",
".",
"Assign",
")",
":",
"try",
":",
"target_ids",
"=",
"[",
"target",
".",
"id",
"for",
"target",
"in",
"node",
".",
"targets",
"]",
"except",
"(",
"ValueError",
",",
"AttributeError",
")",
":",
"# some assignment types assign to non-node objects (e.g. Tuple)",
"target_ids",
"=",
"[",
"]",
"if",
"\"library_metadata\"",
"in",
"target_ids",
":",
"library_metadata",
"=",
"ast",
".",
"literal_eval",
"(",
"node",
".",
"value",
")",
"requirements",
"=",
"library_metadata",
".",
"get",
"(",
"\"requirements\"",
",",
"[",
"]",
")",
"requirements_info",
"[",
"current_class",
"]",
"=",
"requirements",
"requirements_info",
"[",
"\"requirements\"",
"]",
"+=",
"requirements",
"return",
"requirements_info"
] | [
69,
0
] | [
107,
28
] | python | en | ['en', 'error', 'th'] | False |
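The `ast` walk in `get_contrib_requirements` in miniature: pull `library_metadata["requirements"]` out of a class body without importing the module. The class name and metadata below are invented for the demo:

```python
import ast

source = '''
class ExpectSomethingContrib:
    library_metadata = {"requirements": ["scipy>=1.2"], "tags": ["demo"]}
'''

tree = ast.parse(source)
for child in ast.iter_child_nodes(tree):
    if isinstance(child, ast.ClassDef):
        for node in ast.walk(child):
            if isinstance(node, ast.Assign) and any(
                    getattr(target, "id", None) == "library_metadata"
                    for target in node.targets):
                metadata = ast.literal_eval(node.value)
                print(child.name, metadata.get("requirements", []))
# ExpectSomethingContrib ['scipy>=1.2']
```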
build_gallery | (
include_core: bool = True, include_contrib_experimental: bool = True
) |
Build the gallery object by running diagnostics for each Expectation and returning the resulting reports.
Args:
include_core: if true, include Expectations defined in the core module
include_contrib_experimental: if true, include Expectations defined in contrib_experimental
Returns:
A dictionary mapping each Expectation name to its diagnostics report
|
Build the gallery object by running diagnostics for each Expectation and returning the resulting reports. | def build_gallery(
include_core: bool = True, include_contrib_experimental: bool = True
) -> Dict:
"""
Build the gallery object by running diagnostics for each Expectation and returning the resulting reports.
Args:
include_core: if true, include Expectations defined in the core module
include_contrib_experimental: if true, include Expectations defined in contrib_experimental
Returns:
A dictionary mapping each Expectation name to its diagnostics report
"""
gallery_info = dict()
built_expectations = set()
logger.info("Loading great_expectations library.")
installed_packages = pkg_resources.working_set
installed_packages_names = [i.key for i in installed_packages]
installed_packages_txt = sorted(
[f"{i.key}=={i.version}" for i in installed_packages]
)
logger.debug(f"Found the following packages: {installed_packages_txt}")
import great_expectations
logger.info("Getting base registered expectations list")
core_expectations = (
great_expectations.expectations.registry.list_registered_expectation_implementations()
)
if include_core:
for expectation in core_expectations:
logger.debug(f"Running diagnostics for expectation: {expectation}")
impl = great_expectations.expectations.registry.get_expectation_impl(
expectation
)
diagnostics = impl().run_diagnostics()
gallery_info[expectation] = diagnostics
built_expectations.add(expectation)
else:
built_expectations = set(core_expectations)
if include_contrib_experimental:
logger.info("Finding contrib modules")
contrib_experimental_dir = os.path.join(
os.path.dirname(__file__),
"..",
"..",
"contrib",
"experimental",
"great_expectations_experimental",
)
sys.path.append(contrib_experimental_dir)
expectations_module = importlib.import_module(
"expectations", "great_expectations_experimental"
)
requirements_dict = {}
for root, dirs, files in os.walk(contrib_experimental_dir):
for file in files:
if file.endswith(".py") and not file == "__init__.py":
logger.debug(f"Getting requirements for module {file}")
requirements_dict[file[:-3]] = get_contrib_requirements(
os.path.join(root, file)
)
# Use a brute-force approach: install all requirements for each module as we import it
for expectation_module in expectations_module.__all__:
just_installed = set()
if expectation_module in requirements_dict:
logger.info(f"Loading dependencies for module {expectation_module}")
for req in requirements_dict[expectation_module]["requirements"]:
req_package = [
package.key for package in pkg_resources.parse_requirements(req)
]
if req_package in installed_packages_names:
continue
just_installed.add(req)
logger.debug(f"Executing command: 'pip install \"{req}\"'")
execute_shell_command(f'pip install "{req}"')
logger.debug(f"Importing {expectation_module}")
importlib.import_module(
f"expectations.{expectation_module}", "great_expectations_experimental"
)
available_expectations = (
great_expectations.expectations.registry.list_registered_expectation_implementations()
)
new_expectations = set(available_expectations) - built_expectations
for expectation in new_expectations:
logger.debug(f"Running diagnostics for expectation: {expectation}")
impl = great_expectations.expectations.registry.get_expectation_impl(
expectation
)
diagnostics = impl().run_diagnostics()
gallery_info[expectation] = diagnostics
built_expectations.add(expectation)
logger.info(f"Unloading just-installed for module {expectation_module}")
for req in just_installed:
logger.debug(f"Executing command: 'pip uninstall -y \"{req}\"'")
execute_shell_command(f'pip uninstall -y "{req}"')
metrics_module = importlib.import_module(
"metrics", "great_expectations_experimental"
)
for metrics_module in metrics_module.__all__:
if metrics_module in requirements_dict:
logger.warning(
f"Independent metrics module {metrics_module} not being processed."
)
return gallery_info | [
"def",
"build_gallery",
"(",
"include_core",
":",
"bool",
"=",
"True",
",",
"include_contrib_experimental",
":",
"bool",
"=",
"True",
")",
"->",
"Dict",
":",
"gallery_info",
"=",
"dict",
"(",
")",
"built_expectations",
"=",
"set",
"(",
")",
"logger",
".",
"info",
"(",
"\"Loading great_expectations library.\"",
")",
"installed_packages",
"=",
"pkg_resources",
".",
"working_set",
"installed_packages_names",
"=",
"[",
"i",
".",
"key",
"for",
"i",
"in",
"installed_packages",
"]",
"installed_packages_txt",
"=",
"sorted",
"(",
"[",
"f\"{i.key}=={i.version}\"",
"for",
"i",
"in",
"installed_packages",
"]",
")",
"logger",
".",
"debug",
"(",
"f\"Found the following packages: {installed_packages_txt}\"",
")",
"import",
"great_expectations",
"logger",
".",
"info",
"(",
"\"Getting base registered expectations list\"",
")",
"core_expectations",
"=",
"(",
"great_expectations",
".",
"expectations",
".",
"registry",
".",
"list_registered_expectation_implementations",
"(",
")",
")",
"if",
"include_core",
":",
"for",
"expectation",
"in",
"core_expectations",
":",
"logger",
".",
"debug",
"(",
"f\"Running diagnostics for expectation: {expectation}\"",
")",
"impl",
"=",
"great_expectations",
".",
"expectations",
".",
"registry",
".",
"get_expectation_impl",
"(",
"expectation",
")",
"diagnostics",
"=",
"impl",
"(",
")",
".",
"run_diagnostics",
"(",
")",
"gallery_info",
"[",
"expectation",
"]",
"=",
"diagnostics",
"built_expectations",
".",
"add",
"(",
"expectation",
")",
"else",
":",
"built_expectations",
"=",
"set",
"(",
"core_expectations",
")",
"if",
"include_contrib_experimental",
":",
"logger",
".",
"info",
"(",
"\"Finding contrib modules\"",
")",
"contrib_experimental_dir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"__file__",
")",
",",
"\"..\"",
",",
"\"..\"",
",",
"\"contrib\"",
",",
"\"experimental\"",
",",
"\"great_expectations_experimental\"",
",",
")",
"sys",
".",
"path",
".",
"append",
"(",
"contrib_experimental_dir",
")",
"expectations_module",
"=",
"importlib",
".",
"import_module",
"(",
"\"expectations\"",
",",
"\"great_expectations_experimental\"",
")",
"requirements_dict",
"=",
"{",
"}",
"for",
"root",
",",
"dirs",
",",
"files",
"in",
"os",
".",
"walk",
"(",
"contrib_experimental_dir",
")",
":",
"for",
"file",
"in",
"files",
":",
"if",
"file",
".",
"endswith",
"(",
"\".py\"",
")",
"and",
"not",
"file",
"==",
"\"__init__.py\"",
":",
"logger",
".",
"debug",
"(",
"f\"Getting requirements for module {file}\"",
")",
"requirements_dict",
"[",
"file",
"[",
":",
"-",
"3",
"]",
"]",
"=",
"get_contrib_requirements",
"(",
"os",
".",
"path",
".",
"join",
"(",
"root",
",",
"file",
")",
")",
"# Use a brute-force approach: install all requirements for each module as we import it",
"for",
"expectation_module",
"in",
"expectations_module",
".",
"__all__",
":",
"just_installed",
"=",
"set",
"(",
")",
"if",
"expectation_module",
"in",
"requirements_dict",
":",
"logger",
".",
"info",
"(",
"f\"Loading dependencies for module {expectation_module}\"",
")",
"for",
"req",
"in",
"requirements_dict",
"[",
"expectation_module",
"]",
"[",
"\"requirements\"",
"]",
":",
"req_package",
"=",
"[",
"package",
".",
"key",
"for",
"package",
"in",
"pkg_resources",
".",
"parse_requirements",
"(",
"req",
")",
"]",
"if",
"req_package",
"in",
"installed_packages_names",
":",
"continue",
"just_installed",
".",
"add",
"(",
"req",
")",
"logger",
".",
"debug",
"(",
"f\"Executing command: 'pip install \\\"{req}\\\"'\"",
")",
"execute_shell_command",
"(",
"f'pip install \"{req}\"'",
")",
"logger",
".",
"debug",
"(",
"f\"Importing {expectation_module}\"",
")",
"importlib",
".",
"import_module",
"(",
"f\"expectations.{expectation_module}\"",
",",
"\"great_expectations_experimental\"",
")",
"available_expectations",
"=",
"(",
"great_expectations",
".",
"expectations",
".",
"registry",
".",
"list_registered_expectation_implementations",
"(",
")",
")",
"new_expectations",
"=",
"set",
"(",
"available_expectations",
")",
"-",
"built_expectations",
"for",
"expectation",
"in",
"new_expectations",
":",
"logger",
".",
"debug",
"(",
"f\"Running diagnostics for expectation: {expectation}\"",
")",
"impl",
"=",
"great_expectations",
".",
"expectations",
".",
"registry",
".",
"get_expectation_impl",
"(",
"expectation",
")",
"diagnostics",
"=",
"impl",
"(",
")",
".",
"run_diagnostics",
"(",
")",
"gallery_info",
"[",
"expectation",
"]",
"=",
"diagnostics",
"built_expectations",
".",
"add",
"(",
"expectation",
")",
"logger",
".",
"info",
"(",
"f\"Unloading just-installed for module {expectation_module}\"",
")",
"for",
"req",
"in",
"just_installed",
":",
"logger",
".",
"debug",
"(",
"f\"Executing command: 'pip uninstall -y \\\"{req}\\\"'\"",
")",
"execute_shell_command",
"(",
"f'pip uninstall -y \"{req}\"'",
")",
"metrics_module",
"=",
"importlib",
".",
"import_module",
"(",
"\"metrics\"",
",",
"\"great_expectations_experimental\"",
")",
"for",
"metrics_module",
"in",
"metrics_module",
".",
"__all__",
":",
"if",
"metrics_module",
"in",
"requirements_dict",
":",
"logger",
".",
"warning",
"(",
"f\"Independent metrics module {metrics_module} not being processed.\"",
")",
"return",
"gallery_info"
] | [
110,
0
] | [
221,
23
] | python | en | ['en', 'error', 'th'] | False |
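The brute-force dependency handling in `build_gallery` follows an install/import/uninstall bracket. A sketch of that bracket with a hypothetical requirement name, reusing the `execute_shell_command` helper defined earlier in this document; unlike the original, it is wrapped in `try`/`finally` so the uninstall runs even if the import fails:

```python
just_installed = set()
for req in ["some-optional-package==1.0"]:  # hypothetical requirement
    just_installed.add(req)
    execute_shell_command(f'pip install "{req}"')
try:
    pass  # import the contrib module and run its diagnostics here
finally:
    for req in just_installed:
        execute_shell_command(f'pip uninstall -y "{req}"')
```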
objective | (baselexer) |
Generate a subclass of baselexer that accepts the Objective-C syntax
extensions.
|
Generate a subclass of baselexer that accepts the Objective-C syntax
extensions.
| def objective(baselexer):
"""
Generate a subclass of baselexer that accepts the Objective-C syntax
extensions.
"""
# Have to be careful not to accidentally match JavaDoc/Doxygen syntax here,
# since that's quite common in ordinary C/C++ files. It's OK to match
# JavaDoc/Doxygen keywords that only apply to Objective-C, mind.
#
# The upshot of this is that we CANNOT match @class or @interface
_oc_keywords = re.compile(r'@(?:end|implementation|protocol)')
# Matches [ <ws>? identifier <ws> ( identifier <ws>? ] | identifier? : )
# (note the identifier is *optional* when there is a ':'!)
_oc_message = re.compile(r'\[\s*[a-zA-Z_]\w*\s+'
r'(?:[a-zA-Z_]\w*\s*\]|'
r'(?:[a-zA-Z_]\w*)?:)')
class GeneratedObjectiveCVariant(baselexer):
"""
Implements Objective-C syntax on top of an existing C family lexer.
"""
tokens = {
'statements': [
(r'@"', String, 'string'),
(r'@(YES|NO)', Number),
(r"@'(\\.|\\[0-7]{1,3}|\\x[a-fA-F0-9]{1,2}|[^\\\'\n])'", String.Char),
(r'@(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+[lL]?', Number.Float),
(r'@(\d+\.\d*|\.\d+|\d+[fF])[fF]?', Number.Float),
(r'@0x[0-9a-fA-F]+[Ll]?', Number.Hex),
(r'@0[0-7]+[Ll]?', Number.Oct),
(r'@\d+[Ll]?', Number.Integer),
(r'@\(', Literal, 'literal_number'),
(r'@\[', Literal, 'literal_array'),
(r'@\{', Literal, 'literal_dictionary'),
(words((
'@selector', '@private', '@protected', '@public', '@encode',
'@synchronized', '@try', '@throw', '@catch', '@finally',
'@end', '@property', '@synthesize', '__bridge', '__bridge_transfer',
'__autoreleasing', '__block', '__weak', '__strong', 'weak', 'strong',
'copy', 'retain', 'assign', 'unsafe_unretained', 'atomic', 'nonatomic',
'readonly', 'readwrite', 'setter', 'getter', 'typeof', 'in',
'out', 'inout', 'release', 'class', '@dynamic', '@optional',
'@required', '@autoreleasepool'), suffix=r'\b'),
Keyword),
(words(('id', 'instancetype', 'Class', 'IMP', 'SEL', 'BOOL',
'IBOutlet', 'IBAction', 'unichar'), suffix=r'\b'),
Keyword.Type),
(r'@(true|false|YES|NO)\n', Name.Builtin),
(r'(YES|NO|nil|self|super)\b', Name.Builtin),
# Carbon types
(r'(Boolean|UInt8|SInt8|UInt16|SInt16|UInt32|SInt32)\b', Keyword.Type),
# Carbon built-ins
(r'(TRUE|FALSE)\b', Name.Builtin),
(r'(@interface|@implementation)(\s+)', bygroups(Keyword, Text),
('#pop', 'oc_classname')),
(r'(@class|@protocol)(\s+)', bygroups(Keyword, Text),
('#pop', 'oc_forward_classname')),
# @ can also prefix other expressions like @{...} or @(...)
(r'@', Punctuation),
inherit,
],
'oc_classname': [
# interface definition that inherits
('([a-zA-Z$_][\w$]*)(\s*:\s*)([a-zA-Z$_][\w$]*)?(\s*)(\{)',
bygroups(Name.Class, Text, Name.Class, Text, Punctuation),
('#pop', 'oc_ivars')),
('([a-zA-Z$_][\w$]*)(\s*:\s*)([a-zA-Z$_][\w$]*)?',
bygroups(Name.Class, Text, Name.Class), '#pop'),
# interface definition for a category
('([a-zA-Z$_][\w$]*)(\s*)(\([a-zA-Z$_][\w$]*\))(\s*)(\{)',
bygroups(Name.Class, Text, Name.Label, Text, Punctuation),
('#pop', 'oc_ivars')),
('([a-zA-Z$_][\w$]*)(\s*)(\([a-zA-Z$_][\w$]*\))',
bygroups(Name.Class, Text, Name.Label), '#pop'),
# simple interface / implementation
('([a-zA-Z$_][\w$]*)(\s*)(\{)',
bygroups(Name.Class, Text, Punctuation), ('#pop', 'oc_ivars')),
('([a-zA-Z$_][\w$]*)', Name.Class, '#pop')
],
'oc_forward_classname': [
('([a-zA-Z$_][\w$]*)(\s*,\s*)',
bygroups(Name.Class, Text), 'oc_forward_classname'),
('([a-zA-Z$_][\w$]*)(\s*;?)',
bygroups(Name.Class, Text), '#pop')
],
'oc_ivars': [
include('whitespace'),
include('statements'),
(';', Punctuation),
(r'\{', Punctuation, '#push'),
(r'\}', Punctuation, '#pop'),
],
'root': [
# methods
(r'^([-+])(\s*)' # method marker
r'(\(.*?\))?(\s*)' # return type
r'([a-zA-Z$_][\w$]*:?)', # begin of method name
bygroups(Punctuation, Text, using(this),
Text, Name.Function),
'method'),
inherit,
],
'method': [
include('whitespace'),
# TODO unsure if ellipses are allowed elsewhere, see
# discussion in Issue 789
(r',', Punctuation),
(r'\.\.\.', Punctuation),
(r'(\(.*?\))(\s*)([a-zA-Z$_][\w$]*)',
bygroups(using(this), Text, Name.Variable)),
(r'[a-zA-Z$_][\w$]*:', Name.Function),
(';', Punctuation, '#pop'),
(r'\{', Punctuation, 'function'),
default('#pop'),
],
'literal_number': [
(r'\(', Punctuation, 'literal_number_inner'),
(r'\)', Literal, '#pop'),
include('statement'),
],
'literal_number_inner': [
(r'\(', Punctuation, '#push'),
(r'\)', Punctuation, '#pop'),
include('statement'),
],
'literal_array': [
(r'\[', Punctuation, 'literal_array_inner'),
(r'\]', Literal, '#pop'),
include('statement'),
],
'literal_array_inner': [
(r'\[', Punctuation, '#push'),
(r'\]', Punctuation, '#pop'),
include('statement'),
],
'literal_dictionary': [
(r'\}', Literal, '#pop'),
include('statement'),
],
}
def analyse_text(text):
if _oc_keywords.search(text):
return 1.0
elif '@"' in text: # strings
return 0.8
elif re.search('@[0-9]+', text):
return 0.7
elif _oc_message.search(text):
return 0.8
return 0
def get_tokens_unprocessed(self, text):
from pygments.lexers._cocoa_builtins import COCOA_INTERFACES, \
COCOA_PROTOCOLS, COCOA_PRIMITIVES
for index, token, value in \
baselexer.get_tokens_unprocessed(self, text):
if token is Name or token is Name.Class:
if value in COCOA_INTERFACES or value in COCOA_PROTOCOLS \
or value in COCOA_PRIMITIVES:
token = Name.Builtin.Pseudo
yield index, token, value
return GeneratedObjectiveCVariant | [
"def",
"objective",
"(",
"baselexer",
")",
":",
"# Have to be careful not to accidentally match JavaDoc/Doxygen syntax here,",
"# since that's quite common in ordinary C/C++ files. It's OK to match",
"# JavaDoc/Doxygen keywords that only apply to Objective-C, mind.",
"#",
"# The upshot of this is that we CANNOT match @class or @interface",
"_oc_keywords",
"=",
"re",
".",
"compile",
"(",
"r'@(?:end|implementation|protocol)'",
")",
"# Matches [ <ws>? identifier <ws> ( identifier <ws>? ] | identifier? : )",
"# (note the identifier is *optional* when there is a ':'!)",
"_oc_message",
"=",
"re",
".",
"compile",
"(",
"r'\\[\\s*[a-zA-Z_]\\w*\\s+'",
"r'(?:[a-zA-Z_]\\w*\\s*\\]|'",
"r'(?:[a-zA-Z_]\\w*)?:)'",
")",
"class",
"GeneratedObjectiveCVariant",
"(",
"baselexer",
")",
":",
"\"\"\"\n Implements Objective-C syntax on top of an existing C family lexer.\n \"\"\"",
"tokens",
"=",
"{",
"'statements'",
":",
"[",
"(",
"r'@\"'",
",",
"String",
",",
"'string'",
")",
",",
"(",
"r'@(YES|NO)'",
",",
"Number",
")",
",",
"(",
"r\"@'(\\\\.|\\\\[0-7]{1,3}|\\\\x[a-fA-F0-9]{1,2}|[^\\\\\\'\\n])'\"",
",",
"String",
".",
"Char",
")",
",",
"(",
"r'@(\\d+\\.\\d*|\\.\\d+|\\d+)[eE][+-]?\\d+[lL]?'",
",",
"Number",
".",
"Float",
")",
",",
"(",
"r'@(\\d+\\.\\d*|\\.\\d+|\\d+[fF])[fF]?'",
",",
"Number",
".",
"Float",
")",
",",
"(",
"r'@0x[0-9a-fA-F]+[Ll]?'",
",",
"Number",
".",
"Hex",
")",
",",
"(",
"r'@0[0-7]+[Ll]?'",
",",
"Number",
".",
"Oct",
")",
",",
"(",
"r'@\\d+[Ll]?'",
",",
"Number",
".",
"Integer",
")",
",",
"(",
"r'@\\('",
",",
"Literal",
",",
"'literal_number'",
")",
",",
"(",
"r'@\\['",
",",
"Literal",
",",
"'literal_array'",
")",
",",
"(",
"r'@\\{'",
",",
"Literal",
",",
"'literal_dictionary'",
")",
",",
"(",
"words",
"(",
"(",
"'@selector'",
",",
"'@private'",
",",
"'@protected'",
",",
"'@public'",
",",
"'@encode'",
",",
"'@synchronized'",
",",
"'@try'",
",",
"'@throw'",
",",
"'@catch'",
",",
"'@finally'",
",",
"'@end'",
",",
"'@property'",
",",
"'@synthesize'",
",",
"'__bridge'",
",",
"'__bridge_transfer'",
",",
"'__autoreleasing'",
",",
"'__block'",
",",
"'__weak'",
",",
"'__strong'",
",",
"'weak'",
",",
"'strong'",
",",
"'copy'",
",",
"'retain'",
",",
"'assign'",
",",
"'unsafe_unretained'",
",",
"'atomic'",
",",
"'nonatomic'",
",",
"'readonly'",
",",
"'readwrite'",
",",
"'setter'",
",",
"'getter'",
",",
"'typeof'",
",",
"'in'",
",",
"'out'",
",",
"'inout'",
",",
"'release'",
",",
"'class'",
",",
"'@dynamic'",
",",
"'@optional'",
",",
"'@required'",
",",
"'@autoreleasepool'",
")",
",",
"suffix",
"=",
"r'\\b'",
")",
",",
"Keyword",
")",
",",
"(",
"words",
"(",
"(",
"'id'",
",",
"'instancetype'",
",",
"'Class'",
",",
"'IMP'",
",",
"'SEL'",
",",
"'BOOL'",
",",
"'IBOutlet'",
",",
"'IBAction'",
",",
"'unichar'",
")",
",",
"suffix",
"=",
"r'\\b'",
")",
",",
"Keyword",
".",
"Type",
")",
",",
"(",
"r'@(true|false|YES|NO)\\n'",
",",
"Name",
".",
"Builtin",
")",
",",
"(",
"r'(YES|NO|nil|self|super)\\b'",
",",
"Name",
".",
"Builtin",
")",
",",
"# Carbon types",
"(",
"r'(Boolean|UInt8|SInt8|UInt16|SInt16|UInt32|SInt32)\\b'",
",",
"Keyword",
".",
"Type",
")",
",",
"# Carbon built-ins",
"(",
"r'(TRUE|FALSE)\\b'",
",",
"Name",
".",
"Builtin",
")",
",",
"(",
"r'(@interface|@implementation)(\\s+)'",
",",
"bygroups",
"(",
"Keyword",
",",
"Text",
")",
",",
"(",
"'#pop'",
",",
"'oc_classname'",
")",
")",
",",
"(",
"r'(@class|@protocol)(\\s+)'",
",",
"bygroups",
"(",
"Keyword",
",",
"Text",
")",
",",
"(",
"'#pop'",
",",
"'oc_forward_classname'",
")",
")",
",",
"# @ can also prefix other expressions like @{...} or @(...)",
"(",
"r'@'",
",",
"Punctuation",
")",
",",
"inherit",
",",
"]",
",",
"'oc_classname'",
":",
"[",
"# interface definition that inherits",
"(",
"'([a-zA-Z$_][\\w$]*)(\\s*:\\s*)([a-zA-Z$_][\\w$]*)?(\\s*)(\\{)'",
",",
"bygroups",
"(",
"Name",
".",
"Class",
",",
"Text",
",",
"Name",
".",
"Class",
",",
"Text",
",",
"Punctuation",
")",
",",
"(",
"'#pop'",
",",
"'oc_ivars'",
")",
")",
",",
"(",
"'([a-zA-Z$_][\\w$]*)(\\s*:\\s*)([a-zA-Z$_][\\w$]*)?'",
",",
"bygroups",
"(",
"Name",
".",
"Class",
",",
"Text",
",",
"Name",
".",
"Class",
")",
",",
"'#pop'",
")",
",",
"# interface definition for a category",
"(",
"'([a-zA-Z$_][\\w$]*)(\\s*)(\\([a-zA-Z$_][\\w$]*\\))(\\s*)(\\{)'",
",",
"bygroups",
"(",
"Name",
".",
"Class",
",",
"Text",
",",
"Name",
".",
"Label",
",",
"Text",
",",
"Punctuation",
")",
",",
"(",
"'#pop'",
",",
"'oc_ivars'",
")",
")",
",",
"(",
"'([a-zA-Z$_][\\w$]*)(\\s*)(\\([a-zA-Z$_][\\w$]*\\))'",
",",
"bygroups",
"(",
"Name",
".",
"Class",
",",
"Text",
",",
"Name",
".",
"Label",
")",
",",
"'#pop'",
")",
",",
"# simple interface / implementation",
"(",
"'([a-zA-Z$_][\\w$]*)(\\s*)(\\{)'",
",",
"bygroups",
"(",
"Name",
".",
"Class",
",",
"Text",
",",
"Punctuation",
")",
",",
"(",
"'#pop'",
",",
"'oc_ivars'",
")",
")",
",",
"(",
"'([a-zA-Z$_][\\w$]*)'",
",",
"Name",
".",
"Class",
",",
"'#pop'",
")",
"]",
",",
"'oc_forward_classname'",
":",
"[",
"(",
"'([a-zA-Z$_][\\w$]*)(\\s*,\\s*)'",
",",
"bygroups",
"(",
"Name",
".",
"Class",
",",
"Text",
")",
",",
"'oc_forward_classname'",
")",
",",
"(",
"'([a-zA-Z$_][\\w$]*)(\\s*;?)'",
",",
"bygroups",
"(",
"Name",
".",
"Class",
",",
"Text",
")",
",",
"'#pop'",
")",
"]",
",",
"'oc_ivars'",
":",
"[",
"include",
"(",
"'whitespace'",
")",
",",
"include",
"(",
"'statements'",
")",
",",
"(",
"';'",
",",
"Punctuation",
")",
",",
"(",
"r'\\{'",
",",
"Punctuation",
",",
"'#push'",
")",
",",
"(",
"r'\\}'",
",",
"Punctuation",
",",
"'#pop'",
")",
",",
"]",
",",
"'root'",
":",
"[",
"# methods",
"(",
"r'^([-+])(\\s*)'",
"# method marker",
"r'(\\(.*?\\))?(\\s*)'",
"# return type",
"r'([a-zA-Z$_][\\w$]*:?)'",
",",
"# begin of method name",
"bygroups",
"(",
"Punctuation",
",",
"Text",
",",
"using",
"(",
"this",
")",
",",
"Text",
",",
"Name",
".",
"Function",
")",
",",
"'method'",
")",
",",
"inherit",
",",
"]",
",",
"'method'",
":",
"[",
"include",
"(",
"'whitespace'",
")",
",",
"# TODO unsure if ellipses are allowed elsewhere, see",
"# discussion in Issue 789",
"(",
"r','",
",",
"Punctuation",
")",
",",
"(",
"r'\\.\\.\\.'",
",",
"Punctuation",
")",
",",
"(",
"r'(\\(.*?\\))(\\s*)([a-zA-Z$_][\\w$]*)'",
",",
"bygroups",
"(",
"using",
"(",
"this",
")",
",",
"Text",
",",
"Name",
".",
"Variable",
")",
")",
",",
"(",
"r'[a-zA-Z$_][\\w$]*:'",
",",
"Name",
".",
"Function",
")",
",",
"(",
"';'",
",",
"Punctuation",
",",
"'#pop'",
")",
",",
"(",
"r'\\{'",
",",
"Punctuation",
",",
"'function'",
")",
",",
"default",
"(",
"'#pop'",
")",
",",
"]",
",",
"'literal_number'",
":",
"[",
"(",
"r'\\('",
",",
"Punctuation",
",",
"'literal_number_inner'",
")",
",",
"(",
"r'\\)'",
",",
"Literal",
",",
"'#pop'",
")",
",",
"include",
"(",
"'statement'",
")",
",",
"]",
",",
"'literal_number_inner'",
":",
"[",
"(",
"r'\\('",
",",
"Punctuation",
",",
"'#push'",
")",
",",
"(",
"r'\\)'",
",",
"Punctuation",
",",
"'#pop'",
")",
",",
"include",
"(",
"'statement'",
")",
",",
"]",
",",
"'literal_array'",
":",
"[",
"(",
"r'\\['",
",",
"Punctuation",
",",
"'literal_array_inner'",
")",
",",
"(",
"r'\\]'",
",",
"Literal",
",",
"'#pop'",
")",
",",
"include",
"(",
"'statement'",
")",
",",
"]",
",",
"'literal_array_inner'",
":",
"[",
"(",
"r'\\['",
",",
"Punctuation",
",",
"'#push'",
")",
",",
"(",
"r'\\]'",
",",
"Punctuation",
",",
"'#pop'",
")",
",",
"include",
"(",
"'statement'",
")",
",",
"]",
",",
"'literal_dictionary'",
":",
"[",
"(",
"r'\\}'",
",",
"Literal",
",",
"'#pop'",
")",
",",
"include",
"(",
"'statement'",
")",
",",
"]",
",",
"}",
"def",
"analyse_text",
"(",
"text",
")",
":",
"if",
"_oc_keywords",
".",
"search",
"(",
"text",
")",
":",
"return",
"1.0",
"elif",
"'@\"'",
"in",
"text",
":",
"# strings",
"return",
"0.8",
"elif",
"re",
".",
"search",
"(",
"'@[0-9]+'",
",",
"text",
")",
":",
"return",
"0.7",
"elif",
"_oc_message",
".",
"search",
"(",
"text",
")",
":",
"return",
"0.8",
"return",
"0",
"def",
"get_tokens_unprocessed",
"(",
"self",
",",
"text",
")",
":",
"from",
"pygments",
".",
"lexers",
".",
"_cocoa_builtins",
"import",
"COCOA_INTERFACES",
",",
"COCOA_PROTOCOLS",
",",
"COCOA_PRIMITIVES",
"for",
"index",
",",
"token",
",",
"value",
"in",
"baselexer",
".",
"get_tokens_unprocessed",
"(",
"self",
",",
"text",
")",
":",
"if",
"token",
"is",
"Name",
"or",
"token",
"is",
"Name",
".",
"Class",
":",
"if",
"value",
"in",
"COCOA_INTERFACES",
"or",
"value",
"in",
"COCOA_PROTOCOLS",
"or",
"value",
"in",
"COCOA_PRIMITIVES",
":",
"token",
"=",
"Name",
".",
"Builtin",
".",
"Pseudo",
"yield",
"index",
",",
"token",
",",
"value",
"return",
"GeneratedObjectiveCVariant"
] | [
23,
0
] | [
191,
37
] | python | en | ['en', 'error', 'th'] | False |
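The `objective()` factory above is how Pygments derives its Objective-C lexer from an existing C-family base lexer. A short usage sketch with the public `ObjectiveCLexer`; the snippet string is illustrative:

```python
from pygments import highlight
from pygments.formatters import TerminalFormatter
from pygments.lexers import ObjectiveCLexer

code = '@interface Greeter : NSObject\n- (void)sayHello;\n@end\n'

# analyse_text() scores how Objective-C-like a snippet looks; this one
# contains "@end", which _oc_keywords matches, so the score is 1.0.
print(ObjectiveCLexer.analyse_text(code))

# Highlight the snippet for a terminal.
print(highlight(code, ObjectiveCLexer(), TerminalFormatter()))
```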
set_custom_hostname_resolver | (hostname_resolver) | Set a custom hostname resolver.
By default, Trio's :func:`getaddrinfo` and :func:`getnameinfo` functions
use the standard system resolver functions. This function allows you to
customize that behavior. The main intended use case is for testing, but it
might also be useful for using third-party resolvers like `c-ares
<https://c-ares.haxx.se/>`__ (though be warned that these rarely make
perfect drop-in replacements for the system resolver). See
:class:`trio.abc.HostnameResolver` for more details.
Setting a custom hostname resolver affects all future calls to
:func:`getaddrinfo` and :func:`getnameinfo` within the enclosing call to
:func:`trio.run`. All other hostname resolution in Trio is implemented in
terms of these functions.
Generally you should call this function just once, right at the beginning
of your program.
Args:
hostname_resolver (trio.abc.HostnameResolver or None): The new custom
hostname resolver, or None to restore the default behavior.
Returns:
The previous hostname resolver (which may be None).
| Set a custom hostname resolver. | def set_custom_hostname_resolver(hostname_resolver):
"""Set a custom hostname resolver.
By default, Trio's :func:`getaddrinfo` and :func:`getnameinfo` functions
use the standard system resolver functions. This function allows you to
customize that behavior. The main intended use case is for testing, but it
might also be useful for using third-party resolvers like `c-ares
<https://c-ares.haxx.se/>`__ (though be warned that these rarely make
perfect drop-in replacements for the system resolver). See
:class:`trio.abc.HostnameResolver` for more details.
Setting a custom hostname resolver affects all future calls to
:func:`getaddrinfo` and :func:`getnameinfo` within the enclosing call to
:func:`trio.run`. All other hostname resolution in Trio is implemented in
terms of these functions.
Generally you should call this function just once, right at the beginning
of your program.
Args:
hostname_resolver (trio.abc.HostnameResolver or None): The new custom
hostname resolver, or None to restore the default behavior.
Returns:
The previous hostname resolver (which may be None).
"""
old = _resolver.get(None)
_resolver.set(hostname_resolver)
return old | [
"def",
"set_custom_hostname_resolver",
"(",
"hostname_resolver",
")",
":",
"old",
"=",
"_resolver",
".",
"get",
"(",
"None",
")",
"_resolver",
".",
"set",
"(",
"hostname_resolver",
")",
"return",
"old"
] | [
65,
0
] | [
94,
14
] | python | en | ['es', 'pt', 'en'] | False |
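A hedged sketch of plugging in a resolver, e.g. for tests. `NullResolver` and its hard-coded answer are inventions for illustration; only the `trio.abc.HostnameResolver` interface and the set/restore protocol come from the docstring above.

```python
import socket as stdlib_socket

import trio
import trio.socket

class NullResolver(trio.abc.HostnameResolver):
    """Toy resolver that pins every lookup to localhost."""

    async def getaddrinfo(self, host, port, family=0, type=0, proto=0, flags=0):
        # One fixed (family, type, proto, canonname, sockaddr) tuple.
        return [(stdlib_socket.AF_INET, stdlib_socket.SOCK_STREAM, 6, "",
                 ("127.0.0.1", port))]

    async def getnameinfo(self, sockaddr, flags):
        return ("localhost", str(sockaddr[1]))

async def main():
    old = trio.socket.set_custom_hostname_resolver(NullResolver())
    try:
        print(await trio.socket.getaddrinfo("example.com", 80))
    finally:
        trio.socket.set_custom_hostname_resolver(old)  # restore the previous one

trio.run(main)
```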
set_custom_socket_factory | (socket_factory) | Set a custom socket object factory.
This function allows you to replace Trio's normal socket class with a
custom class. This is very useful for testing, and probably a bad idea in
any other circumstance. See :class:`trio.abc.SocketFactory` for more
details.
Setting a custom socket factory affects all future calls to :func:`socket`
within the enclosing call to :func:`trio.run`.
Generally you should call this function just once, right at the beginning
of your program.
Args:
socket_factory (trio.abc.SocketFactory or None): The new custom
socket factory, or None to restore the default behavior.
Returns:
The previous socket factory (which may be None).
| Set a custom socket object factory. | def set_custom_socket_factory(socket_factory):
"""Set a custom socket object factory.
This function allows you to replace Trio's normal socket class with a
custom class. This is very useful for testing, and probably a bad idea in
any other circumstance. See :class:`trio.abc.HostnameResolver` for more
details.
Setting a custom socket factory affects all future calls to :func:`socket`
within the enclosing call to :func:`trio.run`.
Generally you should call this function just once, right at the beginning
of your program.
Args:
socket_factory (trio.abc.SocketFactory or None): The new custom
socket factory, or None to restore the default behavior.
Returns:
The previous socket factory (which may be None).
"""
old = _socket_factory.get(None)
_socket_factory.set(socket_factory)
return old | [
"def",
"set_custom_socket_factory",
"(",
"socket_factory",
")",
":",
"old",
"=",
"_socket_factory",
".",
"get",
"(",
"None",
")",
"_socket_factory",
".",
"set",
"(",
"socket_factory",
")",
"return",
"old"
] | [
97,
0
] | [
121,
14
] | python | en | ['en', 'hu', 'en'] | True |
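A matching sketch for the socket-factory hook. `CountingFactory` is hypothetical; it delegates to the real stdlib socket and wraps the result with `from_stdlib_socket`, which this module defines below.

```python
import socket as stdlib_socket

import trio
import trio.socket

class CountingFactory(trio.abc.SocketFactory):
    """Counts socket creations while delegating to the real implementation."""

    def __init__(self):
        self.created = 0

    def socket(self, family=None, type=None, proto=None):
        self.created += 1
        sock = stdlib_socket.socket(
            family if family is not None else stdlib_socket.AF_INET,
            type if type is not None else stdlib_socket.SOCK_STREAM,
            proto if proto is not None else 0,
        )
        return trio.socket.from_stdlib_socket(sock)

async def main():
    factory = CountingFactory()
    old = trio.socket.set_custom_socket_factory(factory)
    try:
        trio.socket.socket().close()  # goes through our factory
    finally:
        trio.socket.set_custom_socket_factory(old)
    print(factory.created)  # 1

trio.run(main)
```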
getaddrinfo | (host, port, family=0, type=0, proto=0, flags=0) | Look up a numeric address given a name.
Arguments and return values are identical to :func:`socket.getaddrinfo`,
except that this version is async.
Also, :func:`trio.socket.getaddrinfo` correctly uses IDNA 2008 to process
non-ASCII domain names. (:func:`socket.getaddrinfo` uses IDNA 2003, which
can give the wrong result in some cases and cause you to connect to a
different host than the one you intended; see `bpo-17305
<https://bugs.python.org/issue17305>`__.)
This function's behavior can be customized using
:func:`set_custom_hostname_resolver`.
| Look up a numeric address given a name. | async def getaddrinfo(host, port, family=0, type=0, proto=0, flags=0):
"""Look up a numeric address given a name.
Arguments and return values are identical to :func:`socket.getaddrinfo`,
except that this version is async.
Also, :func:`trio.socket.getaddrinfo` correctly uses IDNA 2008 to process
non-ASCII domain names. (:func:`socket.getaddrinfo` uses IDNA 2003, which
can give the wrong result in some cases and cause you to connect to a
different host than the one you intended; see `bpo-17305
<https://bugs.python.org/issue17305>`__.)
This function's behavior can be customized using
:func:`set_custom_hostname_resolver`.
"""
# If host and port are numeric, then getaddrinfo doesn't block and we can
# skip the whole thread thing, which seems worthwhile. So we try first
# with the _NUMERIC_ONLY flags set, and then only spawn a thread if that
# fails with EAI_NONAME:
def numeric_only_failure(exc):
return (
isinstance(exc, _stdlib_socket.gaierror)
and exc.errno == _stdlib_socket.EAI_NONAME
)
async with _try_sync(numeric_only_failure):
return _stdlib_socket.getaddrinfo(
host, port, family, type, proto, flags | _NUMERIC_ONLY
)
# That failed; it's a real hostname. We better use a thread.
#
# Also, it might be a unicode hostname, in which case we want to do our
# own encoding using the idna module, rather than letting Python do
# it. (Python will use the old IDNA 2003 standard, and possibly get the
# wrong answer - see bpo-17305). However, the idna module is picky, and
# will refuse to process some valid hostname strings, like "::1". So if
# it's already ascii, we pass it through; otherwise, we encode it to.
if isinstance(host, str):
try:
host = host.encode("ascii")
except UnicodeEncodeError:
# UTS-46 defines various normalizations; in particular, by default
# idna.encode will error out if the hostname has Capital Letters
# in it; with uts46=True it will lowercase them instead.
host = _idna.encode(host, uts46=True)
hr = _resolver.get(None)
if hr is not None:
return await hr.getaddrinfo(host, port, family, type, proto, flags)
else:
return await trio.to_thread.run_sync(
_stdlib_socket.getaddrinfo,
host,
port,
family,
type,
proto,
flags,
cancellable=True,
) | [
"async",
"def",
"getaddrinfo",
"(",
"host",
",",
"port",
",",
"family",
"=",
"0",
",",
"type",
"=",
"0",
",",
"proto",
"=",
"0",
",",
"flags",
"=",
"0",
")",
":",
"# If host and port are numeric, then getaddrinfo doesn't block and we can",
"# skip the whole thread thing, which seems worthwhile. So we try first",
"# with the _NUMERIC_ONLY flags set, and then only spawn a thread if that",
"# fails with EAI_NONAME:",
"def",
"numeric_only_failure",
"(",
"exc",
")",
":",
"return",
"(",
"isinstance",
"(",
"exc",
",",
"_stdlib_socket",
".",
"gaierror",
")",
"and",
"exc",
".",
"errno",
"==",
"_stdlib_socket",
".",
"EAI_NONAME",
")",
"async",
"with",
"_try_sync",
"(",
"numeric_only_failure",
")",
":",
"return",
"_stdlib_socket",
".",
"getaddrinfo",
"(",
"host",
",",
"port",
",",
"family",
",",
"type",
",",
"proto",
",",
"flags",
"|",
"_NUMERIC_ONLY",
")",
"# That failed; it's a real hostname. We better use a thread.",
"#",
"# Also, it might be a unicode hostname, in which case we want to do our",
"# own encoding using the idna module, rather than letting Python do",
"# it. (Python will use the old IDNA 2003 standard, and possibly get the",
"# wrong answer - see bpo-17305). However, the idna module is picky, and",
"# will refuse to process some valid hostname strings, like \"::1\". So if",
"# it's already ascii, we pass it through; otherwise, we encode it to.",
"if",
"isinstance",
"(",
"host",
",",
"str",
")",
":",
"try",
":",
"host",
"=",
"host",
".",
"encode",
"(",
"\"ascii\"",
")",
"except",
"UnicodeEncodeError",
":",
"# UTS-46 defines various normalizations; in particular, by default",
"# idna.encode will error out if the hostname has Capital Letters",
"# in it; with uts46=True it will lowercase them instead.",
"host",
"=",
"_idna",
".",
"encode",
"(",
"host",
",",
"uts46",
"=",
"True",
")",
"hr",
"=",
"_resolver",
".",
"get",
"(",
"None",
")",
"if",
"hr",
"is",
"not",
"None",
":",
"return",
"await",
"hr",
".",
"getaddrinfo",
"(",
"host",
",",
"port",
",",
"family",
",",
"type",
",",
"proto",
",",
"flags",
")",
"else",
":",
"return",
"await",
"trio",
".",
"to_thread",
".",
"run_sync",
"(",
"_stdlib_socket",
".",
"getaddrinfo",
",",
"host",
",",
"port",
",",
"family",
",",
"type",
",",
"proto",
",",
"flags",
",",
"cancellable",
"=",
"True",
",",
")"
] | [
131,
0
] | [
191,
9
] | python | en | ['en', 'en', 'en'] | True |
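Usage sketch: a numeric address takes the synchronous fast path described in the comments above, while a real hostname is resolved in a worker thread with IDNA 2008 encoding.

```python
import trio
import trio.socket

async def main():
    # Numeric host + port: resolved inline, no thread needed.
    print(await trio.socket.getaddrinfo("127.0.0.1", 80))

    # Real hostname: offloaded to a thread; non-ASCII names go through idna.
    for family, type_, proto, canonname, sockaddr in await trio.socket.getaddrinfo(
        "example.com", "https"
    ):
        print(family, sockaddr)

trio.run(main)
```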
getnameinfo | (sockaddr, flags) | Look up a name given a numeric address.
Arguments and return values are identical to :func:`socket.getnameinfo`,
except that this version is async.
This function's behavior can be customized using
:func:`set_custom_hostname_resolver`.
| Look up a name given a numeric address. | async def getnameinfo(sockaddr, flags):
"""Look up a name given a numeric address.
Arguments and return values are identical to :func:`socket.getnameinfo`,
except that this version is async.
This function's behavior can be customized using
:func:`set_custom_hostname_resolver`.
"""
hr = _resolver.get(None)
if hr is not None:
return await hr.getnameinfo(sockaddr, flags)
else:
return await trio.to_thread.run_sync(
_stdlib_socket.getnameinfo, sockaddr, flags, cancellable=True
) | [
"async",
"def",
"getnameinfo",
"(",
"sockaddr",
",",
"flags",
")",
":",
"hr",
"=",
"_resolver",
".",
"get",
"(",
"None",
")",
"if",
"hr",
"is",
"not",
"None",
":",
"return",
"await",
"hr",
".",
"getnameinfo",
"(",
"sockaddr",
",",
"flags",
")",
"else",
":",
"return",
"await",
"trio",
".",
"to_thread",
".",
"run_sync",
"(",
"_stdlib_socket",
".",
"getnameinfo",
",",
"sockaddr",
",",
"flags",
",",
"cancellable",
"=",
"True",
")"
] | [
194,
0
] | [
210,
9
] | python | en | ['en', 'en', 'en'] | True |
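And the reverse direction; the literal address and the printed result are only examples:

```python
import socket

import trio
import trio.socket

async def main():
    host, service = await trio.socket.getnameinfo(
        ("127.0.0.1", 22), socket.NI_NUMERICSERV
    )
    print(host, service)  # e.g. ('localhost', '22'), depending on the system

trio.run(main)
```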
getprotobyname | (name) | Look up a protocol number by name. (Rarely used.)
Like :func:`socket.getprotobyname`, but async.
| Look up a protocol number by name. (Rarely used.) | async def getprotobyname(name):
"""Look up a protocol number by name. (Rarely used.)
Like :func:`socket.getprotobyname`, but async.
"""
return await trio.to_thread.run_sync(
_stdlib_socket.getprotobyname, name, cancellable=True
) | [
"async",
"def",
"getprotobyname",
"(",
"name",
")",
":",
"return",
"await",
"trio",
".",
"to_thread",
".",
"run_sync",
"(",
"_stdlib_socket",
".",
"getprotobyname",
",",
"name",
",",
"cancellable",
"=",
"True",
")"
] | [
213,
0
] | [
221,
5
] | python | en | ['en', 'en', 'en'] | True |
from_stdlib_socket | (sock) | Convert a standard library :func:`socket.socket` object into a Trio
socket object.
| Convert a standard library :func:`socket.socket` object into a Trio
socket object. | def from_stdlib_socket(sock):
"""Convert a standard library :func:`socket.socket` object into a Trio
socket object.
"""
return _SocketType(sock) | [
"def",
"from_stdlib_socket",
"(",
"sock",
")",
":",
"return",
"_SocketType",
"(",
"sock",
")"
] | [
232,
0
] | [
237,
28
] | python | en | ['en', 'en', 'en'] | True |
fromfd | (fd, family, type, proto=0) | Like :func:`socket.fromfd`, but returns a Trio socket object. | Like :func:`socket.fromfd`, but returns a Trio socket object. | def fromfd(fd, family, type, proto=0):
"""Like :func:`socket.fromfd`, but returns a Trio socket object."""
family, type, proto = _sniff_sockopts_for_fileno(family, type, proto, fd)
return from_stdlib_socket(_stdlib_socket.fromfd(fd, family, type, proto)) | [
"def",
"fromfd",
"(",
"fd",
",",
"family",
",",
"type",
",",
"proto",
"=",
"0",
")",
":",
"family",
",",
"type",
",",
"proto",
"=",
"_sniff_sockopts_for_fileno",
"(",
"family",
",",
"type",
",",
"proto",
",",
"fd",
")",
"return",
"from_stdlib_socket",
"(",
"_stdlib_socket",
".",
"fromfd",
"(",
"fd",
",",
"family",
",",
"type",
",",
"proto",
")",
")"
] | [
241,
0
] | [
244,
77
] | python | en | ['en', 'fy', 'en'] | True |
socketpair | (*args, **kwargs) | Like :func:`socket.socketpair`, but returns a pair of Trio socket
objects.
| Like :func:`socket.socketpair`, but returns a pair of Trio socket
objects. | def socketpair(*args, **kwargs):
"""Like :func:`socket.socketpair`, but returns a pair of Trio socket
objects.
"""
left, right = _stdlib_socket.socketpair(*args, **kwargs)
return (from_stdlib_socket(left), from_stdlib_socket(right)) | [
"def",
"socketpair",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"left",
",",
"right",
"=",
"_stdlib_socket",
".",
"socketpair",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"(",
"from_stdlib_socket",
"(",
"left",
")",
",",
"from_stdlib_socket",
"(",
"right",
")",
")"
] | [
257,
0
] | [
263,
64
] | python | en | ['en', 'af', 'en'] | True |
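A quick sketch of the pair in action; both endpoints come back as Trio sockets:

```python
import trio
import trio.socket

async def main():
    left, right = trio.socket.socketpair()
    await left.send(b"ping")
    print(await right.recv(4))  # b'ping'
    left.close()
    right.close()

trio.run(main)
```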
socket | (
family=_stdlib_socket.AF_INET,
type=_stdlib_socket.SOCK_STREAM,
proto=0,
fileno=None,
) | Create a new Trio socket, like :func:`socket.socket`.
This function's behavior can be customized using
:func:`set_custom_socket_factory`.
| Create a new Trio socket, like :func:`socket.socket`. | def socket(
family=_stdlib_socket.AF_INET,
type=_stdlib_socket.SOCK_STREAM,
proto=0,
fileno=None,
):
"""Create a new Trio socket, like :func:`socket.socket`.
This function's behavior can be customized using
:func:`set_custom_socket_factory`.
"""
if fileno is None:
sf = _socket_factory.get(None)
if sf is not None:
return sf.socket(family, type, proto)
else:
family, type, proto = _sniff_sockopts_for_fileno(family, type, proto, fileno)
stdlib_socket = _stdlib_socket.socket(family, type, proto, fileno)
return from_stdlib_socket(stdlib_socket) | [
"def",
"socket",
"(",
"family",
"=",
"_stdlib_socket",
".",
"AF_INET",
",",
"type",
"=",
"_stdlib_socket",
".",
"SOCK_STREAM",
",",
"proto",
"=",
"0",
",",
"fileno",
"=",
"None",
",",
")",
":",
"if",
"fileno",
"is",
"None",
":",
"sf",
"=",
"_socket_factory",
".",
"get",
"(",
"None",
")",
"if",
"sf",
"is",
"not",
"None",
":",
"return",
"sf",
".",
"socket",
"(",
"family",
",",
"type",
",",
"proto",
")",
"else",
":",
"family",
",",
"type",
",",
"proto",
"=",
"_sniff_sockopts_for_fileno",
"(",
"family",
",",
"type",
",",
"proto",
",",
"fileno",
")",
"stdlib_socket",
"=",
"_stdlib_socket",
".",
"socket",
"(",
"family",
",",
"type",
",",
"proto",
",",
"fileno",
")",
"return",
"from_stdlib_socket",
"(",
"stdlib_socket",
")"
] | [
267,
0
] | [
286,
44
] | python | en | ['it', 'et', 'en'] | False |
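Putting the factory function to work; the HTTP request below is illustrative only:

```python
import trio
import trio.socket

async def main():
    sock = trio.socket.socket(trio.socket.AF_INET, trio.socket.SOCK_STREAM)
    # connect() resolves the hostname and connects without blocking the loop.
    await sock.connect(("example.com", 80))
    await sock.send(b"HEAD / HTTP/1.0\r\nHost: example.com\r\n\r\n")
    print(await sock.recv(1024))
    sock.close()

trio.run(main)
```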
_sniff_sockopts_for_fileno | (family, type, proto, fileno) | Correct SOCKOPTS for given fileno, falling back to provided values. | Correct SOCKOPTS for given fileno, falling back to provided values. | def _sniff_sockopts_for_fileno(family, type, proto, fileno):
"""Correct SOCKOPTS for given fileno, falling back to provided values."""
# Wrap the raw fileno into a Python socket object
# This object might have the wrong metadata, but it lets us easily call getsockopt
# and then we'll throw it away and construct a new one with the correct metadata.
if sys.platform != "linux":
return family, type, proto
from socket import SO_DOMAIN, SO_PROTOCOL, SOL_SOCKET, SO_TYPE
sockobj = _stdlib_socket.socket(family, type, proto, fileno=fileno)
try:
family = sockobj.getsockopt(SOL_SOCKET, SO_DOMAIN)
proto = sockobj.getsockopt(SOL_SOCKET, SO_PROTOCOL)
type = sockobj.getsockopt(SOL_SOCKET, SO_TYPE)
finally:
# Unwrap it again, so that sockobj.__del__ doesn't try to close our socket
sockobj.detach()
return family, type, proto | [
"def",
"_sniff_sockopts_for_fileno",
"(",
"family",
",",
"type",
",",
"proto",
",",
"fileno",
")",
":",
"# Wrap the raw fileno into a Python socket object",
"# This object might have the wrong metadata, but it lets us easily call getsockopt",
"# and then we'll throw it away and construct a new one with the correct metadata.",
"if",
"sys",
".",
"platform",
"!=",
"\"linux\"",
":",
"return",
"family",
",",
"type",
",",
"proto",
"from",
"socket",
"import",
"SO_DOMAIN",
",",
"SO_PROTOCOL",
",",
"SOL_SOCKET",
",",
"SO_TYPE",
"sockobj",
"=",
"_stdlib_socket",
".",
"socket",
"(",
"family",
",",
"type",
",",
"proto",
",",
"fileno",
"=",
"fileno",
")",
"try",
":",
"family",
"=",
"sockobj",
".",
"getsockopt",
"(",
"SOL_SOCKET",
",",
"SO_DOMAIN",
")",
"proto",
"=",
"sockobj",
".",
"getsockopt",
"(",
"SOL_SOCKET",
",",
"SO_PROTOCOL",
")",
"type",
"=",
"sockobj",
".",
"getsockopt",
"(",
"SOL_SOCKET",
",",
"SO_TYPE",
")",
"finally",
":",
"# Unwrap it again, so that sockobj.__del__ doesn't try to close our socket",
"sockobj",
".",
"detach",
"(",
")",
"return",
"family",
",",
"type",
",",
"proto"
] | [
289,
0
] | [
306,
30
] | python | en | ['en', 'en', 'en'] | True |
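The Linux-only socket options it relies on can be probed directly. This sketch (not part of Trio) shows what the kernel reports for a plain UDP socket, which is exactly the metadata the helper recovers:

```python
import socket
import sys

if sys.platform == "linux":
    s = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
    # The kernel remembers the real metadata, whatever the Python object claims:
    assert s.getsockopt(socket.SOL_SOCKET, socket.SO_DOMAIN) == socket.AF_INET6
    assert s.getsockopt(socket.SOL_SOCKET, socket.SO_TYPE) == socket.SOCK_DGRAM
    print(s.getsockopt(socket.SOL_SOCKET, socket.SO_PROTOCOL))  # 17 == IPPROTO_UDP
    s.close()
```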
_SocketType.dup | (self) | Same as :meth:`socket.socket.dup`. | Same as :meth:`socket.socket.dup`. | def dup(self):
"""Same as :meth:`socket.socket.dup`."""
return _SocketType(self._sock.dup()) | [
"def",
"dup",
"(",
"self",
")",
":",
"return",
"_SocketType",
"(",
"self",
".",
"_sock",
".",
"dup",
"(",
")",
")"
] | [
436,
4
] | [
438,
44
] | python | en | ['en', 'af', 'en'] | True |
_SocketType.accept | (self) | Like :meth:`socket.socket.accept`, but async. | Like :meth:`socket.socket.accept`, but async. | async def accept(self):
"""Like :meth:`socket.socket.accept`, but async."""
sock, addr = await self._accept()
return from_stdlib_socket(sock), addr | [
"async",
"def",
"accept",
"(",
"self",
")",
":",
"sock",
",",
"addr",
"=",
"await",
"self",
".",
"_accept",
"(",
")",
"return",
"from_stdlib_socket",
"(",
"sock",
")",
",",
"addr"
] | [
604,
4
] | [
607,
45
] | python | en | ['en', 'sv', 'en'] | True |
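A minimal accept loop; the port number is an arbitrary example:

```python
import trio
import trio.socket

async def main():
    listener = trio.socket.socket()
    await listener.bind(("127.0.0.1", 12345))  # bind() is async in Trio
    listener.listen()
    while True:
        conn, addr = await listener.accept()  # a Trio socket + peer address
        print("connection from", addr)
        conn.close()

trio.run(main)
```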
_SocketType.sendto | (self, *args) | Similar to :meth:`socket.socket.sendto`, but async. | Similar to :meth:`socket.socket.sendto`, but async. | async def sendto(self, *args):
"""Similar to :meth:`socket.socket.sendto`, but async."""
# args is: data[, flags], address
# and kwargs are not accepted
args = list(args)
args[-1] = await self._resolve_remote_address_nocp(args[-1])
return await self._nonblocking_helper(
_stdlib_socket.socket.sendto, args, {}, _core.wait_writable
) | [
"async",
"def",
"sendto",
"(",
"self",
",",
"*",
"args",
")",
":",
"# args is: data[, flags], address)",
"# and kwargs are not accepted",
"args",
"=",
"list",
"(",
"args",
")",
"args",
"[",
"-",
"1",
"]",
"=",
"await",
"self",
".",
"_resolve_remote_address_nocp",
"(",
"args",
"[",
"-",
"1",
"]",
")",
"return",
"await",
"self",
".",
"_nonblocking_helper",
"(",
"_stdlib_socket",
".",
"socket",
".",
"sendto",
",",
"args",
",",
"{",
"}",
",",
"_core",
".",
"wait_writable",
")"
] | [
738,
4
] | [
746,
9
] | python | en | ['en', 'sv', 'en'] | True |
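A UDP sketch; note that the address is the last positional argument and is resolved before sending, which is what the `args[-1]` handling above does:

```python
import trio
import trio.socket

async def main():
    udp = trio.socket.socket(type=trio.socket.SOCK_DGRAM)
    await udp.sendto(b"hello", ("127.0.0.1", 9999))  # port is an example
    udp.close()

trio.run(main)
```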
argument_t.clone | (self, **keywd) | constructs new argument_t instance
return argument_t(
name=keywd.get('name', self.name),
decl_type=keywd.get('decl_type', self.decl_type),
default_value=keywd.get('default_value', self.default_value),
attributes=keywd.get('attributes', self.attributes ))
| constructs new argument_t instance | def clone(self, **keywd):
"""constructs new argument_t instance
return argument_t(
name=keywd.get('name', self.name),
decl_type=keywd.get('decl_type', self.decl_type),
default_value=keywd.get('default_value', self.default_value),
attributes=keywd.get('attributes', self.attributes ))
"""
return argument_t(
name=keywd.get('name', self.name),
decl_type=keywd.get('decl_type', self.decl_type),
default_value=keywd.get('default_value', self.default_value),
attributes=keywd.get('attributes', self.attributes)) | [
"def",
"clone",
"(",
"self",
",",
"*",
"*",
"keywd",
")",
":",
"return",
"argument_t",
"(",
"name",
"=",
"keywd",
".",
"get",
"(",
"'name'",
",",
"self",
".",
"name",
")",
",",
"decl_type",
"=",
"keywd",
".",
"get",
"(",
"'decl_type'",
",",
"self",
".",
"decl_type",
")",
",",
"default_value",
"=",
"keywd",
".",
"get",
"(",
"'default_value'",
",",
"self",
".",
"default_value",
")",
",",
"attributes",
"=",
"keywd",
".",
"get",
"(",
"'attributes'",
",",
"self",
".",
"attributes",
")",
")"
] | [
47,
4
] | [
61,
64
] | python | en | ['en', 'en', 'en'] | True |
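A hedged sketch of cloning. pygccxml's `declarations` package re-exports `argument_t` and the C++ type classes; the field values here are made up:

```python
from pygccxml import declarations

arg = declarations.argument_t(
    name="count", decl_type=declarations.int_t(), default_value="0"
)
renamed = arg.clone(name="n")  # type and default value carried over unchanged
print(renamed.name, renamed.decl_type.decl_string, renamed.default_value)
# n int 0
```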
argument_t.name | (self) | Argument name.
@type: str | Argument name.
| def name(self):
"""Argument name.
@type: str"""
return self._name | [
"def",
"name",
"(",
"self",
")",
":",
"return",
"self",
".",
"_name"
] | [
97,
4
] | [
100,
25
] | python | en | ['en', 'da', 'en'] | False |
argument_t.ellipsis | (self) | bool, if True argument represents ellipsis ( "..." )
in function definition | bool, if True argument represents ellipsis ( "..." )
in function definition | def ellipsis(self):
"""bool, if True argument represents ellipsis ( "..." )
in function definition"""
return isinstance(self.decl_type, cpptypes.ellipsis_t) | [
"def",
"ellipsis",
"(",
"self",
")",
":",
"return",
"isinstance",
"(",
"self",
".",
"decl_type",
",",
"cpptypes",
".",
"ellipsis_t",
")"
] | [
107,
4
] | [
110,
62
] | python | ca | ['ca', 'ca', 'en'] | True |
argument_t.default_value | (self) | Argument's default value or None.
@type: str | Argument's default value or None.
| def default_value(self):
"""Argument's default value or None.
@type: str"""
return self._default_value | [
"def",
"default_value",
"(",
"self",
")",
":",
"return",
"self",
".",
"_default_value"
] | [
113,
4
] | [
116,
34
] | python | en | ['en', 'fr', 'en'] | True |
argument_t.attributes | (self) | GCCXML attributes, set using __attribute__((gccxml("...")))
@type: str | GCCXML attributes, set using __attribute__((gccxml("...")))
| def attributes(self):
"""GCCXML attributes, set using __attribute__((gccxml("...")))
@type: str"""
return self._attributes | [
"def",
"attributes",
"(",
"self",
")",
":",
"return",
"self",
".",
"_attributes"
] | [
131,
4
] | [
134,
31
] | python | en | ['en', 'la', 'en'] | True |
calldef_t._get__cmp__call_items | (self) |
Implementation detail.
|
Implementation detail. | def _get__cmp__call_items(self):
"""
Implementation detail.
"""
raise NotImplementedError() | [
"def",
"_get__cmp__call_items",
"(",
"self",
")",
":",
"raise",
"NotImplementedError",
"(",
")"
] | [
169,
4
] | [
175,
35
] | python | en | ['en', 'error', 'th'] | False |
calldef_t._get__cmp__items | (self) |
Implementation detail.
|
Implementation detail. | def _get__cmp__items(self):
"""
Implementation detail.
"""
items = [
self.arguments,
self.return_type,
self.has_extern,
self.does_throw,
self.exceptions.sort(),
self.demangled_name,
self.has_inline]
items.extend(self._get__cmp__call_items())
return items | [
"def",
"_get__cmp__items",
"(",
"self",
")",
":",
"items",
"=",
"[",
"self",
".",
"arguments",
",",
"self",
".",
"return_type",
",",
"self",
".",
"has_extern",
",",
"self",
".",
"does_throw",
",",
"self",
".",
"exceptions",
".",
"sort",
"(",
")",
",",
"self",
".",
"demangled_name",
",",
"self",
".",
"has_inline",
"]",
"items",
".",
"extend",
"(",
"self",
".",
"_get__cmp__call_items",
"(",
")",
")",
"return",
"items"
] | [
177,
4
] | [
193,
20
] | python | en | ['en', 'error', 'th'] | False |
calldef_t.arguments | (self) | The argument list.
@type: list of :class:`argument_t` | The argument list.
| def arguments(self):
"""The argument list.
@type: list of :class:`argument_t`"""
return self._arguments | [
"def",
"arguments",
"(",
"self",
")",
":",
"return",
"self",
".",
"_arguments"
] | [
213,
4
] | [
216,
30
] | python | en | ['en', 'fr', 'en'] | True |
calldef_t.argument_types | (self) | list of all argument types | list of all argument types | def argument_types(self):
"""list of all argument types"""
return [arg.decl_type for arg in self.arguments] | [
"def",
"argument_types",
"(",
"self",
")",
":",
"return",
"[",
"arg",
".",
"decl_type",
"for",
"arg",
"in",
"self",
".",
"arguments",
"]"
] | [
227,
4
] | [
229,
56
] | python | en | ['en', 'en', 'en'] | True |
calldef_t.required_args | (self) | list of all required arguments | list of all required arguments | def required_args(self):
"""list of all required arguments"""
r_args = []
for arg in self.arguments:
if not arg.default_value:
r_args.append(arg)
else:
break
return r_args | [
"def",
"required_args",
"(",
"self",
")",
":",
"r_args",
"=",
"[",
"]",
"for",
"arg",
"in",
"self",
".",
"arguments",
":",
"if",
"not",
"arg",
".",
"default_value",
":",
"r_args",
".",
"append",
"(",
"arg",
")",
"else",
":",
"break",
"return",
"r_args"
] | [
232,
4
] | [
240,
21
] | python | en | ['en', 'en', 'en'] | True |
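The split logic is worth spelling out: the required arguments are the prefix before the first default value, and `optional_args` (next record) is simply the rest of the list. A pure-Python mirror with a hypothetical `Arg` stand-in:

```python
from collections import namedtuple

Arg = namedtuple("Arg", "name default_value")
arguments = [Arg("a", None), Arg("b", None), Arg("c", "42")]

required = []
for arg in arguments:
    if not arg.default_value:
        required.append(arg)
    else:
        break  # the first defaulted argument ends the required prefix

optional = arguments[len(required):]
print([a.name for a in required], [a.name for a in optional])  # ['a', 'b'] ['c']
```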
calldef_t.optional_args | (self) | list of all optional arguments, the arguments that have a default
value | list of all optional arguments, the arguments that have a default
value | def optional_args(self):
"""list of all optional arguments, the arguments that have a default
value"""
return self.arguments[len(self.required_args):] | [
"def",
"optional_args",
"(",
"self",
")",
":",
"return",
"self",
".",
"arguments",
"[",
"len",
"(",
"self",
".",
"required_args",
")",
":",
"]"
] | [
243,
4
] | [
246,
55
] | python | en | ['en', 'fr', 'en'] | True |
calldef_t.does_throw | (self) | If False, then the function does not throw any exception.
In this case, the function was declared with an empty throw
statement. | If False, then the function does not throw any exception.
In this case, the function was declared with an empty throw
statement. | def does_throw(self):
"""If False, then the function does not throw any exception.
In this case, the function was declared with an empty throw
statement."""
return self._does_throw | [
"def",
"does_throw",
"(",
"self",
")",
":",
"return",
"self",
".",
"_does_throw"
] | [
249,
4
] | [
253,
31
] | python | en | ['en', 'en', 'en'] | True |
calldef_t.exceptions | (self) | The list of exceptions.
@type: list of :class:`declaration_t` | The list of exceptions.
| def exceptions(self):
"""The list of exceptions.
@type: list of :class:`declaration_t`"""
return self._exceptions | [
"def",
"exceptions",
"(",
"self",
")",
":",
"return",
"self",
".",
"_exceptions"
] | [
260,
4
] | [
263,
31
] | python | en | ['en', 'en', 'en'] | True |
calldef_t.return_type | (self) | The type of the return value of the "callable" or None
(constructors).
@type: :class:`type_t` | The type of the return value of the "callable" or None
(constructors).
| def return_type(self):
"""The type of the return value of the "callable" or None
(constructors).
@type: :class:`type_t`"""
return self._return_type | [
"def",
"return_type",
"(",
"self",
")",
":",
"return",
"self",
".",
"_return_type"
] | [
270,
4
] | [
274,
32
] | python | en | ['en', 'en', 'en'] | True |
calldef_t.overloads | (self) | A list of overloaded "callables" (i.e. other callables with the
same name within the same scope).
@type: list of :class:`calldef_t`
| A list of overloaded "callables" (i.e. other callables with the
same name within the same scope). | def overloads(self):
"""A list of overloaded "callables" (i.e. other callables with the
same name within the same scope).
@type: list of :class:`calldef_t`
"""
if not self.parent:
return []
# finding all functions with the same name
return self.parent.calldefs(
name=self.name,
function=lambda decl: decl is not self,
allow_empty=True,
recursive=False) | [
"def",
"overloads",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"parent",
":",
"return",
"[",
"]",
"# finding all functions with the same name",
"return",
"self",
".",
"parent",
".",
"calldefs",
"(",
"name",
"=",
"self",
".",
"name",
",",
"function",
"=",
"lambda",
"decl",
":",
"decl",
"is",
"not",
"self",
",",
"allow_empty",
"=",
"True",
",",
"recursive",
"=",
"False",
")"
] | [
281,
4
] | [
294,
28
] | python | en | ['en', 'en', 'en'] | True |
calldef_t.has_extern | (self) | Was this callable declared as "extern"?
@type: bool | Was this callable declared as "extern"?
| def has_extern(self):
"""Was this callable declared as "extern"?
@type: bool"""
return self._has_extern | [
"def",
"has_extern",
"(",
"self",
")",
":",
"return",
"self",
".",
"_has_extern"
] | [
297,
4
] | [
300,
31
] | python | en | ['en', 'en', 'en'] | True |
calldef_t.has_inline | (self) | Was this callable declared with "inline" specifier
@type: bool | Was this callable declared with "inline" specifier
| def has_inline(self):
"""Was this callable declared with "inline" specifier
@type: bool"""
return self._has_inline | [
"def",
"has_inline",
"(",
"self",
")",
":",
"return",
"self",
".",
"_has_inline"
] | [
307,
4
] | [
310,
31
] | python | en | ['en', 'en', 'en'] | True |
calldef_t.__remove_parent_fname | (self, demangled) | implementation details | implementation details | def __remove_parent_fname(self, demangled):
"""implementation details"""
demangled = demangled.strip()
parent_fname = declaration_utils.full_name(self.parent)
if parent_fname.startswith('::') and not demangled.startswith('::'):
parent_fname = parent_fname[2:]
demangled = demangled[len(parent_fname):]
return demangled | [
"def",
"__remove_parent_fname",
"(",
"self",
",",
"demangled",
")",
":",
"demangled",
"=",
"demangled",
".",
"strip",
"(",
")",
"parent_fname",
"=",
"declaration_utils",
".",
"full_name",
"(",
"self",
".",
"parent",
")",
"if",
"parent_fname",
".",
"startswith",
"(",
"'::'",
")",
"and",
"not",
"demangled",
".",
"startswith",
"(",
"'::'",
")",
":",
"parent_fname",
"=",
"parent_fname",
"[",
"2",
":",
"]",
"demangled",
"=",
"demangled",
"[",
"len",
"(",
"parent_fname",
")",
":",
"]",
"return",
"demangled"
] | [
316,
4
] | [
323,
24
] | python | da | ['eo', 'da', 'en'] | False |
calldef_t.demangled_name | (self) | returns function demangled name. It can help you to deal with
function template instantiations | returns function demangled name. It can help you to deal with
function template instantiations | def demangled_name(self):
"""returns function demangled name. It can help you to deal with
function template instantiations"""
if not self.demangled:
self._demangled_name = ''
if self._demangled_name:
return self._demangled_name
if self._demangled_name == '':
return self.name
demangled = self.demangled
if self.return_type:
return_type = type_traits.remove_alias(
self.return_type).decl_string
if return_type.startswith('::') and not \
self.demangled.startswith('::'):
return_type = return_type[2:]
demangled = self.demangled
if demangled.startswith(return_type):
demangled = demangled[len(return_type):]
demangled = demangled.strip()
# removing scope
demangled_name = call_invocation.name(
self.__remove_parent_fname(demangled))
if demangled_name.startswith('::'):
demangled_name = demangled_name[2:]
# to be on the safe side
if demangled_name.startswith(self.name):
self._demangled_name = demangled_name
return self._demangled_name
# well, I am going to try another strategy
fname = declaration_utils.full_name(self)
found = self.demangled.find(fname)
if found == -1:
if fname.startswith('::'):
fname = fname[2:]
found = self.demangled.find(fname)
if found == -1:
self._demangled_name = ''
return self.name
demangled_name = call_invocation.name(self.demangled[found:])
demangled_name = self.__remove_parent_fname(demangled_name)
if demangled_name.startswith('::'):
demangled_name = demangled_name[2:]
# to be on the safe side
if demangled_name.startswith(self.name):
self._demangled_name = demangled_name
return self._demangled_name
self._demangled_name = ''
return self.name | [
"def",
"demangled_name",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"demangled",
":",
"self",
".",
"_demangled_name",
"=",
"''",
"if",
"self",
".",
"_demangled_name",
":",
"return",
"self",
".",
"_demangled_name",
"if",
"self",
".",
"_demangled_name",
"==",
"''",
":",
"return",
"self",
".",
"name",
"demangled",
"=",
"self",
".",
"demangled",
"if",
"self",
".",
"return_type",
":",
"return_type",
"=",
"type_traits",
".",
"remove_alias",
"(",
"self",
".",
"return_type",
")",
".",
"decl_string",
"if",
"return_type",
".",
"startswith",
"(",
"'::'",
")",
"and",
"not",
"self",
".",
"demangled",
".",
"startswith",
"(",
"'::'",
")",
":",
"return_type",
"=",
"return_type",
"[",
"2",
":",
"]",
"demangled",
"=",
"self",
".",
"demangled",
"if",
"demangled",
".",
"startswith",
"(",
"return_type",
")",
":",
"demangled",
"=",
"demangled",
"[",
"len",
"(",
"return_type",
")",
":",
"]",
"demangled",
"=",
"demangled",
".",
"strip",
"(",
")",
"# removing scope",
"demangled_name",
"=",
"call_invocation",
".",
"name",
"(",
"self",
".",
"__remove_parent_fname",
"(",
"demangled",
")",
")",
"if",
"demangled_name",
".",
"startswith",
"(",
"'::'",
")",
":",
"demangled_name",
"=",
"demangled_name",
"[",
"2",
":",
"]",
"# to be on the safe side",
"if",
"demangled_name",
".",
"startswith",
"(",
"self",
".",
"name",
")",
":",
"self",
".",
"_demangled_name",
"=",
"demangled_name",
"return",
"self",
".",
"_demangled_name",
"# well, I am going to try an other strategy",
"fname",
"=",
"declaration_utils",
".",
"full_name",
"(",
"self",
")",
"found",
"=",
"self",
".",
"demangled",
".",
"find",
"(",
"fname",
")",
"if",
"found",
"==",
"-",
"1",
":",
"if",
"fname",
".",
"startswith",
"(",
"'::'",
")",
":",
"fname",
"=",
"fname",
"[",
"2",
":",
"]",
"found",
"=",
"self",
".",
"demangled",
".",
"find",
"(",
"fname",
")",
"if",
"found",
"==",
"-",
"1",
":",
"self",
".",
"_demangled_name",
"=",
"''",
"return",
"self",
".",
"name",
"demangled_name",
"=",
"call_invocation",
".",
"name",
"(",
"self",
".",
"demangled",
"[",
"found",
":",
"]",
")",
"demangled_name",
"=",
"self",
".",
"__remove_parent_fname",
"(",
"demangled_name",
")",
"if",
"demangled_name",
".",
"startswith",
"(",
"'::'",
")",
":",
"demangled_name",
"=",
"demangled_name",
"[",
"2",
":",
"]",
"# to be on the safe side",
"if",
"demangled_name",
".",
"startswith",
"(",
"self",
".",
"name",
")",
":",
"self",
".",
"_demangled_name",
"=",
"demangled_name",
"return",
"self",
".",
"_demangled_name",
"self",
".",
"_demangled_name",
"=",
"''",
"return",
"self",
".",
"name"
] | [
326,
4
] | [
381,
24
] | python | en | ['en', 'en', 'en'] | True |
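The method above strips, in order, the return type, then the parent scope, then the argument list from the compiler-demangled string. A toy, pure-Python illustration of those steps; all names are invented and the scope/call handling is simplified relative to the real helpers:

```python
demangled = "int myns::Widget::count(bool) const"
return_type = "int"
parent = "myns::Widget"

s = demangled[len(return_type):].strip()  # drop the return-type prefix
s = s[len(parent):].lstrip(":")           # drop the parent scope (__remove_parent_fname)
name = s.split("(", 1)[0]                 # keep the call name (call_invocation.name)
print(name)  # count
```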
calldef_t.guess_calling_convention | (self) | This function should be overridden in the derived classes and return
a more-or-less successful guess about the calling convention | This function should be overridden in the derived classes and return
a more-or-less successful guess about the calling convention | def guess_calling_convention(self):
"""This function should be overridden in the derived classes and return
a more-or-less successful guess about the calling convention"""
return calldef_types.CALLING_CONVENTION_TYPES.UNKNOWN | [
"def",
"guess_calling_convention",
"(",
"self",
")",
":",
"return",
"calldef_types",
".",
"CALLING_CONVENTION_TYPES",
".",
"UNKNOWN"
] | [
397,
4
] | [
400,
61
] | python | en | ['en', 'en', 'en'] | True |
calldef_t.calling_convention | (self) | function calling convention. See
:class:`CALLING_CONVENTION_TYPES` for possible values | function calling convention. See
:class:`CALLING_CONVENTION_TYPES` for possible values | def calling_convention(self):
"""function calling convention. See
:class:`CALLING_CONVENTION_TYPES` for possible values"""
if self._calling_convention is None:
self._calling_convention = \
calldef_types.CALLING_CONVENTION_TYPES.extract(self.attributes)
if not self._calling_convention:
self._calling_convention = self.guess_calling_convention()
return self._calling_convention | [
"def",
"calling_convention",
"(",
"self",
")",
":",
"if",
"self",
".",
"_calling_convention",
"is",
"None",
":",
"self",
".",
"_calling_convention",
"=",
"calldef_types",
".",
"CALLING_CONVENTION_TYPES",
".",
"extract",
"(",
"self",
".",
"attributes",
")",
"if",
"not",
"self",
".",
"_calling_convention",
":",
"self",
".",
"_calling_convention",
"=",
"self",
".",
"guess_calling_convention",
"(",
")",
"return",
"self",
".",
"_calling_convention"
] | [
403,
4
] | [
411,
39
] | python | en | ['en', 'en', 'en'] | True |
calldef_t.mangled | (self) |
Unique declaration name generated by the compiler.
:return: the mangled name
:rtype: str
|
Unique declaration name generated by the compiler. | def mangled(self):
"""
Unique declaration name generated by the compiler.
:return: the mangled name
:rtype: str
"""
return self.get_mangled_name() | [
"def",
"mangled",
"(",
"self",
")",
":",
"return",
"self",
".",
"get_mangled_name",
"(",
")"
] | [
418,
4
] | [
427,
38
] | python | en | ['en', 'error', 'th'] | False |
open_nursery | () | Returns an async context manager which must be used to create a
new `Nursery`.
It does not block on entry; on exit it blocks until all child tasks
have exited.
| Returns an async context manager which must be used to create a
new `Nursery`. | def open_nursery():
"""Returns an async context manager which must be used to create a
new `Nursery`.
It does not block on entry; on exit it blocks until all child tasks
have exited.
"""
return NurseryManager() | [
"def",
"open_nursery",
"(",
")",
":",
"return",
"NurseryManager",
"(",
")"
] | [
829,
0
] | [
837,
27
] | python | en | ['en', 'en', 'en'] | True |
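Typical usage: entering the block is instantaneous, and leaving it waits for every child task:

```python
import trio

async def child(name, delay):
    await trio.sleep(delay)
    print(name, "done")

async def main():
    async with trio.open_nursery() as nursery:  # entry does not block
        nursery.start_soon(child, "a", 0.1)
        nursery.start_soon(child, "b", 0.2)
    print("all children finished")  # reached only after both children exit

trio.run(main)
```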
setup_runner | (clock, instruments, restrict_keyboard_interrupt_to_checkpoints) | Create a Runner object and install it as the GLOBAL_RUN_CONTEXT. | Create a Runner object and install it as the GLOBAL_RUN_CONTEXT. | def setup_runner(clock, instruments, restrict_keyboard_interrupt_to_checkpoints):
"""Create a Runner object and install it as the GLOBAL_RUN_CONTEXT."""
# It wouldn't be *hard* to support nested calls to run(), but I can't
# think of a single good reason for it, so let's be conservative for
# now:
if hasattr(GLOBAL_RUN_CONTEXT, "runner"):
raise RuntimeError("Attempted to call run() from inside a run()")
if clock is None:
clock = SystemClock()
instruments = Instruments(instruments)
io_manager = TheIOManager()
system_context = copy_context()
system_context.run(current_async_library_cvar.set, "trio")
ki_manager = KIManager()
runner = Runner(
clock=clock,
instruments=instruments,
io_manager=io_manager,
system_context=system_context,
ki_manager=ki_manager,
)
runner.asyncgens.install_hooks(runner)
# This is where KI protection gets enabled, so we want to do it early - in
# particular before we start modifying global state like GLOBAL_RUN_CONTEXT
ki_manager.install(runner.deliver_ki, restrict_keyboard_interrupt_to_checkpoints)
GLOBAL_RUN_CONTEXT.runner = runner
return runner | [
"def",
"setup_runner",
"(",
"clock",
",",
"instruments",
",",
"restrict_keyboard_interrupt_to_checkpoints",
")",
":",
"# It wouldn't be *hard* to support nested calls to run(), but I can't",
"# think of a single good reason for it, so let's be conservative for",
"# now:",
"if",
"hasattr",
"(",
"GLOBAL_RUN_CONTEXT",
",",
"\"runner\"",
")",
":",
"raise",
"RuntimeError",
"(",
"\"Attempted to call run() from inside a run()\"",
")",
"if",
"clock",
"is",
"None",
":",
"clock",
"=",
"SystemClock",
"(",
")",
"instruments",
"=",
"Instruments",
"(",
"instruments",
")",
"io_manager",
"=",
"TheIOManager",
"(",
")",
"system_context",
"=",
"copy_context",
"(",
")",
"system_context",
".",
"run",
"(",
"current_async_library_cvar",
".",
"set",
",",
"\"trio\"",
")",
"ki_manager",
"=",
"KIManager",
"(",
")",
"runner",
"=",
"Runner",
"(",
"clock",
"=",
"clock",
",",
"instruments",
"=",
"instruments",
",",
"io_manager",
"=",
"io_manager",
",",
"system_context",
"=",
"system_context",
",",
"ki_manager",
"=",
"ki_manager",
",",
")",
"runner",
".",
"asyncgens",
".",
"install_hooks",
"(",
"runner",
")",
"# This is where KI protection gets enabled, so we want to do it early - in",
"# particular before we start modifying global state like GLOBAL_RUN_CONTEXT",
"ki_manager",
".",
"install",
"(",
"runner",
".",
"deliver_ki",
",",
"restrict_keyboard_interrupt_to_checkpoints",
")",
"GLOBAL_RUN_CONTEXT",
".",
"runner",
"=",
"runner",
"return",
"runner"
] | [
1809,
0
] | [
1839,
17
] | python | en | ['en', 'en', 'en'] | True |