identifier (string, 1-155 chars) | parameters (string, 2-6.09k chars) | docstring (string, 11-63.4k chars) | docstring_summary (string, 0-63.4k chars) | function (string, 29-99.8k chars) | function_tokens (sequence) | start_point (sequence) | end_point (sequence) | language (string, 1 class) | docstring_language (string, 2-7 chars) | docstring_language_predictions (string, 18-23 chars) | is_langid_reliable (string, 2 classes) |
---|---|---|---|---|---|---|---|---|---|---|---|
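Each row below pairs a Python function with its signature, docstring, token sequence (`function_tokens`), source span (`start_point`/`end_point`, apparently `[line, column]` pairs), and a language-identification result for the docstring. A minimal sketch of iterating such a split with the `datasets` library; the dataset path is a placeholder, not the real hub identifier:

```python
from datasets import load_dataset

# Placeholder path: substitute the actual hub identifier for this dataset.
ds = load_dataset("user/code-docstring-corpus", split="train")

for row in ds.select(range(3)):
    print(row["identifier"], row["parameters"])
    print(row["docstring_summary"])
    print(row["start_point"], "->", row["end_point"], row["language"])
```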
mdecl_wrapper_t.__init__ | (self, decls) | :param decls: list of declarations to operate on.
:type decls: list of :class:`declaration wrappers <decl_wrapper_t>`
| :param decls: list of declarations to operate on.
:type decls: list of :class:`declaration wrappers <decl_wrapper_t>`
| def __init__(self, decls):
""":param decls: list of declarations to operate on.
:type decls: list of :class:`declaration wrappers <decl_wrapper_t>`
"""
object.__init__(self)
self.__dict__['declarations'] = decls | [
"def",
"__init__",
"(",
"self",
",",
"decls",
")",
":",
"object",
".",
"__init__",
"(",
"self",
")",
"self",
".",
"__dict__",
"[",
"'declarations'",
"]",
"=",
"decls"
] | [64, 4] | [69, 45] | python | en | ['en', 'en', 'en'] | True |
mdecl_wrapper_t.__len__ | (self) | returns the number of declarations | returns the number of declarations | def __len__(self):
"""returns the number of declarations"""
return len(self.declarations) | [
"def",
"__len__",
"(",
"self",
")",
":",
"return",
"len",
"(",
"self",
".",
"declarations",
")"
] | [74, 4] | [76, 37] | python | en | ['en', 'en', 'en'] | True |
mdecl_wrapper_t.__getitem__ | (self, index) | provides access to declaration | provides access to declaration | def __getitem__(self, index):
"""provides access to declaration"""
return self.declarations[index] | [
"def",
"__getitem__",
"(",
"self",
",",
"index",
")",
":",
"return",
"self",
".",
"declarations",
"[",
"index",
"]"
] | [78, 4] | [80, 39] | python | en | ['en', 'en', 'en'] | True |
mdecl_wrapper_t.__setattr__ | (self, name, value) | Updates the value of attribute on all declarations.
:param name: name of attribute
:param value: new value of attribute
| Updates the value of attribute on all declarations.
:param name: name of attribute
:param value: new value of attribute
| def __setattr__(self, name, value):
"""Updates the value of attribute on all declarations.
:param name: name of attribute
:param value: new value of attribute
"""
self.__ensure_attribute(name)
for d in self.declarations:
setattr(d, name, value) | [
"def",
"__setattr__",
"(",
"self",
",",
"name",
",",
"value",
")",
":",
"self",
".",
"__ensure_attribute",
"(",
"name",
")",
"for",
"d",
"in",
"self",
".",
"declarations",
":",
"setattr",
"(",
"d",
",",
"name",
",",
"value",
")"
] | [93, 4] | [100, 35] | python | en | ['en', 'en', 'en'] | True |
mdecl_wrapper_t.__getattr__ | (self, name) | :param name: name of method
| :param name: name of method
| def __getattr__(self, name):
""":param name: name of method
"""
return call_redirector_t(name, self.declarations) | [
"def",
"__getattr__",
"(",
"self",
",",
"name",
")",
":",
"return",
"call_redirector_t",
"(",
"name",
",",
"self",
".",
"declarations",
")"
] | [102, 4] | [105, 57] | python | en | ['en', 'jv', 'en'] | True |
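Taken together, these methods let a list of declarations be driven as one object: `__setattr__` broadcasts attribute writes to every declaration, and `__getattr__` hands back a `call_redirector_t` that calls the named method on each of them. A condensed, self-contained sketch of that behaviour; the `call_redirector_t` stand-in and the `decl_t` demo class are illustrative, and the original's `__ensure_attribute` check is omitted:

```python
class call_redirector_t:
    """Stand-in for the real redirector: calls `name` on every declaration."""
    def __init__(self, name, decls):
        self.name, self.decls = name, decls

    def __call__(self, *args, **kwargs):
        return [getattr(d, self.name)(*args, **kwargs) for d in self.decls]


class mdecl_wrapper_t:
    """Condensed restatement of the wrapper shown in the rows above."""
    def __init__(self, decls):
        self.__dict__['declarations'] = decls   # bypass __setattr__

    def __len__(self):
        return len(self.declarations)

    def __getitem__(self, index):
        return self.declarations[index]

    def __setattr__(self, name, value):
        for d in self.declarations:             # broadcast the write
            setattr(d, name, value)

    def __getattr__(self, name):
        return call_redirector_t(name, self.declarations)


class decl_t:
    def __init__(self, name):
        self.name, self.exportable = name, True

    def rename(self, new_name):
        return f"{self.name} -> {new_name}"


wrapper = mdecl_wrapper_t([decl_t("a"), decl_t("b")])
wrapper.exportable = False                      # set on both declarations
print(len(wrapper), wrapper[0].exportable)      # 2 False
print(wrapper.rename("c"))                      # ['a -> c', 'b -> c']
```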
FileToPathMapping.comment_out | (self,filename,remove_header) | Get rid of include lines that are redundant | Get rid of include lines that are redundant | def comment_out(self,filename,remove_header):
"""Get rid of include lines that are redundant"""
ff=open(self.filePathBaseDirs[filename]+"/"+filename)
outfile=open(self.filePathBaseDirs[filename]+"/"+filename+"_cleaned","w")
for line in ff:
if line.find(remove_header) != -1:
print(" Removing {0} from {1}".format(line,self.filePathBaseDirs[filename]+"/"+filename))
else:
outfile.write(line)
ff.close()
outfile.close()
os.rename(self.filePathBaseDirs[filename]+"/"+filename+"_cleaned",self.filePathBaseDirs[filename]+"/"+filename) | [
"def",
"comment_out",
"(",
"self",
",",
"filename",
",",
"remove_header",
")",
":",
"ff",
"=",
"open",
"(",
"self",
".",
"filePathBaseDirs",
"[",
"filename",
"]",
"+",
"\"/\"",
"+",
"filename",
")",
"outfile",
"=",
"open",
"(",
"self",
".",
"filePathBaseDirs",
"[",
"filename",
"]",
"+",
"\"/\"",
"+",
"filename",
"+",
"\"_cleaned\"",
",",
"\"w\"",
")",
"for",
"line",
"in",
"ff",
":",
"if",
"line",
".",
"find",
"(",
"remove_header",
")",
"!=",
"-",
"1",
":",
"print",
"(",
"\" Removing {0} from {1}\"",
".",
"format",
"(",
"line",
",",
"self",
".",
"filePathBaseDirs",
"[",
"filename",
"]",
"+",
"\"/\"",
"+",
"filename",
")",
")",
"else",
":",
"outfile",
".",
"write",
"(",
"line",
")",
"ff",
".",
"close",
"(",
")",
"outfile",
".",
"close",
"(",
")",
"os",
".",
"rename",
"(",
"self",
".",
"filePathBaseDirs",
"[",
"filename",
"]",
"+",
"\"/\"",
"+",
"filename",
"+",
"\"_cleaned\"",
",",
"self",
".",
"filePathBaseDirs",
"[",
"filename",
"]",
"+",
"\"/\"",
"+",
"filename",
")"
] | [66, 4] | [77, 117] | python | en | ['en', 'en', 'en'] | True |
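The method above streams the file into a sibling `_cleaned` copy, dropping any line that mentions the redundant header, then renames the copy over the original. A standalone sketch of the same rewrite-and-replace pattern; the function and path handling are illustrative, not taken from the row:

```python
import os

def strip_include(path: str, remove_header: str) -> None:
    cleaned = path + "_cleaned"
    with open(path) as src, open(cleaned, "w") as dst:
        for line in src:
            if remove_header in line:
                print(f"  Removing {line.rstrip()} from {path}")
            else:
                dst.write(line)
    os.rename(cleaned, path)   # the cleaned copy replaces the original
```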
test_test_yaml_config_usage_stats_custom_type | (
mock_emit, empty_data_context_stats_enabled
) |
What does this test and why?
We should be able to discern the GE parent class for a custom type and construct
a useful usage stats event message.
|
What does this test and why?
We should be able to discern the GE parent class for a custom type and construct
a useful usage stats event message.
| def test_test_yaml_config_usage_stats_custom_type(
mock_emit, empty_data_context_stats_enabled
):
"""
What does this test and why?
We should be able to discern the GE parent class for a custom type and construct
a useful usage stats event message.
"""
data_context: DataContext = empty_data_context_stats_enabled
_ = data_context.test_yaml_config(
yaml_config="""
module_name: tests.data_context.fixtures.plugins
class_name: MyCustomExpectationsStore
store_backend:
module_name: great_expectations.data_context.store.store_backend
class_name: InMemoryStoreBackend
"""
)
assert mock_emit.call_count == 1
# Substitute anonymized name & class since it changes for each run
anonymized_name = mock_emit.call_args_list[0][0][0]["event_payload"][
"anonymized_name"
]
anonymized_class = mock_emit.call_args_list[0][0][0]["event_payload"][
"anonymized_class"
]
assert mock_emit.call_args_list == [
mock.call(
{
"event": "data_context.test_yaml_config",
"event_payload": {
"anonymized_name": anonymized_name,
"parent_class": "ExpectationsStore",
"anonymized_class": anonymized_class,
"anonymized_store_backend": {
"parent_class": "InMemoryStoreBackend"
},
},
"success": True,
}
),
] | [
"def",
"test_test_yaml_config_usage_stats_custom_type",
"(",
"mock_emit",
",",
"empty_data_context_stats_enabled",
")",
":",
"data_context",
":",
"DataContext",
"=",
"empty_data_context_stats_enabled",
"_",
"=",
"data_context",
".",
"test_yaml_config",
"(",
"yaml_config",
"=",
"\"\"\"\nmodule_name: tests.data_context.fixtures.plugins\nclass_name: MyCustomExpectationsStore\nstore_backend:\n module_name: great_expectations.data_context.store.store_backend\n class_name: InMemoryStoreBackend\n\"\"\"",
")",
"assert",
"mock_emit",
".",
"call_count",
"==",
"1",
"# Substitute anonymized name & class since it changes for each run",
"anonymized_name",
"=",
"mock_emit",
".",
"call_args_list",
"[",
"0",
"]",
"[",
"0",
"]",
"[",
"0",
"]",
"[",
"\"event_payload\"",
"]",
"[",
"\"anonymized_name\"",
"]",
"anonymized_class",
"=",
"mock_emit",
".",
"call_args_list",
"[",
"0",
"]",
"[",
"0",
"]",
"[",
"0",
"]",
"[",
"\"event_payload\"",
"]",
"[",
"\"anonymized_class\"",
"]",
"assert",
"mock_emit",
".",
"call_args_list",
"==",
"[",
"mock",
".",
"call",
"(",
"{",
"\"event\"",
":",
"\"data_context.test_yaml_config\"",
",",
"\"event_payload\"",
":",
"{",
"\"anonymized_name\"",
":",
"anonymized_name",
",",
"\"parent_class\"",
":",
"\"ExpectationsStore\"",
",",
"\"anonymized_class\"",
":",
"anonymized_class",
",",
"\"anonymized_store_backend\"",
":",
"{",
"\"parent_class\"",
":",
"\"InMemoryStoreBackend\"",
"}",
",",
"}",
",",
"\"success\"",
":",
"True",
",",
"}",
")",
",",
"]"
] | [64, 0] | [105, 5] | python | en | ['en', 'error', 'th'] | False |
test_test_yaml_config_usage_stats_class_name_not_provided | (
mock_emit, empty_data_context_stats_enabled
) |
What does this test and why?
If a class_name is not provided, and we have run into an error state in test_yaml_config() (likely because of the missing class_name) then we should report descriptive diagnostic info.
|
What does this test and why?
If a class_name is not provided, and we have run into an error state in test_yaml_config() (likely because of the missing class_name) then we should report descriptive diagnostic info.
| def test_test_yaml_config_usage_stats_class_name_not_provided(
mock_emit, empty_data_context_stats_enabled
):
"""
What does this test and why?
If a class_name is not provided, and we have run into an error state in test_yaml_config() (likely because of the missing class_name) then we should report descriptive diagnostic info.
"""
with pytest.raises(Exception):
# noinspection PyUnusedLocal
my_expectation_store = empty_data_context_stats_enabled.test_yaml_config(
yaml_config="""
module_name: great_expectations.data_context.store.expectations_store
"""
)
assert mock_emit.call_count == 1
assert mock_emit.call_args_list == [
mock.call(
{
"event": "data_context.test_yaml_config",
"event_payload": {"diagnostic_info": ["__class_name_not_provided__"]},
"success": False,
}
),
] | [
"def",
"test_test_yaml_config_usage_stats_class_name_not_provided",
"(",
"mock_emit",
",",
"empty_data_context_stats_enabled",
")",
":",
"with",
"pytest",
".",
"raises",
"(",
"Exception",
")",
":",
"# noinspection PyUnusedLocal",
"my_expectation_store",
"=",
"empty_data_context_stats_enabled",
".",
"test_yaml_config",
"(",
"yaml_config",
"=",
"\"\"\"\nmodule_name: great_expectations.data_context.store.expectations_store\n\n \"\"\"",
")",
"assert",
"mock_emit",
".",
"call_count",
"==",
"1",
"assert",
"mock_emit",
".",
"call_args_list",
"==",
"[",
"mock",
".",
"call",
"(",
"{",
"\"event\"",
":",
"\"data_context.test_yaml_config\"",
",",
"\"event_payload\"",
":",
"{",
"\"diagnostic_info\"",
":",
"[",
"\"__class_name_not_provided__\"",
"]",
"}",
",",
"\"success\"",
":",
"False",
",",
"}",
")",
",",
"]"
] | [111, 0] | [135, 5] | python | en | ['en', 'error', 'th'] | False |
test_test_yaml_config_usage_stats_custom_config_class_name_not_provided | (
mock_emit, empty_data_context_stats_enabled
) |
What does this test and why?
If a class_name is not provided, and we have run into an error state in test_yaml_config() (likely because of the missing class_name) then we should report descriptive diagnostic info.
This should be the case even if we are passing in a custom config.
|
What does this test and why?
If a class_name is not provided, and we have run into an error state in test_yaml_config() (likely because of the missing class_name) then we should report descriptive diagnostic info.
This should be the case even if we are passing in a custom config.
| def test_test_yaml_config_usage_stats_custom_config_class_name_not_provided(
mock_emit, empty_data_context_stats_enabled
):
"""
What does this test and why?
If a class_name is not provided, and we have run into an error state in test_yaml_config() (likely because of the missing class_name) then we should report descriptive diagnostic info.
This should be the case even if we are passing in a custom config.
"""
data_context: DataContext = empty_data_context_stats_enabled
with pytest.raises(Exception):
_ = data_context.test_yaml_config(
yaml_config="""
module_name: tests.data_context.fixtures.plugins.my_custom_expectations_store
store_backend:
module_name: great_expectations.data_context.store.store_backend
class_name: InMemoryStoreBackend
"""
)
assert mock_emit.call_count == 1
assert mock_emit.call_args_list == [
mock.call(
{
"event": "data_context.test_yaml_config",
"event_payload": {
"diagnostic_info": ["__class_name_not_provided__"],
},
"success": False,
}
),
] | [
"def",
"test_test_yaml_config_usage_stats_custom_config_class_name_not_provided",
"(",
"mock_emit",
",",
"empty_data_context_stats_enabled",
")",
":",
"data_context",
":",
"DataContext",
"=",
"empty_data_context_stats_enabled",
"with",
"pytest",
".",
"raises",
"(",
"Exception",
")",
":",
"_",
"=",
"data_context",
".",
"test_yaml_config",
"(",
"yaml_config",
"=",
"\"\"\"\nmodule_name: tests.data_context.fixtures.plugins.my_custom_expectations_store\nstore_backend:\n module_name: great_expectations.data_context.store.store_backend\n class_name: InMemoryStoreBackend\n\"\"\"",
")",
"assert",
"mock_emit",
".",
"call_count",
"==",
"1",
"assert",
"mock_emit",
".",
"call_args_list",
"==",
"[",
"mock",
".",
"call",
"(",
"{",
"\"event\"",
":",
"\"data_context.test_yaml_config\"",
",",
"\"event_payload\"",
":",
"{",
"\"diagnostic_info\"",
":",
"[",
"\"__class_name_not_provided__\"",
"]",
",",
"}",
",",
"\"success\"",
":",
"False",
",",
"}",
")",
",",
"]"
] | [141, 0] | [170, 5] | python | en | ['en', 'error', 'th'] | False |
test_test_yaml_config_usage_stats_custom_type_not_ge_subclass | (
mock_emit, empty_data_context_stats_enabled
) |
What does this test and why?
We should be able to discern the GE parent class for a custom type and construct
a useful usage stats event message.
|
What does this test and why?
We should be able to discern the GE parent class for a custom type and construct
a useful usage stats event message.
| def test_test_yaml_config_usage_stats_custom_type_not_ge_subclass(
mock_emit, empty_data_context_stats_enabled
):
"""
What does this test and why?
We should be able to discern the GE parent class for a custom type and construct
a useful usage stats event message.
"""
data_context: DataContext = empty_data_context_stats_enabled
_ = data_context.test_yaml_config(
yaml_config="""
module_name: tests.data_context.fixtures.plugins
class_name: MyCustomNonCoreGeClass
"""
)
assert mock_emit.call_count == 1
assert mock_emit.call_args_list == [
mock.call(
{
"event": "data_context.test_yaml_config",
"event_payload": {
"diagnostic_info": ["__custom_subclass_not_core_ge__"]
},
"success": True,
}
),
] | [
"def",
"test_test_yaml_config_usage_stats_custom_type_not_ge_subclass",
"(",
"mock_emit",
",",
"empty_data_context_stats_enabled",
")",
":",
"data_context",
":",
"DataContext",
"=",
"empty_data_context_stats_enabled",
"_",
"=",
"data_context",
".",
"test_yaml_config",
"(",
"yaml_config",
"=",
"\"\"\"\nmodule_name: tests.data_context.fixtures.plugins\nclass_name: MyCustomNonCoreGeClass\n\"\"\"",
")",
"assert",
"mock_emit",
".",
"call_count",
"==",
"1",
"assert",
"mock_emit",
".",
"call_args_list",
"==",
"[",
"mock",
".",
"call",
"(",
"{",
"\"event\"",
":",
"\"data_context.test_yaml_config\"",
",",
"\"event_payload\"",
":",
"{",
"\"diagnostic_info\"",
":",
"[",
"\"__custom_subclass_not_core_ge__\"",
"]",
"}",
",",
"\"success\"",
":",
"True",
",",
"}",
")",
",",
"]"
] | [176, 0] | [202, 5] | python | en | ['en', 'error', 'th'] | False |
test_test_yaml_config_usage_stats_simple_sqlalchemy_datasource_subclass | (
mock_emit, sa, test_backends, empty_data_context_stats_enabled
) |
What does this test and why?
We should be able to discern the GE parent class for a custom type and construct
a useful usage stats event message. This should be true for SimpleSqlalchemyDatasources.
|
What does this test and why?
We should be able to discern the GE parent class for a custom type and construct
a useful usage stats event message. This should be true for SimpleSqlalchemyDatasources.
| def test_test_yaml_config_usage_stats_simple_sqlalchemy_datasource_subclass(
mock_emit, sa, test_backends, empty_data_context_stats_enabled
):
"""
What does this test and why?
We should be able to discern the GE parent class for a custom type and construct
a useful usage stats event message. This should be true for SimpleSqlalchemyDatasources.
"""
if "postgresql" not in test_backends:
pytest.skip(
"test_test_yaml_config_usage_stats_simple_sqlalchemy_datasource_subclass requires postgresql"
)
data_context: DataContext = empty_data_context_stats_enabled
_ = data_context.test_yaml_config(
yaml_config="""
module_name: tests.data_context.fixtures.plugins.my_custom_simple_sqlalchemy_datasource_class
class_name: MyCustomSimpleSqlalchemyDatasource
name: some_name
introspection:
whole_table:
data_asset_name_suffix: __whole_table
credentials:
drivername: postgresql
host: localhost
port: '5432'
username: postgres
password: ''
database: postgres
"""
)
assert mock_emit.call_count == 1
# Substitute anonymized name & class since it changes for each run
anonymized_name = mock_emit.call_args_list[0][0][0]["event_payload"][
"anonymized_name"
]
anonymized_class = mock_emit.call_args_list[0][0][0]["event_payload"][
"anonymized_class"
]
anonymized_data_connector_name = mock_emit.call_args_list[0][0][0]["event_payload"][
"anonymized_data_connectors"
][0]["anonymized_name"]
assert mock_emit.call_args_list == [
mock.call(
{
"event": "data_context.test_yaml_config",
"event_payload": {
"anonymized_name": anonymized_name,
"parent_class": "SimpleSqlalchemyDatasource",
"anonymized_class": anonymized_class,
"anonymized_execution_engine": {
"parent_class": "SqlAlchemyExecutionEngine"
},
"anonymized_data_connectors": [
{
"anonymized_name": anonymized_data_connector_name,
"parent_class": "InferredAssetSqlDataConnector",
}
],
},
"success": True,
}
),
] | [
"def",
"test_test_yaml_config_usage_stats_simple_sqlalchemy_datasource_subclass",
"(",
"mock_emit",
",",
"sa",
",",
"test_backends",
",",
"empty_data_context_stats_enabled",
")",
":",
"if",
"\"postgresql\"",
"not",
"in",
"test_backends",
":",
"pytest",
".",
"skip",
"(",
"\"test_test_yaml_config_usage_stats_simple_sqlalchemy_datasource_subclass requires postgresql\"",
")",
"data_context",
":",
"DataContext",
"=",
"empty_data_context_stats_enabled",
"_",
"=",
"data_context",
".",
"test_yaml_config",
"(",
"yaml_config",
"=",
"\"\"\"\nmodule_name: tests.data_context.fixtures.plugins.my_custom_simple_sqlalchemy_datasource_class\nclass_name: MyCustomSimpleSqlalchemyDatasource\nname: some_name\nintrospection:\n whole_table:\n data_asset_name_suffix: __whole_table\ncredentials:\n drivername: postgresql\n host: localhost\n port: '5432'\n username: postgres\n password: ''\n database: postgres\n\"\"\"",
")",
"assert",
"mock_emit",
".",
"call_count",
"==",
"1",
"# Substitute anonymized name & class since it changes for each run",
"anonymized_name",
"=",
"mock_emit",
".",
"call_args_list",
"[",
"0",
"]",
"[",
"0",
"]",
"[",
"0",
"]",
"[",
"\"event_payload\"",
"]",
"[",
"\"anonymized_name\"",
"]",
"anonymized_class",
"=",
"mock_emit",
".",
"call_args_list",
"[",
"0",
"]",
"[",
"0",
"]",
"[",
"0",
"]",
"[",
"\"event_payload\"",
"]",
"[",
"\"anonymized_class\"",
"]",
"anonymized_data_connector_name",
"=",
"mock_emit",
".",
"call_args_list",
"[",
"0",
"]",
"[",
"0",
"]",
"[",
"0",
"]",
"[",
"\"event_payload\"",
"]",
"[",
"\"anonymized_data_connectors\"",
"]",
"[",
"0",
"]",
"[",
"\"anonymized_name\"",
"]",
"assert",
"mock_emit",
".",
"call_args_list",
"==",
"[",
"mock",
".",
"call",
"(",
"{",
"\"event\"",
":",
"\"data_context.test_yaml_config\"",
",",
"\"event_payload\"",
":",
"{",
"\"anonymized_name\"",
":",
"anonymized_name",
",",
"\"parent_class\"",
":",
"\"SimpleSqlalchemyDatasource\"",
",",
"\"anonymized_class\"",
":",
"anonymized_class",
",",
"\"anonymized_execution_engine\"",
":",
"{",
"\"parent_class\"",
":",
"\"SqlAlchemyExecutionEngine\"",
"}",
",",
"\"anonymized_data_connectors\"",
":",
"[",
"{",
"\"anonymized_name\"",
":",
"anonymized_data_connector_name",
",",
"\"parent_class\"",
":",
"\"InferredAssetSqlDataConnector\"",
",",
"}",
"]",
",",
"}",
",",
"\"success\"",
":",
"True",
",",
"}",
")",
",",
"]"
] | [208, 0] | [272, 5] | python | en | ['en', 'error', 'th'] | False |
InferredAssetFilesystemDataConnector.__init__ | (
self,
name: str,
datasource_name: str,
base_directory: str,
execution_engine: Optional[ExecutionEngine] = None,
default_regex: Optional[dict] = None,
glob_directive: Optional[str] = "*",
sorters: Optional[list] = None,
batch_spec_passthrough: Optional[dict] = None,
) |
Base class for DataConnectors that connect to filesystem-like data. This class supports the configuration of default_regex
and sorters for filtering and sorting data_references.
Args:
name (str): name of InferredAssetFilesystemDataConnector
datasource_name (str): Name of datasource that this DataConnector is connected to
base_directory(str): base_directory for DataConnector to begin reading files
execution_engine (ExecutionEngine): ExecutionEngine object to actually read the data
default_regex (dict): Optional dict the filter and organize the data_references.
sorters (list): Optional list if you want to sort the data_references
batch_spec_passthrough (dict): dictionary with keys that will be added directly to batch_spec
|
Base class for DataConnectors that connect to filesystem-like data. This class supports the configuration of default_regex
and sorters for filtering and sorting data_references. | def __init__(
self,
name: str,
datasource_name: str,
base_directory: str,
execution_engine: Optional[ExecutionEngine] = None,
default_regex: Optional[dict] = None,
glob_directive: Optional[str] = "*",
sorters: Optional[list] = None,
batch_spec_passthrough: Optional[dict] = None,
):
"""
Base class for DataConnectors that connect to filesystem-like data. This class supports the configuration of default_regex
and sorters for filtering and sorting data_references.
Args:
name (str): name of InferredAssetFilesystemDataConnector
datasource_name (str): Name of datasource that this DataConnector is connected to
base_directory(str): base_directory for DataConnector to begin reading files
execution_engine (ExecutionEngine): ExecutionEngine object to actually read the data
default_regex (dict): Optional dict the filter and organize the data_references.
sorters (list): Optional list if you want to sort the data_references
batch_spec_passthrough (dict): dictionary with keys that will be added directly to batch_spec
"""
logger.debug(f'Constructing InferredAssetFilesystemDataConnector "{name}".')
super().__init__(
name=name,
datasource_name=datasource_name,
execution_engine=execution_engine,
default_regex=default_regex,
sorters=sorters,
batch_spec_passthrough=batch_spec_passthrough,
)
self._base_directory = base_directory
self._glob_directive = glob_directive | [
"def",
"__init__",
"(",
"self",
",",
"name",
":",
"str",
",",
"datasource_name",
":",
"str",
",",
"base_directory",
":",
"str",
",",
"execution_engine",
":",
"Optional",
"[",
"ExecutionEngine",
"]",
"=",
"None",
",",
"default_regex",
":",
"Optional",
"[",
"dict",
"]",
"=",
"None",
",",
"glob_directive",
":",
"Optional",
"[",
"str",
"]",
"=",
"\"*\"",
",",
"sorters",
":",
"Optional",
"[",
"list",
"]",
"=",
"None",
",",
"batch_spec_passthrough",
":",
"Optional",
"[",
"dict",
"]",
"=",
"None",
",",
")",
":",
"logger",
".",
"debug",
"(",
"f'Constructing InferredAssetFilesystemDataConnector \"{name}\".'",
")",
"super",
"(",
")",
".",
"__init__",
"(",
"name",
"=",
"name",
",",
"datasource_name",
"=",
"datasource_name",
",",
"execution_engine",
"=",
"execution_engine",
",",
"default_regex",
"=",
"default_regex",
",",
"sorters",
"=",
"sorters",
",",
"batch_spec_passthrough",
"=",
"batch_spec_passthrough",
",",
")",
"self",
".",
"_base_directory",
"=",
"base_directory",
"self",
".",
"_glob_directive",
"=",
"glob_directive"
] | [29, 4] | [65, 45] | python | en | ['en', 'error', 'th'] | False |
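A minimal sketch of constructing the connector from the signature documented above; the import path, regex grouping, and directory layout are assumptions rather than anything stated in this row:

```python
# Import path assumed from Great Expectations 0.13-era packaging.
from great_expectations.datasource.data_connector import (
    InferredAssetFilesystemDataConnector,
)

connector = InferredAssetFilesystemDataConnector(
    name="my_filesystem_connector",
    datasource_name="my_datasource",
    base_directory="data/",          # scanned one level deep via glob_directive
    glob_directive="*.csv",
    default_regex={
        "pattern": r"(.*)\.csv",
        "group_names": ["data_asset_name"],
    },
)
```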
InferredAssetFilesystemDataConnector._get_data_reference_list | (
self, data_asset_name: Optional[str] = None
) |
List objects in the underlying data store to create a list of data_references.
This method is used to refresh the cache.
|
List objects in the underlying data store to create a list of data_references. | def _get_data_reference_list(
self, data_asset_name: Optional[str] = None
) -> List[str]:
"""
List objects in the underlying data store to create a list of data_references.
This method is used to refresh the cache.
"""
path_list: List[str] = get_filesystem_one_level_directory_glob_path_list(
base_directory_path=self.base_directory, glob_directive=self._glob_directive
)
return sorted(path_list) | [
"def",
"_get_data_reference_list",
"(",
"self",
",",
"data_asset_name",
":",
"Optional",
"[",
"str",
"]",
"=",
"None",
")",
"->",
"List",
"[",
"str",
"]",
":",
"path_list",
":",
"List",
"[",
"str",
"]",
"=",
"get_filesystem_one_level_directory_glob_path_list",
"(",
"base_directory_path",
"=",
"self",
".",
"base_directory",
",",
"glob_directive",
"=",
"self",
".",
"_glob_directive",
")",
"return",
"sorted",
"(",
"path_list",
")"
] | [67, 4] | [78, 32] | python | en | ['en', 'error', 'th'] | False |
InferredAssetFilesystemDataConnector.base_directory | (self) |
Accessor method for base_directory. If directory is a relative path, interpret it as relative to the
root directory. If it is absolute, then keep as-is.
|
Accessor method for base_directory. If directory is a relative path, interpret it as relative to the
root directory. If it is absolute, then keep as-is.
| def base_directory(self):
"""
Accessor method for base_directory. If directory is a relative path, interpret it as relative to the
root directory. If it is absolute, then keep as-is.
"""
return normalize_directory_path(
dir_path=self._base_directory,
root_directory_path=self.data_context_root_directory,
) | [
"def",
"base_directory",
"(",
"self",
")",
":",
"return",
"normalize_directory_path",
"(",
"dir_path",
"=",
"self",
".",
"_base_directory",
",",
"root_directory_path",
"=",
"self",
".",
"data_context_root_directory",
",",
")"
] | [86, 4] | [94, 9] | python | en | ['en', 'error', 'th'] | False |
HelloApiView.get | (self, request, format=None) | Returns a list of APIView features. | Returns a list of APIView features. | def get(self, request, format=None):
"""Returns a list of APIView features."""
an_apiview = [
'Uses HTTP methods as function (get, post, patch, put, delete)',
'It is similar to a traditional Django view',
'Gives you the most control over your logic',
'Is mapped manually to URLs'
]
return Response({'message': 'Hello!', 'an_apiview': an_apiview}) | [
"def",
"get",
"(",
"self",
",",
"request",
",",
"format",
"=",
"None",
")",
":",
"an_apiview",
"=",
"[",
"'Uses HTTP methods as function (get, post, patch, put, delete)'",
",",
"'It is similar to a traditional Django view'",
",",
"'Gives you the most control over your logic'",
",",
"'Is mapped manually to URLs'",
"]",
"return",
"Response",
"(",
"{",
"'message'",
":",
"'Hello!'",
",",
"'an_apiview'",
":",
"an_apiview",
"}",
")"
] | [24, 4] | [34, 72] | python | en | ['en', 'en', 'en'] | True |
HelloApiView.post | (self, request) | Create a hello message with our name. | Create a hello message with our name. | def post(self, request):
"""Create a hello message with our name."""
serializer = serializers.HelloSerializer(data=request.data)
if serializer.is_valid():
name = serializer.data.get('name')
message = 'Hello {0}'.format(name)
return Response({'message': message})
else:
return Response(
serializer.errors, status=status.HTTP_400_BAD_REQUEST) | [
"def",
"post",
"(",
"self",
",",
"request",
")",
":",
"serializer",
"=",
"serializers",
".",
"HelloSerializer",
"(",
"data",
"=",
"request",
".",
"data",
")",
"if",
"serializer",
".",
"is_valid",
"(",
")",
":",
"name",
"=",
"serializer",
".",
"data",
".",
"get",
"(",
"'name'",
")",
"message",
"=",
"'Hello {0}'",
".",
"format",
"(",
"name",
")",
"return",
"Response",
"(",
"{",
"'message'",
":",
"message",
"}",
")",
"else",
":",
"return",
"Response",
"(",
"serializer",
".",
"errors",
",",
"status",
"=",
"status",
".",
"HTTP_400_BAD_REQUEST",
")"
] | [36, 4] | [47, 70] | python | en | ['en', 'en', 'en'] | True |
HelloApiView.put | (self, request, pk=None) | Handles updating an object. | Handles updating an object. | def put(self, request, pk=None):
"""Handles updating an object."""
return Response({'method': 'PUT'}) | [
"def",
"put",
"(",
"self",
",",
"request",
",",
"pk",
"=",
"None",
")",
":",
"return",
"Response",
"(",
"{",
"'method'",
":",
"'PUT'",
"}",
")"
] | [49, 4] | [52, 42] | python | en | ['en', 'lb', 'en'] | True |
HelloApiView.patch | (self, request, pk=None) | Patch request, only updates fields provided in the request. | Patch request, only updates fields provided in the request. | def patch(self, request, pk=None):
"""Patch request, only updates fields provided in the request."""
return Response({'method': 'PATCH'}) | [
"def",
"patch",
"(",
"self",
",",
"request",
",",
"pk",
"=",
"None",
")",
":",
"return",
"Response",
"(",
"{",
"'method'",
":",
"'PATCH'",
"}",
")"
] | [54, 4] | [57, 44] | python | en | ['en', 'en', 'en'] | True |
HelloApiView.delete | (self, request, pk=None) | Deletes and object. | Deletes and object. | def delete(self, request, pk=None):
"""Deletes and object."""
return Response({'method': 'DELETE'}) | [
"def",
"delete",
"(",
"self",
",",
"request",
",",
"pk",
"=",
"None",
")",
":",
"return",
"Response",
"(",
"{",
"'method'",
":",
"'DELETE'",
"}",
")"
] | [59, 4] | [62, 45] | python | en | ['en', 'en', 'en'] | True |
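The `get` docstring above notes that an `APIView` 'is mapped manually to URLs'; a minimal sketch of that wiring in a Django `urls.py`, with an illustrative module name and route:

```python
from django.urls import path

from profiles_api import views   # illustrative app/module name

urlpatterns = [
    path('hello-view/', views.HelloApiView.as_view()),
]
```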
HelloViewSet.list | (self, request) | Return a hello message. | Return a hello message. | def list(self, request):
"""Return a hello message."""
a_viewset = [
'Uses actions (list, create, retrieve, update, partial_update)',
'Automatically maps to URLs using Routers',
'Provides more functionality with less code.'
]
return Response({'message': 'Hello!', 'a_viewset': a_viewset}) | [
"def",
"list",
"(",
"self",
",",
"request",
")",
":",
"a_viewset",
"=",
"[",
"'Uses actions (list, create, retrieve, update, partial_update)'",
",",
"'Automatically maps to URLs using Routers'",
",",
"'Provides more functionality with less code.'",
"]",
"return",
"Response",
"(",
"{",
"'message'",
":",
"'Hello!'",
",",
"'a_viewset'",
":",
"a_viewset",
"}",
")"
] | [70, 4] | [79, 70] | python | en | ['it', 'en', 'en'] | True |
HelloViewSet.create | (self, request) | Create a new hello message. | Create a new hello message. | def create(self, request):
"""Create a new hello message."""
serializer = serializers.HelloSerializer(data=request.data)
if serializer.is_valid():
name = serializer.data.get('name')
message = 'Hello {0}'.format(name)
return Response({'message': message})
else:
return Response(
serializer.errors, status=status.HTTP_400_BAD_REQUEST) | [
"def",
"create",
"(",
"self",
",",
"request",
")",
":",
"serializer",
"=",
"serializers",
".",
"HelloSerializer",
"(",
"data",
"=",
"request",
".",
"data",
")",
"if",
"serializer",
".",
"is_valid",
"(",
")",
":",
"name",
"=",
"serializer",
".",
"data",
".",
"get",
"(",
"'name'",
")",
"message",
"=",
"'Hello {0}'",
".",
"format",
"(",
"name",
")",
"return",
"Response",
"(",
"{",
"'message'",
":",
"message",
"}",
")",
"else",
":",
"return",
"Response",
"(",
"serializer",
".",
"errors",
",",
"status",
"=",
"status",
".",
"HTTP_400_BAD_REQUEST",
")"
] | [81, 4] | [92, 70] | python | en | ['it', 'en', 'en'] | True |
HelloViewSet.retrieve | (self, request, pk=None) | Handles getting an object by its ID. | Handles getting an object by its ID. | def retrieve(self, request, pk=None):
"""Handles getting an object by its ID."""
return Response({'http_method': 'GET'}) | [
"def",
"retrieve",
"(",
"self",
",",
"request",
",",
"pk",
"=",
"None",
")",
":",
"return",
"Response",
"(",
"{",
"'http_method'",
":",
"'GET'",
"}",
")"
] | [94, 4] | [97, 47] | python | en | ['en', 'en', 'en'] | True |
HelloViewSet.update | (self, request, pk=None) | Handles updating an object. | Handles updating an object. | def update(self, request, pk=None):
"""Handles updating an object."""
return Response({'http_method': 'PUT'}) | [
"def",
"update",
"(",
"self",
",",
"request",
",",
"pk",
"=",
"None",
")",
":",
"return",
"Response",
"(",
"{",
"'http_method'",
":",
"'PUT'",
"}",
")"
] | [99, 4] | [102, 47] | python | en | ['en', 'lb', 'en'] | True |
HelloViewSet.partial_update | (self, request, pk=None) | Handles updating part of an object. | Handles updating part of an object. | def partial_update(self, request, pk=None):
"""Handles updating part of an object."""
return Response({'http_method': 'PATCH'}) | [
"def",
"partial_update",
"(",
"self",
",",
"request",
",",
"pk",
"=",
"None",
")",
":",
"return",
"Response",
"(",
"{",
"'http_method'",
":",
"'PATCH'",
"}",
")"
] | [104, 4] | [107, 49] | python | en | ['en', 'en', 'en'] | True |
HelloViewSet.destroy | (self, request, pk=None) | Handles removing an object. | Handles removing an object. | def destroy(self, request, pk=None):
"""Handles removing an object."""
return Response({'http_method': 'DELETE'}) | [
"def",
"destroy",
"(",
"self",
",",
"request",
",",
"pk",
"=",
"None",
")",
":",
"return",
"Response",
"(",
"{",
"'http_method'",
":",
"'DELETE'",
"}",
")"
] | [109, 4] | [112, 50] | python | en | ['en', 'en', 'en'] | True |
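The `list` docstring above notes that a ViewSet 'automatically maps to URLs using Routers'; a minimal sketch of that registration, with an illustrative prefix and basename (older DRF releases spell the argument `base_name`):

```python
from rest_framework.routers import DefaultRouter

from profiles_api import views   # illustrative app/module name

router = DefaultRouter()
router.register('hello-viewset', views.HelloViewSet, basename='hello-viewset')

urlpatterns = router.urls
```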
UserProfileFeedViewSet.perform_create | (self, serializer) | Set the user profile to the logged in the user | Set the user profile to the logged in the user | def perform_create(self, serializer):
"""Set the user profile to the logged in the user"""
serializer.save(user_profile=self.request.user) | [
"def",
"perform_create",
"(",
"self",
",",
"serializer",
")",
":",
"serializer",
".",
"save",
"(",
"user_profile",
"=",
"self",
".",
"request",
".",
"user",
")"
] | [142, 4] | [145, 55] | python | en | ['en', 'en', 'en'] | True |
ElasticsearchRetriever.__init__ | (self, document_store: ElasticsearchDocumentStore, custom_query: str = None) |
:param document_store: an instance of a DocumentStore to retrieve documents from.
:param custom_query: query string as per Elasticsearch DSL with a mandatory query placeholder(query).
Optionally, ES `filter` clause can be added where the values of `terms` are placeholders
that get substituted during runtime. The placeholder(${filter_name_1}, ${filter_name_2}..)
names must match with the filters dict supplied in self.retrieve().
::
**An example custom_query:**
```python
| {
| "size": 10,
| "query": {
| "bool": {
| "should": [{"multi_match": {
| "query": ${query}, // mandatory query placeholder
| "type": "most_fields",
| "fields": ["text", "title"]}}],
| "filter": [ // optional custom filters
| {"terms": {"year": ${years}}},
| {"terms": {"quarter": ${quarters}}},
| {"range": {"date": {"gte": ${date}}}}
| ],
| }
| },
| }
```
**For this custom_query, a sample retrieve() could be:**
```python
| self.retrieve(query="Why did the revenue increase?",
| filters={"years": ["2019"], "quarters": ["Q1", "Q2"]})
```
|
:param document_store: an instance of a DocumentStore to retrieve documents from.
:param custom_query: query string as per Elasticsearch DSL with a mandatory query placeholder(query). | def __init__(self, document_store: ElasticsearchDocumentStore, custom_query: str = None):
"""
:param document_store: an instance of a DocumentStore to retrieve documents from.
:param custom_query: query string as per Elasticsearch DSL with a mandatory query placeholder(query).
Optionally, ES `filter` clause can be added where the values of `terms` are placeholders
that get substituted during runtime. The placeholder(${filter_name_1}, ${filter_name_2}..)
names must match with the filters dict supplied in self.retrieve().
::
**An example custom_query:**
```python
| {
| "size": 10,
| "query": {
| "bool": {
| "should": [{"multi_match": {
| "query": ${query}, // mandatory query placeholder
| "type": "most_fields",
| "fields": ["text", "title"]}}],
| "filter": [ // optional custom filters
| {"terms": {"year": ${years}}},
| {"terms": {"quarter": ${quarters}}},
| {"range": {"date": {"gte": ${date}}}}
| ],
| }
| },
| }
```
**For this custom_query, a sample retrieve() could be:**
```python
| self.retrieve(query="Why did the revenue increase?",
| filters={"years": ["2019"], "quarters": ["Q1", "Q2"]})
```
"""
self.document_store: ElasticsearchDocumentStore = document_store
self.custom_query = custom_query | [
"def",
"__init__",
"(",
"self",
",",
"document_store",
":",
"ElasticsearchDocumentStore",
",",
"custom_query",
":",
"str",
"=",
"None",
")",
":",
"self",
".",
"document_store",
":",
"ElasticsearchDocumentStore",
"=",
"document_store",
"self",
".",
"custom_query",
"=",
"custom_query"
] | [17, 4] | [54, 40] | python | en | ['en', 'error', 'th'] | False |
ElasticsearchRetriever.retrieve | (self, query: str, filters: dict = None, top_k: int = 10, index: str = None) |
Scan through documents in DocumentStore and return a small number documents
that are most relevant to the query.
:param query: The query
:param filters: A dictionary where the keys specify a metadata field and the value is a list of accepted values for that field
:param top_k: How many documents to return per query.
:param index: The name of the index in the DocumentStore from which to retrieve documents
|
Scan through documents in DocumentStore and return a small number documents
that are most relevant to the query. | def retrieve(self, query: str, filters: dict = None, top_k: int = 10, index: str = None) -> List[Document]:
"""
Scan through documents in DocumentStore and return a small number documents
that are most relevant to the query.
:param query: The query
:param filters: A dictionary where the keys specify a metadata field and the value is a list of accepted values for that field
:param top_k: How many documents to return per query.
:param index: The name of the index in the DocumentStore from which to retrieve documents
"""
if index is None:
index = self.document_store.index
documents = self.document_store.query(query, filters, top_k, self.custom_query, index)
return documents | [
"def",
"retrieve",
"(",
"self",
",",
"query",
":",
"str",
",",
"filters",
":",
"dict",
"=",
"None",
",",
"top_k",
":",
"int",
"=",
"10",
",",
"index",
":",
"str",
"=",
"None",
")",
"->",
"List",
"[",
"Document",
"]",
":",
"if",
"index",
"is",
"None",
":",
"index",
"=",
"self",
".",
"document_store",
".",
"index",
"documents",
"=",
"self",
".",
"document_store",
".",
"query",
"(",
"query",
",",
"filters",
",",
"top_k",
",",
"self",
".",
"custom_query",
",",
"index",
")",
"return",
"documents"
] | [56, 4] | [70, 24] | python | en | ['en', 'error', 'th'] | False |
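A minimal sketch of driving this retriever against a running Elasticsearch instance; the import paths follow an older (pre-1.0) Haystack layout and, like the host and index names, are assumptions:

```python
# Import paths are assumptions for a pre-1.0 Haystack release.
from haystack.document_store.elasticsearch import ElasticsearchDocumentStore
from haystack.retriever.sparse import ElasticsearchRetriever

document_store = ElasticsearchDocumentStore(host="localhost", index="document")
retriever = ElasticsearchRetriever(document_store=document_store)

docs = retriever.retrieve(query="Why did the revenue increase?", top_k=5)
for doc in docs:
    print(doc.id, doc.text[:80])
```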
ElasticsearchFilterOnlyRetriever.retrieve | (self, query: str, filters: dict = None, top_k: int = 10, index: str = None) |
Scan through documents in DocumentStore and return a small number documents
that are most relevant to the query.
:param query: The query
:param filters: A dictionary where the keys specify a metadata field and the value is a list of accepted values for that field
:param top_k: How many documents to return per query.
:param index: The name of the index in the DocumentStore from which to retrieve documents
|
Scan through documents in DocumentStore and return a small number documents
that are most relevant to the query. | def retrieve(self, query: str, filters: dict = None, top_k: int = 10, index: str = None) -> List[Document]:
"""
Scan through documents in DocumentStore and return a small number documents
that are most relevant to the query.
:param query: The query
:param filters: A dictionary where the keys specify a metadata field and the value is a list of accepted values for that field
:param top_k: How many documents to return per query.
:param index: The name of the index in the DocumentStore from which to retrieve documents
"""
if index is None:
index = self.document_store.index
documents = self.document_store.query(query=None, filters=filters, top_k=top_k,
custom_query=self.custom_query, index=index)
return documents | [
"def",
"retrieve",
"(",
"self",
",",
"query",
":",
"str",
",",
"filters",
":",
"dict",
"=",
"None",
",",
"top_k",
":",
"int",
"=",
"10",
",",
"index",
":",
"str",
"=",
"None",
")",
"->",
"List",
"[",
"Document",
"]",
":",
"if",
"index",
"is",
"None",
":",
"index",
"=",
"self",
".",
"document_store",
".",
"index",
"documents",
"=",
"self",
".",
"document_store",
".",
"query",
"(",
"query",
"=",
"None",
",",
"filters",
"=",
"filters",
",",
"top_k",
"=",
"top_k",
",",
"custom_query",
"=",
"self",
".",
"custom_query",
",",
"index",
"=",
"index",
")",
"return",
"documents"
] | [79, 4] | [93, 24] | python | en | ['en', 'error', 'th'] | False |
TfidfRetriever._get_all_paragraphs | (self) |
Split the list of documents in paragraphs
|
Split the list of documents in paragraphs
| def _get_all_paragraphs(self) -> List[Paragraph]:
"""
Split the list of documents in paragraphs
"""
documents = self.document_store.get_all_documents()
paragraphs = []
p_id = 0
for doc in documents:
for p in doc.text.split("\n\n"): # TODO: this assumes paragraphs are separated by "\n\n". Can be switched to paragraph tokenizer.
if not p.strip(): # skip empty paragraphs
continue
paragraphs.append(
Paragraph(document_id=doc.id, paragraph_id=p_id, text=(p,), meta=doc.meta)
)
p_id += 1
logger.info(f"Found {len(paragraphs)} candidate paragraphs from {len(documents)} docs in DB")
return paragraphs | [
"def",
"_get_all_paragraphs",
"(",
"self",
")",
"->",
"List",
"[",
"Paragraph",
"]",
":",
"documents",
"=",
"self",
".",
"document_store",
".",
"get_all_documents",
"(",
")",
"paragraphs",
"=",
"[",
"]",
"p_id",
"=",
"0",
"for",
"doc",
"in",
"documents",
":",
"for",
"p",
"in",
"doc",
".",
"text",
".",
"split",
"(",
"\"\\n\\n\"",
")",
":",
"# TODO: this assumes paragraphs are separated by \"\\n\\n\". Can be switched to paragraph tokenizer.",
"if",
"not",
"p",
".",
"strip",
"(",
")",
":",
"# skip empty paragraphs",
"continue",
"paragraphs",
".",
"append",
"(",
"Paragraph",
"(",
"document_id",
"=",
"doc",
".",
"id",
",",
"paragraph_id",
"=",
"p_id",
",",
"text",
"=",
"(",
"p",
",",
")",
",",
"meta",
"=",
"doc",
".",
"meta",
")",
")",
"p_id",
"+=",
"1",
"logger",
".",
"info",
"(",
"f\"Found {len(paragraphs)} candidate paragraphs from {len(documents)} docs in DB\"",
")",
"return",
"paragraphs"
] | [122, 4] | [139, 25] | python | en | ['en', 'error', 'th'] | False |
TfidfRetriever.retrieve | (self, query: str, filters: dict = None, top_k: int = 10, index: str = None) |
Scan through documents in DocumentStore and return a small number documents
that are most relevant to the query.
:param query: The query
:param filters: A dictionary where the keys specify a metadata field and the value is a list of accepted values for that field
:param top_k: How many documents to return per query.
:param index: The name of the index in the DocumentStore from which to retrieve documents
|
Scan through documents in DocumentStore and return a small number documents
that are most relevant to the query. | def retrieve(self, query: str, filters: dict = None, top_k: int = 10, index: str = None) -> List[Document]:
"""
Scan through documents in DocumentStore and return a small number documents
that are most relevant to the query.
:param query: The query
:param filters: A dictionary where the keys specify a metadata field and the value is a list of accepted values for that field
:param top_k: How many documents to return per query.
:param index: The name of the index in the DocumentStore from which to retrieve documents
"""
if self.df is None:
raise Exception("fit() needs to called before retrieve()")
if filters:
raise NotImplementedError("Filters are not implemented in TfidfRetriever.")
if index:
raise NotImplementedError("Switching index is not supported in TfidfRetriever.")
# get scores
indices_and_scores = self._calc_scores(query)
# rank paragraphs
df_sliced = self.df.loc[indices_and_scores.keys()]
df_sliced = df_sliced[:top_k]
logger.debug(
f"Identified {df_sliced.shape[0]} candidates via retriever:\n {df_sliced.to_string(col_space=10, index=False)}"
)
# get actual content for the top candidates
paragraphs = list(df_sliced.text.values)
meta_data = [{"document_id": row["document_id"], "paragraph_id": row["paragraph_id"], "meta": row.get("meta", {})}
for idx, row in df_sliced.iterrows()]
documents = []
for para, meta in zip(paragraphs, meta_data):
documents.append(
Document(
id=meta["document_id"],
text=para,
meta=meta.get("meta", {})
))
return documents | [
"def",
"retrieve",
"(",
"self",
",",
"query",
":",
"str",
",",
"filters",
":",
"dict",
"=",
"None",
",",
"top_k",
":",
"int",
"=",
"10",
",",
"index",
":",
"str",
"=",
"None",
")",
"->",
"List",
"[",
"Document",
"]",
":",
"if",
"self",
".",
"df",
"is",
"None",
":",
"raise",
"Exception",
"(",
"\"fit() needs to called before retrieve()\"",
")",
"if",
"filters",
":",
"raise",
"NotImplementedError",
"(",
"\"Filters are not implemented in TfidfRetriever.\"",
")",
"if",
"index",
":",
"raise",
"NotImplementedError",
"(",
"\"Switching index is not supported in TfidfRetriever.\"",
")",
"# get scores",
"indices_and_scores",
"=",
"self",
".",
"_calc_scores",
"(",
"query",
")",
"# rank paragraphs",
"df_sliced",
"=",
"self",
".",
"df",
".",
"loc",
"[",
"indices_and_scores",
".",
"keys",
"(",
")",
"]",
"df_sliced",
"=",
"df_sliced",
"[",
":",
"top_k",
"]",
"logger",
".",
"debug",
"(",
"f\"Identified {df_sliced.shape[0]} candidates via retriever:\\n {df_sliced.to_string(col_space=10, index=False)}\"",
")",
"# get actual content for the top candidates",
"paragraphs",
"=",
"list",
"(",
"df_sliced",
".",
"text",
".",
"values",
")",
"meta_data",
"=",
"[",
"{",
"\"document_id\"",
":",
"row",
"[",
"\"document_id\"",
"]",
",",
"\"paragraph_id\"",
":",
"row",
"[",
"\"paragraph_id\"",
"]",
",",
"\"meta\"",
":",
"row",
".",
"get",
"(",
"\"meta\"",
",",
"{",
"}",
")",
"}",
"for",
"idx",
",",
"row",
"in",
"df_sliced",
".",
"iterrows",
"(",
")",
"]",
"documents",
"=",
"[",
"]",
"for",
"para",
",",
"meta",
"in",
"zip",
"(",
"paragraphs",
",",
"meta_data",
")",
":",
"documents",
".",
"append",
"(",
"Document",
"(",
"id",
"=",
"meta",
"[",
"\"document_id\"",
"]",
",",
"text",
"=",
"para",
",",
"meta",
"=",
"meta",
".",
"get",
"(",
"\"meta\"",
",",
"{",
"}",
")",
")",
")",
"return",
"documents"
] | [151, 4] | [194, 24] | python | en | ['en', 'error', 'th'] | False |
TfidfRetriever.fit | (self) |
Performing training on this class according to the TF-IDF algorithm.
|
Performing training on this class according to the TF-IDF algorithm.
| def fit(self):
"""
Performing training on this class according to the TF-IDF algorithm.
"""
if not self.paragraphs or len(self.paragraphs) == 0:
self.paragraphs = self._get_all_paragraphs()
if not self.paragraphs or len(self.paragraphs) == 0:
logger.warning("Fit method called with empty document store")
return
self.df = pd.DataFrame.from_dict(self.paragraphs)
self.df["text"] = self.df["text"].apply(lambda x: " ".join(x))
self.tfidf_matrix = self.vectorizer.fit_transform(self.df["text"]) | [
"def",
"fit",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"paragraphs",
"or",
"len",
"(",
"self",
".",
"paragraphs",
")",
"==",
"0",
":",
"self",
".",
"paragraphs",
"=",
"self",
".",
"_get_all_paragraphs",
"(",
")",
"if",
"not",
"self",
".",
"paragraphs",
"or",
"len",
"(",
"self",
".",
"paragraphs",
")",
"==",
"0",
":",
"logger",
".",
"warning",
"(",
"\"Fit method called with empty document store\"",
")",
"return",
"self",
".",
"df",
"=",
"pd",
".",
"DataFrame",
".",
"from_dict",
"(",
"self",
".",
"paragraphs",
")",
"self",
".",
"df",
"[",
"\"text\"",
"]",
"=",
"self",
".",
"df",
"[",
"\"text\"",
"]",
".",
"apply",
"(",
"lambda",
"x",
":",
"\" \"",
".",
"join",
"(",
"x",
")",
")",
"self",
".",
"tfidf_matrix",
"=",
"self",
".",
"vectorizer",
".",
"fit_transform",
"(",
"self",
".",
"df",
"[",
"\"text\"",
"]",
")"
] | [196, 4] | [208, 74] | python | en | ['en', 'error', 'th'] | False |
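`retrieve()` raises unless `fit()` has been run, so the minimal flow is write documents, fit, then retrieve. A sketch with an in-memory store; the import paths again follow an older Haystack layout and are assumptions:

```python
# Import paths are assumptions for a pre-1.0 Haystack release.
from haystack.document_store.memory import InMemoryDocumentStore
from haystack.retriever.sparse import TfidfRetriever

document_store = InMemoryDocumentStore()
document_store.write_documents([
    {"text": "Paris is the capital of France.\n\nBerlin is the capital of Germany."},
])

retriever = TfidfRetriever(document_store=document_store)
retriever.fit()   # builds the TF-IDF matrix over the "\n\n"-split paragraphs
print(retriever.retrieve(query="capital of France", top_k=1))
```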
synthetic_data | (mode=1, n=1000, p=5, sigma=1.0, adj=0.) | Synthetic data in Nie X. and Wager S. (2018) 'Quasi-Oracle Estimation of Heterogeneous Treatment Effects'
Args:
mode (int, optional): mode of the simulation: \
1 for difficult nuisance components and an easy treatment effect. \
2 for a randomized trial. \
3 for an easy propensity and a difficult baseline. \
4 for unrelated treatment and control groups. \
5 for a hidden confounder biasing treatment.
n (int, optional): number of observations
p (int optional): number of covariates (>=5)
sigma (float): standard deviation of the error term
adj (float): adjustment term for the distribution of propensity, e. Higher values shift the distribution to 0.
It does not apply to mode == 2 or 3.
Returns:
(tuple): Synthetically generated samples with the following outputs:
- y ((n,)-array): outcome variable.
- X ((n,p)-ndarray): independent variables.
- w ((n,)-array): treatment flag with value 0 or 1.
- tau ((n,)-array): individual treatment effect.
- b ((n,)-array): expected outcome.
- e ((n,)-array): propensity of receiving treatment.
| Synthetic data in Nie X. and Wager S. (2018) 'Quasi-Oracle Estimation of Heterogeneous Treatment Effects' | def synthetic_data(mode=1, n=1000, p=5, sigma=1.0, adj=0.):
''' Synthetic data in Nie X. and Wager S. (2018) 'Quasi-Oracle Estimation of Heterogeneous Treatment Effects'
Args:
mode (int, optional): mode of the simulation: \
1 for difficult nuisance components and an easy treatment effect. \
2 for a randomized trial. \
3 for an easy propensity and a difficult baseline. \
4 for unrelated treatment and control groups. \
5 for a hidden confounder biasing treatment.
n (int, optional): number of observations
p (int optional): number of covariates (>=5)
sigma (float): standard deviation of the error term
adj (float): adjustment term for the distribution of propensity, e. Higher values shift the distribution to 0.
It does not apply to mode == 2 or 3.
Returns:
(tuple): Synthetically generated samples with the following outputs:
- y ((n,)-array): outcome variable.
- X ((n,p)-ndarray): independent variables.
- w ((n,)-array): treatment flag with value 0 or 1.
- tau ((n,)-array): individual treatment effect.
- b ((n,)-array): expected outcome.
- e ((n,)-array): propensity of receiving treatment.
'''
catalog = {1: simulate_nuisance_and_easy_treatment,
2: simulate_randomized_trial,
3: simulate_easy_propensity_difficult_baseline,
4: simulate_unrelated_treatment_control,
5: simulate_hidden_confounder}
assert mode in catalog, 'Invalid mode {}. Should be one of {}'.format(mode, set(catalog))
return catalog[mode](n, p, sigma, adj) | [
"def",
"synthetic_data",
"(",
"mode",
"=",
"1",
",",
"n",
"=",
"1000",
",",
"p",
"=",
"5",
",",
"sigma",
"=",
"1.0",
",",
"adj",
"=",
"0.",
")",
":",
"catalog",
"=",
"{",
"1",
":",
"simulate_nuisance_and_easy_treatment",
",",
"2",
":",
"simulate_randomized_trial",
",",
"3",
":",
"simulate_easy_propensity_difficult_baseline",
",",
"4",
":",
"simulate_unrelated_treatment_control",
",",
"5",
":",
"simulate_hidden_confounder",
"}",
"assert",
"mode",
"in",
"catalog",
",",
"'Invalid mode {}. Should be one of {}'",
".",
"format",
"(",
"mode",
",",
"set",
"(",
"catalog",
")",
")",
"return",
"catalog",
"[",
"mode",
"]",
"(",
"n",
",",
"p",
",",
"sigma",
",",
"adj",
")"
] | [8, 0] | [42, 42] | python | en | ['en', 'en', 'en'] | True |
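A short example of calling the dispatcher documented above and unpacking its six outputs; the `causalml.dataset` import path is an assumption, since the row only shows the definition:

```python
import numpy as np

from causalml.dataset import synthetic_data   # assumed import path

y, X, w, tau, b, e = synthetic_data(mode=1, n=1000, p=5, sigma=1.0)

print(X.shape)                          # (1000, 5) covariates
print(w[:10])                           # binary treatment flags drawn with propensity e
print(round(float(np.mean(tau)), 3))    # average of the true treatment effects
```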
simulate_nuisance_and_easy_treatment | (n=1000, p=5, sigma=1.0, adj=0.) | Synthetic data with a difficult nuisance components and an easy treatment effect
From Setup A in Nie X. and Wager S. (2018) 'Quasi-Oracle Estimation of Heterogeneous Treatment Effects'
Args:
n (int, optional): number of observations
p (int optional): number of covariates (>=5)
sigma (float): standard deviation of the error term
adj (float): adjustment term for the distribution of propensity, e. Higher values shift the distribution to 0.
Returns:
(tuple): Synthetically generated samples with the following outputs:
- y ((n,)-array): outcome variable.
- X ((n,p)-ndarray): independent variables.
- w ((n,)-array): treatment flag with value 0 or 1.
- tau ((n,)-array): individual treatment effect.
- b ((n,)-array): expected outcome.
- e ((n,)-array): propensity of receiving treatment.
| Synthetic data with a difficult nuisance components and an easy treatment effect
From Setup A in Nie X. and Wager S. (2018) 'Quasi-Oracle Estimation of Heterogeneous Treatment Effects' | def simulate_nuisance_and_easy_treatment(n=1000, p=5, sigma=1.0, adj=0.):
''' Synthetic data with a difficult nuisance components and an easy treatment effect
From Setup A in Nie X. and Wager S. (2018) 'Quasi-Oracle Estimation of Heterogeneous Treatment Effects'
Args:
n (int, optional): number of observations
p (int optional): number of covariates (>=5)
sigma (float): standard deviation of the error term
adj (float): adjustment term for the distribution of propensity, e. Higher values shift the distribution to 0.
Returns:
(tuple): Synthetically generated samples with the following outputs:
- y ((n,)-array): outcome variable.
- X ((n,p)-ndarray): independent variables.
- w ((n,)-array): treatment flag with value 0 or 1.
- tau ((n,)-array): individual treatment effect.
- b ((n,)-array): expected outcome.
- e ((n,)-array): propensity of receiving treatment.
'''
X = np.random.uniform(size=n*p).reshape((n, -1))
b = np.sin(np.pi * X[:, 0] * X[:, 1]) + 2 * (X[:, 2] - 0.5) ** 2 + X[:, 3] + 0.5 * X[:, 4]
eta = 0.1
e = np.maximum(np.repeat(eta, n), np.minimum(np.sin(np.pi * X[:, 0] * X[:, 1]), np.repeat(1-eta, n)))
e = expit(logit(e) - adj)
tau = (X[:, 0] + X[:, 1]) / 2
w = np.random.binomial(1, e, size=n)
y = b + (w - 0.5) * tau + sigma * np.random.normal(size=n)
return y, X, w, tau, b, e | [
"def",
"simulate_nuisance_and_easy_treatment",
"(",
"n",
"=",
"1000",
",",
"p",
"=",
"5",
",",
"sigma",
"=",
"1.0",
",",
"adj",
"=",
"0.",
")",
":",
"X",
"=",
"np",
".",
"random",
".",
"uniform",
"(",
"size",
"=",
"n",
"*",
"p",
")",
".",
"reshape",
"(",
"(",
"n",
",",
"-",
"1",
")",
")",
"b",
"=",
"np",
".",
"sin",
"(",
"np",
".",
"pi",
"*",
"X",
"[",
":",
",",
"0",
"]",
"*",
"X",
"[",
":",
",",
"1",
"]",
")",
"+",
"2",
"*",
"(",
"X",
"[",
":",
",",
"2",
"]",
"-",
"0.5",
")",
"**",
"2",
"+",
"X",
"[",
":",
",",
"3",
"]",
"+",
"0.5",
"*",
"X",
"[",
":",
",",
"4",
"]",
"eta",
"=",
"0.1",
"e",
"=",
"np",
".",
"maximum",
"(",
"np",
".",
"repeat",
"(",
"eta",
",",
"n",
")",
",",
"np",
".",
"minimum",
"(",
"np",
".",
"sin",
"(",
"np",
".",
"pi",
"*",
"X",
"[",
":",
",",
"0",
"]",
"*",
"X",
"[",
":",
",",
"1",
"]",
")",
",",
"np",
".",
"repeat",
"(",
"1",
"-",
"eta",
",",
"n",
")",
")",
")",
"e",
"=",
"expit",
"(",
"logit",
"(",
"e",
")",
"-",
"adj",
")",
"tau",
"=",
"(",
"X",
"[",
":",
",",
"0",
"]",
"+",
"X",
"[",
":",
",",
"1",
"]",
")",
"/",
"2",
"w",
"=",
"np",
".",
"random",
".",
"binomial",
"(",
"1",
",",
"e",
",",
"size",
"=",
"n",
")",
"y",
"=",
"b",
"+",
"(",
"w",
"-",
"0.5",
")",
"*",
"tau",
"+",
"sigma",
"*",
"np",
".",
"random",
".",
"normal",
"(",
"size",
"=",
"n",
")",
"return",
"y",
",",
"X",
",",
"w",
",",
"tau",
",",
"b",
",",
"e"
] | [45, 0] | [76, 29] | python | en | ['en', 'en', 'en'] | True |
simulate_randomized_trial | (n=1000, p=5, sigma=1.0, adj=0.) | Synthetic data of a randomized trial
From Setup B in Nie X. and Wager S. (2018) 'Quasi-Oracle Estimation of Heterogeneous Treatment Effects'
Args:
n (int, optional): number of observations
p (int optional): number of covariates (>=5)
sigma (float): standard deviation of the error term
adj (float): no effect. added for consistency
Returns:
(tuple): Synthetically generated samples with the following outputs:
- y ((n,)-array): outcome variable.
- X ((n,p)-ndarray): independent variables.
- w ((n,)-array): treatment flag with value 0 or 1.
- tau ((n,)-array): individual treatment effect.
- b ((n,)-array): expected outcome.
- e ((n,)-array): propensity of receiving treatment.
| Synthetic data of a randomized trial
From Setup B in Nie X. and Wager S. (2018) 'Quasi-Oracle Estimation of Heterogeneous Treatment Effects' | def simulate_randomized_trial(n=1000, p=5, sigma=1.0, adj=0.):
''' Synthetic data of a randomized trial
From Setup B in Nie X. and Wager S. (2018) 'Quasi-Oracle Estimation of Heterogeneous Treatment Effects'
Args:
n (int, optional): number of observations
p (int optional): number of covariates (>=5)
sigma (float): standard deviation of the error term
adj (float): no effect. added for consistency
Returns:
(tuple): Synthetically generated samples with the following outputs:
- y ((n,)-array): outcome variable.
- X ((n,p)-ndarray): independent variables.
- w ((n,)-array): treatment flag with value 0 or 1.
- tau ((n,)-array): individual treatment effect.
- b ((n,)-array): expected outcome.
- e ((n,)-array): propensity of receiving treatment.
'''
X = np.random.normal(size=n*p).reshape((n, -1))
b = np.maximum(np.repeat(0.0, n), X[:, 0] + X[:, 1], X[:, 2]) + np.maximum(np.repeat(0.0, n), X[:, 3] + X[:, 4])
e = np.repeat(0.5, n)
tau = X[:, 0] + np.log1p(np.exp(X[:, 1]))
w = np.random.binomial(1, e, size=n)
y = b + (w - 0.5) * tau + sigma * np.random.normal(size=n)
return y, X, w, tau, b, e | [
"def",
"simulate_randomized_trial",
"(",
"n",
"=",
"1000",
",",
"p",
"=",
"5",
",",
"sigma",
"=",
"1.0",
",",
"adj",
"=",
"0.",
")",
":",
"X",
"=",
"np",
".",
"random",
".",
"normal",
"(",
"size",
"=",
"n",
"*",
"p",
")",
".",
"reshape",
"(",
"(",
"n",
",",
"-",
"1",
")",
")",
"b",
"=",
"np",
".",
"maximum",
"(",
"np",
".",
"repeat",
"(",
"0.0",
",",
"n",
")",
",",
"X",
"[",
":",
",",
"0",
"]",
"+",
"X",
"[",
":",
",",
"1",
"]",
",",
"X",
"[",
":",
",",
"2",
"]",
")",
"+",
"np",
".",
"maximum",
"(",
"np",
".",
"repeat",
"(",
"0.0",
",",
"n",
")",
",",
"X",
"[",
":",
",",
"3",
"]",
"+",
"X",
"[",
":",
",",
"4",
"]",
")",
"e",
"=",
"np",
".",
"repeat",
"(",
"0.5",
",",
"n",
")",
"tau",
"=",
"X",
"[",
":",
",",
"0",
"]",
"+",
"np",
".",
"log1p",
"(",
"np",
".",
"exp",
"(",
"X",
"[",
":",
",",
"1",
"]",
")",
")",
"w",
"=",
"np",
".",
"random",
".",
"binomial",
"(",
"1",
",",
"e",
",",
"size",
"=",
"n",
")",
"y",
"=",
"b",
"+",
"(",
"w",
"-",
"0.5",
")",
"*",
"tau",
"+",
"sigma",
"*",
"np",
".",
"random",
".",
"normal",
"(",
"size",
"=",
"n",
")",
"return",
"y",
",",
"X",
",",
"w",
",",
"tau",
",",
"b",
",",
"e"
] | [
79,
0
] | [
109,
29
] | python | en | ['en', 'en', 'en'] | True |
simulate_easy_propensity_difficult_baseline | (n=1000, p=5, sigma=1.0, adj=0.) | Synthetic data with easy propensity and a difficult baseline
From Setup C in Nie X. and Wager S. (2018) 'Quasi-Oracle Estimation of Heterogeneous Treatment Effects'
Args:
n (int, optional): number of observations
p (int optional): number of covariates (>=3)
sigma (float): standard deviation of the error term
adj (float): no effect. added for consistency
Returns:
(tuple): Synthetically generated samples with the following outputs:
- y ((n,)-array): outcome variable.
- X ((n,p)-ndarray): independent variables.
- w ((n,)-array): treatment flag with value 0 or 1.
- tau ((n,)-array): individual treatment effect.
- b ((n,)-array): expected outcome.
- e ((n,)-array): propensity of receiving treatment.
| Synthetic data with easy propensity and a difficult baseline
From Setup C in Nie X. and Wager S. (2018) 'Quasi-Oracle Estimation of Heterogeneous Treatment Effects' | def simulate_easy_propensity_difficult_baseline(n=1000, p=5, sigma=1.0, adj=0.):
''' Synthetic data with easy propensity and a difficult baseline
From Setup C in Nie X. and Wager S. (2018) 'Quasi-Oracle Estimation of Heterogeneous Treatment Effects'
Args:
n (int, optional): number of observations
p (int optional): number of covariates (>=3)
sigma (float): standard deviation of the error term
adj (float): no effect. added for consistency
Returns:
(tuple): Synthetically generated samples with the following outputs:
- y ((n,)-array): outcome variable.
- X ((n,p)-ndarray): independent variables.
- w ((n,)-array): treatment flag with value 0 or 1.
- tau ((n,)-array): individual treatment effect.
- b ((n,)-array): expected outcome.
- e ((n,)-array): propensity of receiving treatment.
'''
X = np.random.normal(size=n*p).reshape((n, -1))
b = 2 * np.log1p(np.exp(X[:, 0] + X[:, 1] + X[:, 2]))
e = 1/(1 + np.exp(X[:, 1] + X[:, 2]))
tau = np.repeat(1.0, n)
w = np.random.binomial(1, e, size=n)
y = b + (w - 0.5) * tau + sigma * np.random.normal(size=n)
return y, X, w, tau, b, e | [
"def",
"simulate_easy_propensity_difficult_baseline",
"(",
"n",
"=",
"1000",
",",
"p",
"=",
"5",
",",
"sigma",
"=",
"1.0",
",",
"adj",
"=",
"0.",
")",
":",
"X",
"=",
"np",
".",
"random",
".",
"normal",
"(",
"size",
"=",
"n",
"*",
"p",
")",
".",
"reshape",
"(",
"(",
"n",
",",
"-",
"1",
")",
")",
"b",
"=",
"2",
"*",
"np",
".",
"log1p",
"(",
"np",
".",
"exp",
"(",
"X",
"[",
":",
",",
"0",
"]",
"+",
"X",
"[",
":",
",",
"1",
"]",
"+",
"X",
"[",
":",
",",
"2",
"]",
")",
")",
"e",
"=",
"1",
"/",
"(",
"1",
"+",
"np",
".",
"exp",
"(",
"X",
"[",
":",
",",
"1",
"]",
"+",
"X",
"[",
":",
",",
"2",
"]",
")",
")",
"tau",
"=",
"np",
".",
"repeat",
"(",
"1.0",
",",
"n",
")",
"w",
"=",
"np",
".",
"random",
".",
"binomial",
"(",
"1",
",",
"e",
",",
"size",
"=",
"n",
")",
"y",
"=",
"b",
"+",
"(",
"w",
"-",
"0.5",
")",
"*",
"tau",
"+",
"sigma",
"*",
"np",
".",
"random",
".",
"normal",
"(",
"size",
"=",
"n",
")",
"return",
"y",
",",
"X",
",",
"w",
",",
"tau",
",",
"b",
",",
"e"
] | [
112,
0
] | [
141,
29
] | python | en | ['en', 'en', 'en'] | True |
simulate_unrelated_treatment_control | (n=1000, p=5, sigma=1.0, adj=0.) | Synthetic data with unrelated treatment and control groups.
From Setup D in Nie X. and Wager S. (2018) 'Quasi-Oracle Estimation of Heterogeneous Treatment Effects'
Args:
n (int, optional): number of observations
p (int optional): number of covariates (>=3)
sigma (float): standard deviation of the error term
adj (float): adjustment term for the distribution of propensity, e. Higher values shift the distribution to 0.
Returns:
(tuple): Synthetically generated samples with the following outputs:
- y ((n,)-array): outcome variable.
- X ((n,p)-ndarray): independent variables.
- w ((n,)-array): treatment flag with value 0 or 1.
- tau ((n,)-array): individual treatment effect.
- b ((n,)-array): expected outcome.
- e ((n,)-array): propensity of receiving treatment.
| Synthetic data with unrelated treatment and control groups.
From Setup D in Nie X. and Wager S. (2018) 'Quasi-Oracle Estimation of Heterogeneous Treatment Effects' | def simulate_unrelated_treatment_control(n=1000, p=5, sigma=1.0, adj=0.):
''' Synthetic data with unrelated treatment and control groups.
From Setup D in Nie X. and Wager S. (2018) 'Quasi-Oracle Estimation of Heterogeneous Treatment Effects'
Args:
n (int, optional): number of observations
p (int optional): number of covariates (>=3)
sigma (float): standard deviation of the error term
adj (float): adjustment term for the distribution of propensity, e. Higher values shift the distribution to 0.
Returns:
(tuple): Synthetically generated samples with the following outputs:
- y ((n,)-array): outcome variable.
- X ((n,p)-ndarray): independent variables.
- w ((n,)-array): treatment flag with value 0 or 1.
- tau ((n,)-array): individual treatment effect.
- b ((n,)-array): expected outcome.
- e ((n,)-array): propensity of receiving treatment.
'''
X = np.random.normal(size=n*p).reshape((n, -1))
b = (np.maximum(np.repeat(0.0, n), X[:, 0] + X[:, 1] + X[:, 2])
+ np.maximum(np.repeat(0.0, n), X[:, 3] + X[:, 4])) / 2
e = 1/(1 + np.exp(-X[:, 0]) + np.exp(-X[:, 1]))
e = expit(logit(e) - adj)
tau = np.maximum(np.repeat(0.0, n), X[:, 0] + X[:, 1] + X[:, 2]) - np.maximum(np.repeat(0.0, n), X[:, 3] + X[:, 4])
w = np.random.binomial(1, e, size=n)
y = b + (w - 0.5) * tau + sigma * np.random.normal(size=n)
return y, X, w, tau, b, e | [
"def",
"simulate_unrelated_treatment_control",
"(",
"n",
"=",
"1000",
",",
"p",
"=",
"5",
",",
"sigma",
"=",
"1.0",
",",
"adj",
"=",
"0.",
")",
":",
"X",
"=",
"np",
".",
"random",
".",
"normal",
"(",
"size",
"=",
"n",
"*",
"p",
")",
".",
"reshape",
"(",
"(",
"n",
",",
"-",
"1",
")",
")",
"b",
"=",
"(",
"np",
".",
"maximum",
"(",
"np",
".",
"repeat",
"(",
"0.0",
",",
"n",
")",
",",
"X",
"[",
":",
",",
"0",
"]",
"+",
"X",
"[",
":",
",",
"1",
"]",
"+",
"X",
"[",
":",
",",
"2",
"]",
")",
"+",
"np",
".",
"maximum",
"(",
"np",
".",
"repeat",
"(",
"0.0",
",",
"n",
")",
",",
"X",
"[",
":",
",",
"3",
"]",
"+",
"X",
"[",
":",
",",
"4",
"]",
")",
")",
"/",
"2",
"e",
"=",
"1",
"/",
"(",
"1",
"+",
"np",
".",
"exp",
"(",
"-",
"X",
"[",
":",
",",
"0",
"]",
")",
"+",
"np",
".",
"exp",
"(",
"-",
"X",
"[",
":",
",",
"1",
"]",
")",
")",
"e",
"=",
"expit",
"(",
"logit",
"(",
"e",
")",
"-",
"adj",
")",
"tau",
"=",
"np",
".",
"maximum",
"(",
"np",
".",
"repeat",
"(",
"0.0",
",",
"n",
")",
",",
"X",
"[",
":",
",",
"0",
"]",
"+",
"X",
"[",
":",
",",
"1",
"]",
"+",
"X",
"[",
":",
",",
"2",
"]",
")",
"-",
"np",
".",
"maximum",
"(",
"np",
".",
"repeat",
"(",
"0.0",
",",
"n",
")",
",",
"X",
"[",
":",
",",
"3",
"]",
"+",
"X",
"[",
":",
",",
"4",
"]",
")",
"w",
"=",
"np",
".",
"random",
".",
"binomial",
"(",
"1",
",",
"e",
",",
"size",
"=",
"n",
")",
"y",
"=",
"b",
"+",
"(",
"w",
"-",
"0.5",
")",
"*",
"tau",
"+",
"sigma",
"*",
"np",
".",
"random",
".",
"normal",
"(",
"size",
"=",
"n",
")",
"return",
"y",
",",
"X",
",",
"w",
",",
"tau",
",",
"b",
",",
"e"
] | [
144,
0
] | [
175,
29
] | python | en | ['en', 'en', 'en'] | True |
simulate_hidden_confounder | (n=10000, p=5, sigma=1.0, adj=0.) | Synthetic dataset with a hidden confounder biasing treatment.
From Louizos et al. (2018) "Causal Effect Inference with Deep Latent-Variable Models"
Args:
n (int, optional): number of observations
p (int optional): number of covariates (>=3)
sigma (float): standard deviation of the error term
adj (float): no effect. added for consistency
Returns:
(tuple): Synthetically generated samples with the following outputs:
- y ((n,)-array): outcome variable.
- X ((n,p)-ndarray): independent variables.
- w ((n,)-array): treatment flag with value 0 or 1.
- tau ((n,)-array): individual treatment effect.
- b ((n,)-array): expected outcome.
- e ((n,)-array): propensity of receiving treatment.
| Synthetic dataset with a hidden confounder biasing treatment.
From Louizos et al. (2018) "Causal Effect Inference with Deep Latent-Variable Models" | def simulate_hidden_confounder(n=10000, p=5, sigma=1.0, adj=0.):
''' Synthetic dataset with a hidden confounder biasing treatment.
From Louizos et al. (2018) "Causal Effect Inference with Deep Latent-Variable Models"
Args:
n (int, optional): number of observations
p (int optional): number of covariates (>=3)
sigma (float): standard deviation of the error term
adj (float): no effect. added for consistency
Returns:
(tuple): Synthetically generated samples with the following outputs:
- y ((n,)-array): outcome variable.
- X ((n,p)-ndarray): independent variables.
- w ((n,)-array): treatment flag with value 0 or 1.
- tau ((n,)-array): individual treatment effect.
- b ((n,)-array): expected outcome.
- e ((n,)-array): propensity of receiving treatment.
'''
z = np.random.binomial(1, 0.5, size=n).astype(np.double)
X = np.random.normal(z, 5 * z + 3 * (1 - z), size=(p, n)).T
e = 0.75 * z + 0.25 * (1 - z)
w = np.random.binomial(1, e)
b = expit(3 * (z + 2 * (2 * w - 2)))
y = np.random.binomial(1, b)
# Compute true ite tau for evaluation (via Monte Carlo approximation).
t0_t1 = np.array([[0.], [1.]])
y_t0, y_t1 = expit(3 * (z + 2 * (2 * t0_t1 - 2)))
tau = y_t1 - y_t0
return y, X, w, tau, b, e | [
"def",
"simulate_hidden_confounder",
"(",
"n",
"=",
"10000",
",",
"p",
"=",
"5",
",",
"sigma",
"=",
"1.0",
",",
"adj",
"=",
"0.",
")",
":",
"z",
"=",
"np",
".",
"random",
".",
"binomial",
"(",
"1",
",",
"0.5",
",",
"size",
"=",
"n",
")",
".",
"astype",
"(",
"np",
".",
"double",
")",
"X",
"=",
"np",
".",
"random",
".",
"normal",
"(",
"z",
",",
"5",
"*",
"z",
"+",
"3",
"*",
"(",
"1",
"-",
"z",
")",
",",
"size",
"=",
"(",
"p",
",",
"n",
")",
")",
".",
"T",
"e",
"=",
"0.75",
"*",
"z",
"+",
"0.25",
"*",
"(",
"1",
"-",
"z",
")",
"w",
"=",
"np",
".",
"random",
".",
"binomial",
"(",
"1",
",",
"e",
")",
"b",
"=",
"expit",
"(",
"3",
"*",
"(",
"z",
"+",
"2",
"*",
"(",
"2",
"*",
"w",
"-",
"2",
")",
")",
")",
"y",
"=",
"np",
".",
"random",
".",
"binomial",
"(",
"1",
",",
"b",
")",
"# Compute true ite tau for evaluation (via Monte Carlo approximation).",
"t0_t1",
"=",
"np",
".",
"array",
"(",
"[",
"[",
"0.",
"]",
",",
"[",
"1.",
"]",
"]",
")",
"y_t0",
",",
"y_t1",
"=",
"expit",
"(",
"3",
"*",
"(",
"z",
"+",
"2",
"*",
"(",
"2",
"*",
"t0_t1",
"-",
"2",
")",
")",
")",
"tau",
"=",
"y_t1",
"-",
"y_t0",
"return",
"y",
",",
"X",
",",
"w",
",",
"tau",
",",
"b",
",",
"e"
] | [
178,
0
] | [
209,
29
] | python | en | ['en', 'en', 'en'] | True |
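Editor's note: the simulate_* generators recorded above all return the same (y, X, w, tau, b, e) tuple. The short sketch below shows one way such a generator might be exercised; it assumes simulate_randomized_trial is importable exactly as defined in its record (its module path is not shown in this dump) and is not part of the original source.

# Illustrative usage sketch (editor addition), not from the original dataset.
y, X, w, tau, b, e = simulate_randomized_trial(n=2000, p=5, sigma=1.0)
assert X.shape == (2000, 5)
assert y.shape == w.shape == tau.shape == (2000,)

# Treatment is randomized (e == 0.5 everywhere), so a plain difference in means
# estimates the average treatment effect and should land close to tau.mean().
ate_naive = y[w == 1].mean() - y[w == 0].mean()
print(round(ate_naive, 3), round(tau.mean(), 3))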
test_cli_datasource_list | (caplog, empty_data_context, filesystem_csv_2) | Test an empty project and after adding a single datasource. | Test an empty project and after adding a single datasource. | def test_cli_datasource_list(caplog, empty_data_context, filesystem_csv_2):
"""Test an empty project and after adding a single datasource."""
project_root_dir = empty_data_context.root_directory
context = DataContext(project_root_dir)
runner = CliRunner(mix_stderr=False)
result = runner.invoke(
cli, ["datasource", "list", "-d", project_root_dir], catch_exceptions=False
)
stdout = result.stdout.strip()
assert "No Datasources found" in stdout
assert context.list_datasources() == []
base_directory = str(filesystem_csv_2)
context.add_datasource(
"wow_a_datasource",
module_name="great_expectations.datasource",
class_name="PandasDatasource",
batch_kwargs_generators={
"subdir_reader": {
"class_name": "SubdirReaderBatchKwargsGenerator",
"base_directory": base_directory,
}
},
)
datasources = context.list_datasources()
assert datasources == [
{
"name": "wow_a_datasource",
"class_name": "PandasDatasource",
"data_asset_type": {
"class_name": "PandasDataset",
"module_name": "great_expectations.dataset",
},
"batch_kwargs_generators": {
"subdir_reader": {
"base_directory": base_directory,
"class_name": "SubdirReaderBatchKwargsGenerator",
}
},
"module_name": "great_expectations.datasource",
}
]
runner = CliRunner(mix_stderr=False)
result = runner.invoke(
cli, ["datasource", "list", "-d", project_root_dir], catch_exceptions=False
)
expected_output = """
1 Datasource found:[0m
[0m
- [36mname:[0m wow_a_datasource[0m
[36mmodule_name:[0m great_expectations.datasource[0m
[36mclass_name:[0m PandasDatasource[0m
[36mbatch_kwargs_generators:[0m[0m
[36msubdir_reader:[0m[0m
[36mclass_name:[0m SubdirReaderBatchKwargsGenerator[0m
[36mbase_directory:[0m {}[0m
[36mdata_asset_type:[0m[0m
[36mmodule_name:[0m great_expectations.dataset[0m
[36mclass_name:[0m PandasDataset[0m""".format(
base_directory
).strip()
stdout = result.stdout.strip()
assert stdout == expected_output
assert_no_logging_messages_or_tracebacks(caplog, result) | [
"def",
"test_cli_datasource_list",
"(",
"caplog",
",",
"empty_data_context",
",",
"filesystem_csv_2",
")",
":",
"project_root_dir",
"=",
"empty_data_context",
".",
"root_directory",
"context",
"=",
"DataContext",
"(",
"project_root_dir",
")",
"runner",
"=",
"CliRunner",
"(",
"mix_stderr",
"=",
"False",
")",
"result",
"=",
"runner",
".",
"invoke",
"(",
"cli",
",",
"[",
"\"datasource\"",
",",
"\"list\"",
",",
"\"-d\"",
",",
"project_root_dir",
"]",
",",
"catch_exceptions",
"=",
"False",
")",
"stdout",
"=",
"result",
".",
"stdout",
".",
"strip",
"(",
")",
"assert",
"\"No Datasources found\"",
"in",
"stdout",
"assert",
"context",
".",
"list_datasources",
"(",
")",
"==",
"[",
"]",
"base_directory",
"=",
"str",
"(",
"filesystem_csv_2",
")",
"context",
".",
"add_datasource",
"(",
"\"wow_a_datasource\"",
",",
"module_name",
"=",
"\"great_expectations.datasource\"",
",",
"class_name",
"=",
"\"PandasDatasource\"",
",",
"batch_kwargs_generators",
"=",
"{",
"\"subdir_reader\"",
":",
"{",
"\"class_name\"",
":",
"\"SubdirReaderBatchKwargsGenerator\"",
",",
"\"base_directory\"",
":",
"base_directory",
",",
"}",
"}",
",",
")",
"datasources",
"=",
"context",
".",
"list_datasources",
"(",
")",
"assert",
"datasources",
"==",
"[",
"{",
"\"name\"",
":",
"\"wow_a_datasource\"",
",",
"\"class_name\"",
":",
"\"PandasDatasource\"",
",",
"\"data_asset_type\"",
":",
"{",
"\"class_name\"",
":",
"\"PandasDataset\"",
",",
"\"module_name\"",
":",
"\"great_expectations.dataset\"",
",",
"}",
",",
"\"batch_kwargs_generators\"",
":",
"{",
"\"subdir_reader\"",
":",
"{",
"\"base_directory\"",
":",
"base_directory",
",",
"\"class_name\"",
":",
"\"SubdirReaderBatchKwargsGenerator\"",
",",
"}",
"}",
",",
"\"module_name\"",
":",
"\"great_expectations.datasource\"",
",",
"}",
"]",
"runner",
"=",
"CliRunner",
"(",
"mix_stderr",
"=",
"False",
")",
"result",
"=",
"runner",
".",
"invoke",
"(",
"cli",
",",
"[",
"\"datasource\"",
",",
"\"list\"",
",",
"\"-d\"",
",",
"project_root_dir",
"]",
",",
"catch_exceptions",
"=",
"False",
")",
"expected_output",
"=",
"\"\"\"\n1 Datasource found:\u001b[0m\n\u001b[0m\n - \u001b[36mname:\u001b[0m wow_a_datasource\u001b[0m\n \u001b[36mmodule_name:\u001b[0m great_expectations.datasource\u001b[0m\n \u001b[36mclass_name:\u001b[0m PandasDatasource\u001b[0m\n \u001b[36mbatch_kwargs_generators:\u001b[0m\u001b[0m\n \u001b[36msubdir_reader:\u001b[0m\u001b[0m\n \u001b[36mclass_name:\u001b[0m SubdirReaderBatchKwargsGenerator\u001b[0m\n \u001b[36mbase_directory:\u001b[0m {}\u001b[0m\n \u001b[36mdata_asset_type:\u001b[0m\u001b[0m\n \u001b[36mmodule_name:\u001b[0m great_expectations.dataset\u001b[0m\n \u001b[36mclass_name:\u001b[0m PandasDataset\u001b[0m\"\"\"",
".",
"format",
"(",
"base_directory",
")",
".",
"strip",
"(",
")",
"stdout",
"=",
"result",
".",
"stdout",
".",
"strip",
"(",
")",
"assert",
"stdout",
"==",
"expected_output",
"assert_no_logging_messages_or_tracebacks",
"(",
"caplog",
",",
"result",
")"
] | [
14,
0
] | [
83,
60
] | python | en | ['en', 'en', 'en'] | True |
TesteAvaliacaoUpdateView.form_valid | (self, form, **kwargs) |
Populate the fields after the user saves the form
|
Populate the fields after the user saves the form
| def form_valid(self, form, **kwargs):
"""
Populate the fields after the user saves the form
"""
form.instance.utilizador = self.request.user
form.instance.realizado = True
return super().form_valid(form) | [
"def",
"form_valid",
"(",
"self",
",",
"form",
",",
"*",
"*",
"kwargs",
")",
":",
"form",
".",
"instance",
".",
"utilizador",
"=",
"self",
".",
"request",
".",
"user",
"form",
".",
"instance",
".",
"realizado",
"=",
"True",
"return",
"super",
"(",
")",
".",
"form_valid",
"(",
"form",
")"
] | [
326,
4
] | [
333,
39
] | python | en | ['en', 'ja', 'th'] | False |
_run_on_dask | (jobs, verbose) | Run the tasks in parallel using dask. | Run the tasks in parallel using dask. | def _run_on_dask(jobs, verbose):
"""Run the tasks in parallel using dask."""
try:
import dask
except ImportError as ie:
ie.msg += (
'\n\nIt seems like `dask` is not installed.\n'
'Please install `dask` and `distributed` using:\n'
'\n pip install dask distributed'
)
raise
scorer = dask.delayed(_run_job)
persisted = dask.persist(*[scorer(args) for args in jobs])
if verbose:
try:
progress(persisted)
except ValueError:
pass
return dask.compute(*persisted) | [
"def",
"_run_on_dask",
"(",
"jobs",
",",
"verbose",
")",
":",
"try",
":",
"import",
"dask",
"except",
"ImportError",
"as",
"ie",
":",
"ie",
".",
"msg",
"+=",
"(",
"'\\n\\nIt seems like `dask` is not installed.\\n'",
"'Please install `dask` and `distributed` using:\\n'",
"'\\n pip install dask distributed'",
")",
"raise",
"scorer",
"=",
"dask",
".",
"delayed",
"(",
"_run_job",
")",
"persisted",
"=",
"dask",
".",
"persist",
"(",
"*",
"[",
"scorer",
"(",
"args",
")",
"for",
"args",
"in",
"jobs",
"]",
")",
"if",
"verbose",
":",
"try",
":",
"progress",
"(",
"persisted",
")",
"except",
"ValueError",
":",
"pass",
"return",
"dask",
".",
"compute",
"(",
"*",
"persisted",
")"
] | [
216,
0
] | [
236,
35
] | python | en | ['en', 'en', 'en'] | True |
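Editor's note: _run_on_dask above is the standard dask delayed -> persist -> compute pattern. The stand-alone sketch below replays that pattern on a made-up job function; it assumes `dask` and `distributed` are installed (as the record's error message suggests) and is not part of the original source.

import dask
from dask.distributed import Client, progress

def _toy_job(args):
    x, y = args
    return x * y

if __name__ == "__main__":
    client = Client(processes=False)               # small local cluster, set up outside the helper
    jobs = [(i, i + 1) for i in range(10)]
    delayed_job = dask.delayed(_toy_job)           # wrap the job function once
    persisted = dask.persist(*[delayed_job(args) for args in jobs])
    progress(persisted)                            # optional progress bar, mirroring `verbose`
    results = dask.compute(*persisted)             # gather all results
    print(results)
    client.close()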
benchmark | (pipelines=None, datasets=None, hyperparameters=None, metrics=METRICS, rank='f1',
test_split=False, detrend=False, iterations=1, workers=1, show_progress=False,
cache_dir=None, output_path=None, pipeline_dir=None) | Run pipelines on the given datasets and evaluate the performance.
The pipelines are used to analyze the given signals and later on the
detected anomalies are scored against the known anomalies using the
indicated metrics.
Finally, the scores obtained with each metric are averaged across all the signals,
ranked by the indicated metric and returned on a ``pandas.DataFrame``.
Args:
pipelines (dict or list): dictionary with pipeline names as keys and their
JSON paths as values. If a list is given, it should be of JSON paths,
and the paths themselves will be used as names. If not given, all verified
pipelines will be used for evaluation.
datasets (dict or list): dictionary of dataset name as keys and list of signals as
values. If a list is given then it will be under a generic name ``dataset``.
If not given, all benchmark datasets will be used.
hyperparameters (dict or list): dictionary with pipeline names as keys
and their hyperparameter JSON paths or dictionaries as values. If a list is
given, it should be of corresponding order to pipelines.
metrics (dict or list): dictionary with metric names as keys and
scoring functions as values. If a list is given, it should be of scoring
functions, and their ``__name__`` value will be used as the metric name.
If not given, all the available metrics will be used.
rank (str): Sort and rank the pipelines based on the given metric.
If not given, rank using the first metric.
test_split (bool or float): Whether to use the prespecified train-test split. If
float, then it should be between 0.0 and 1.0 and represent the proportion of
the signal to include in the test split. If not given, use ``False``.
detrend (bool): Whether to use ``scipy.detrend``. If not given, use ``False``.
iterations (int):
Number of iterations to perform over each signal and pipeline. Defaults to 1.
workers (int or str):
If ``workers`` is given as an integer value other than 0 or 1, a multiprocessing
Pool is used to distribute the computation across the indicated number of workers.
If the string ``dask`` is given, the computation is distributed using ``dask``.
In this case, setting up the ``dask`` cluster and client is expected to be handled
outside of this function.
show_progress (bool):
Whether to use tqdm to keep track of the progress. Defaults to ``False``.
cache_dir (str):
If a ``cache_dir`` is given, intermediate results are stored in the indicated directory
as CSV files as they get computed. This allows inspecting results while the benchmark
is still running and also recovering results in case the process does not finish
properly. Defaults to ``None``.
output_path (str): Location to save the intermediary results. If not given,
intermediary results will not be saved.
pipeline_dir (str):
If a ``pipeline_dir`` is given, pipelines will get dumped in the specified directory
as pickle files. Defaults to ``None``.
Returns:
pandas.DataFrame:
A table containing the scores obtained with each scoring function across
all the signals for each pipeline.
| Run pipelines on the given datasets and evaluate the performance. | def benchmark(pipelines=None, datasets=None, hyperparameters=None, metrics=METRICS, rank='f1',
test_split=False, detrend=False, iterations=1, workers=1, show_progress=False,
cache_dir=None, output_path=None, pipeline_dir=None):
"""Run pipelines on the given datasets and evaluate the performance.
The pipelines are used to analyze the given signals and later on the
detected anomalies are scored against the known anomalies using the
indicated metrics.
Finally, the scores obtained with each metric are averaged across all the signals,
ranked by the indicated metric and returned on a ``pandas.DataFrame``.
Args:
pipelines (dict or list): dictionary with pipeline names as keys and their
JSON paths as values. If a list is given, it should be of JSON paths,
and the paths themselves will be used as names. If not given, all verified
pipelines will be used for evaluation.
datasets (dict or list): dictionary of dataset name as keys and list of signals as
values. If a list is given then it will be under a generic name ``dataset``.
If not given, all benchmark datasets will be used.
hyperparameters (dict or list): dictionary with pipeline names as keys
and their hyperparameter JSON paths or dictionaries as values. If a list is
given, it should be of corresponding order to pipelines.
metrics (dict or list): dictionary with metric names as keys and
scoring functions as values. If a list is given, it should be of scoring
functions, and their ``__name__`` value will be used as the metric name.
If not given, all the available metrics will be used.
rank (str): Sort and rank the pipelines based on the given metric.
If not given, rank using the first metric.
test_split (bool or float): Whether to use the prespecified train-test split. If
float, then it should be between 0.0 and 1.0 and represent the proportion of
the signal to include in the test split. If not given, use ``False``.
detrend (bool): Whether to use ``scipy.detrend``. If not given, use ``False``.
iterations (int):
Number of iterations to perform over each signal and pipeline. Defaults to 1.
workers (int or str):
If ``workers`` is given as an integer value other than 0 or 1, a multiprocessing
Pool is used to distribute the computation across the indicated number of workers.
If the string ``dask`` is given, the computation is distributed using ``dask``.
In this case, setting up the ``dask`` cluster and client is expected to be handled
outside of this function.
show_progress (bool):
Whether to use tqdm to keep track of the progress. Defaults to ``False``.
cache_dir (str):
If a ``cache_dir`` is given, intermediate results are stored in the indicated directory
as CSV files as they get computed. This allows inspecting results while the benchmark
is still running and also recovering results in case the process does not finish
properly. Defaults to ``None``.
output_path (str): Location to save the intermediary results. If not given,
intermediary results will not be saved.
pipeline_dir (str):
If a ``pipeline_dir`` is given, pipelines will get dumped in the specified directory
as pickle files. Defaults to ``None``.
Returns:
pandas.DataFrame:
A table containing the scores obtained with each scoring function across
all the signals for each pipeline.
"""
pipelines = pipelines or VERIFIED_PIPELINES
datasets = datasets or BENCHMARK_DATA
run_id = os.getenv('RUN_ID') or str(uuid.uuid4())[:10]
if isinstance(pipelines, list):
pipelines = {pipeline: pipeline for pipeline in pipelines}
if isinstance(datasets, list):
datasets = {'dataset': datasets}
if isinstance(hyperparameters, list):
hyperparameters = {pipeline: hyperparameter for pipeline, hyperparameter in
zip(pipelines.keys(), hyperparameters)}
if isinstance(metrics, list):
metrics_ = dict()
for metric in metrics:
if callable(metric):
metrics_[metric.__name__] = metric
elif metric in METRICS:
metrics_[metric] = METRICS[metric]
else:
raise ValueError('Unknown metric: {}'.format(metric))
metrics = metrics_
if cache_dir:
cache_dir = Path(cache_dir)
os.makedirs(cache_dir, exist_ok=True)
if pipeline_dir:
pipeline_dir = Path(pipeline_dir)
os.makedirs(pipeline_dir, exist_ok=True)
jobs = list()
for dataset, signals in datasets.items():
for pipeline_name, pipeline in pipelines.items():
hyperparameter = _get_pipeline_hyperparameter(hyperparameters, dataset, pipeline_name)
parameters = BENCHMARK_PARAMS.get(dataset)
if parameters is not None:
detrend, test_split = parameters.values()
for signal in signals:
for iteration in range(iterations):
args = (
pipeline,
pipeline_name,
dataset,
signal,
hyperparameter,
metrics,
test_split,
detrend,
iteration,
cache_dir,
pipeline_dir,
run_id,
)
jobs.append(args)
if workers == 'dask':
scores = _run_on_dask(jobs, show_progress)
else:
if workers in (0, 1):
scores = map(_run_job, jobs)
else:
pool = concurrent.futures.ProcessPoolExecutor(workers)
scores = pool.map(_run_job, jobs)
scores = tqdm.tqdm(scores, total=len(jobs), file=TqdmLogger())
if show_progress:
scores = tqdm.tqdm(scores, total=len(jobs))
scores = pd.concat(scores)
if output_path:
LOGGER.info('Saving benchmark report to %s', output_path)
scores.to_csv(output_path, index=False)
return _sort_leaderboard(scores, rank, metrics) | [
"def",
"benchmark",
"(",
"pipelines",
"=",
"None",
",",
"datasets",
"=",
"None",
",",
"hyperparameters",
"=",
"None",
",",
"metrics",
"=",
"METRICS",
",",
"rank",
"=",
"'f1'",
",",
"test_split",
"=",
"False",
",",
"detrend",
"=",
"False",
",",
"iterations",
"=",
"1",
",",
"workers",
"=",
"1",
",",
"show_progress",
"=",
"False",
",",
"cache_dir",
"=",
"None",
",",
"output_path",
"=",
"None",
",",
"pipeline_dir",
"=",
"None",
")",
":",
"pipelines",
"=",
"pipelines",
"or",
"VERIFIED_PIPELINES",
"datasets",
"=",
"datasets",
"or",
"BENCHMARK_DATA",
"run_id",
"=",
"os",
".",
"getenv",
"(",
"'RUN_ID'",
")",
"or",
"str",
"(",
"uuid",
".",
"uuid4",
"(",
")",
")",
"[",
":",
"10",
"]",
"if",
"isinstance",
"(",
"pipelines",
",",
"list",
")",
":",
"pipelines",
"=",
"{",
"pipeline",
":",
"pipeline",
"for",
"pipeline",
"in",
"pipelines",
"}",
"if",
"isinstance",
"(",
"datasets",
",",
"list",
")",
":",
"datasets",
"=",
"{",
"'dataset'",
":",
"datasets",
"}",
"if",
"isinstance",
"(",
"hyperparameters",
",",
"list",
")",
":",
"hyperparameters",
"=",
"{",
"pipeline",
":",
"hyperparameter",
"for",
"pipeline",
",",
"hyperparameter",
"in",
"zip",
"(",
"pipelines",
".",
"keys",
"(",
")",
",",
"hyperparameters",
")",
"}",
"if",
"isinstance",
"(",
"metrics",
",",
"list",
")",
":",
"metrics_",
"=",
"dict",
"(",
")",
"for",
"metric",
"in",
"metrics",
":",
"if",
"callable",
"(",
"metric",
")",
":",
"metrics_",
"[",
"metric",
".",
"__name__",
"]",
"=",
"metric",
"elif",
"metric",
"in",
"METRICS",
":",
"metrics_",
"[",
"metric",
"]",
"=",
"METRICS",
"[",
"metric",
"]",
"else",
":",
"raise",
"ValueError",
"(",
"'Unknown metric: {}'",
".",
"format",
"(",
"metric",
")",
")",
"metrics",
"=",
"metrics_",
"if",
"cache_dir",
":",
"cache_dir",
"=",
"Path",
"(",
"cache_dir",
")",
"os",
".",
"makedirs",
"(",
"cache_dir",
",",
"exist_ok",
"=",
"True",
")",
"if",
"pipeline_dir",
":",
"pipeline_dir",
"=",
"Path",
"(",
"pipeline_dir",
")",
"os",
".",
"makedirs",
"(",
"pipeline_dir",
",",
"exist_ok",
"=",
"True",
")",
"jobs",
"=",
"list",
"(",
")",
"for",
"dataset",
",",
"signals",
"in",
"datasets",
".",
"items",
"(",
")",
":",
"for",
"pipeline_name",
",",
"pipeline",
"in",
"pipelines",
".",
"items",
"(",
")",
":",
"hyperparameter",
"=",
"_get_pipeline_hyperparameter",
"(",
"hyperparameters",
",",
"dataset",
",",
"pipeline_name",
")",
"parameters",
"=",
"BENCHMARK_PARAMS",
".",
"get",
"(",
"dataset",
")",
"if",
"parameters",
"is",
"not",
"None",
":",
"detrend",
",",
"test_split",
"=",
"parameters",
".",
"values",
"(",
")",
"for",
"signal",
"in",
"signals",
":",
"for",
"iteration",
"in",
"range",
"(",
"iterations",
")",
":",
"args",
"=",
"(",
"pipeline",
",",
"pipeline_name",
",",
"dataset",
",",
"signal",
",",
"hyperparameter",
",",
"metrics",
",",
"test_split",
",",
"detrend",
",",
"iteration",
",",
"cache_dir",
",",
"pipeline_dir",
",",
"run_id",
",",
")",
"jobs",
".",
"append",
"(",
"args",
")",
"if",
"workers",
"==",
"'dask'",
":",
"scores",
"=",
"_run_on_dask",
"(",
"jobs",
",",
"show_progress",
")",
"else",
":",
"if",
"workers",
"in",
"(",
"0",
",",
"1",
")",
":",
"scores",
"=",
"map",
"(",
"_run_job",
",",
"jobs",
")",
"else",
":",
"pool",
"=",
"concurrent",
".",
"futures",
".",
"ProcessPoolExecutor",
"(",
"workers",
")",
"scores",
"=",
"pool",
".",
"map",
"(",
"_run_job",
",",
"jobs",
")",
"scores",
"=",
"tqdm",
".",
"tqdm",
"(",
"scores",
",",
"total",
"=",
"len",
"(",
"jobs",
")",
",",
"file",
"=",
"TqdmLogger",
"(",
")",
")",
"if",
"show_progress",
":",
"scores",
"=",
"tqdm",
".",
"tqdm",
"(",
"scores",
",",
"total",
"=",
"len",
"(",
"jobs",
")",
")",
"scores",
"=",
"pd",
".",
"concat",
"(",
"scores",
")",
"if",
"output_path",
":",
"LOGGER",
".",
"info",
"(",
"'Saving benchmark report to %s'",
",",
"output_path",
")",
"scores",
".",
"to_csv",
"(",
"output_path",
",",
"index",
"=",
"False",
")",
"return",
"_sort_leaderboard",
"(",
"scores",
",",
"rank",
",",
"metrics",
")"
] | [
239,
0
] | [
375,
51
] | python | en | ['en', 'en', 'en'] | True |
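Editor's note: a minimal call sketch for the benchmark() function recorded above. The import path, pipeline spec, and signal names are placeholders rather than values confirmed by this dump; substitute whatever exists in your installation.

from orion.benchmark import benchmark      # hypothetical import path for the function above

pipelines = {"my_pipeline": "path/to/my_pipeline.json"}   # pipeline name -> JSON spec
datasets = {"demo": ["signal-1", "signal-2"]}             # dataset name -> list of signals

scores = benchmark(
    pipelines=pipelines,
    datasets=datasets,
    metrics=["f1"],        # list form is resolved against the METRICS registry
    rank="f1",
    iterations=1,
    workers=1,             # 0/1 runs serially; >1 uses a process pool; "dask" distributes
    show_progress=True,
    output_path="benchmark_scores.csv",
)
print(scores.head())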
git_add | (filename: str) |
Use `git add` to stage a single file.
|
Use `git add` to stage a single file.
| def git_add(filename: str) -> None:
"""
Use `git add` to stage a single file.
"""
run(['git', 'add', '--', filename]) | [
"def",
"git_add",
"(",
"filename",
":",
"str",
")",
"->",
"None",
":",
"run",
"(",
"[",
"'git'",
",",
"'add'",
",",
"'--'",
",",
"filename",
"]",
")"
] | [
20,
0
] | [
25,
39
] | python | en | ['en', 'error', 'th'] | False |
git_check_clean | (allow_staged: bool = False) |
Use `git status --porcelain` to check if the working tree is dirty.
If allow_staged is True, allow staged files, but no unstaged changes.
|
Use `git status --porcelain` to check if the working tree is dirty.
If allow_staged is True, allow staged files, but no unstaged changes.
| def git_check_clean(allow_staged: bool = False) -> None:
"""
Use `git status --porcelain` to check if the working tree is dirty.
If allow_staged is True, allow staged files, but no unstaged changes.
"""
out = run_capture(['git', 'status', '--porcelain'])
if out:
# Can we allow staged changes?
if not allow_staged:
# Nope. ANY changes are unacceptable, so we can short-circuit
# here.
raise Exception(out)
# If here, staged changes are OK, and unstaged changes are not.
# In the porcelain output, staged changes start with a change
# character followed by a space, and unstaged changes start with a
# space followed by a change character. So any lines with a non-space
# in the second column are a problem here.
lines = out.split('\n')
problems = [line for line in lines if line[1] != ' ']
if problems:
raise Exception("\n".join(problems)) | [
"def",
"git_check_clean",
"(",
"allow_staged",
":",
"bool",
"=",
"False",
")",
"->",
"None",
":",
"out",
"=",
"run_capture",
"(",
"[",
"'git'",
",",
"'status'",
",",
"'--porcelain'",
"]",
")",
"if",
"out",
":",
"# Can we allow staged changes?",
"if",
"not",
"allow_staged",
":",
"# Nope. ANY changes are unacceptable, so we can short-circuit",
"# here.",
"raise",
"Exception",
"(",
"out",
")",
"# If here, staged changes are OK, and unstaged changes are not.",
"# In the porcelain output, staged changes start with a change",
"# character followed by a space, and unstaged changes start with a",
"# space followed by a change character. So any lines with a non-space",
"# in the second column are a problem here.",
"lines",
"=",
"out",
".",
"split",
"(",
"'\\n'",
")",
"problems",
"=",
"[",
"line",
"for",
"line",
"in",
"lines",
"if",
"line",
"[",
"1",
"]",
"!=",
"' '",
"]",
"if",
"problems",
":",
"raise",
"Exception",
"(",
"\"\\n\"",
".",
"join",
"(",
"problems",
")",
")"
] | [
28,
0
] | [
52,
48
] | python | en | ['en', 'error', 'th'] | False |
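Editor's note: the staged-versus-unstaged decision in git_check_clean rests on the two status columns of `git status --porcelain`. This small stand-alone illustration (not from the original repository) shows which lines the `line[1] != ' '` test flags as problems.

# Column 1 is the index (staged) state, column 2 the worktree (unstaged) state.
porcelain = "\n".join([
    "M  staged_only.py",        # staged change, clean worktree -> allowed when allow_staged=True
    " M unstaged_only.py",      # unstaged change               -> always rejected
    "MM staged_and_unstaged.py",
])
problems = [line for line in porcelain.split("\n") if line[1] != " "]
print(problems)   # [' M unstaged_only.py', 'MM staged_and_unstaged.py']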
create_proxy_zip | (proxy_string, proxy_user, proxy_pass) | Implementation of https://stackoverflow.com/a/35293284 for
https://stackoverflow.com/questions/12848327/
(Run Selenium on a proxy server that requires authentication.)
Solution involves creating & adding a Chrome extension on the fly.
* CHROME-ONLY for now! *
| Implementation of https://stackoverflow.com/a/35293284 for
https://stackoverflow.com/questions/12848327/
(Run Selenium on a proxy server that requires authentication.)
Solution involves creating & adding a Chrome extension on the fly.
* CHROME-ONLY for now! *
| def create_proxy_zip(proxy_string, proxy_user, proxy_pass):
""" Implementation of https://stackoverflow.com/a/35293284 for
https://stackoverflow.com/questions/12848327/
(Run Selenium on a proxy server that requires authentication.)
Solution involves creating & adding a Chrome extension on the fly.
* CHROME-ONLY for now! *
"""
proxy_host = proxy_string.split(':')[0]
proxy_port = proxy_string.split(':')[1]
background_js = (
"""var config = {\n"""
""" mode: "fixed_servers",\n"""
""" rules: {\n"""
""" singleProxy: {\n"""
""" scheme: "http",\n"""
""" host: "%s",\n"""
""" port: parseInt("%s")\n"""
""" },\n"""
""" }\n"""
""" };\n"""
"""chrome.proxy.settings.set("""
"""{value: config, scope: "regular"}, function() {"""
"""});\n"""
"""function callbackFn(details) {\n"""
""" return {\n"""
""" authCredentials: {\n"""
""" username: "%s",\n"""
""" password: "%s"\n"""
""" }\n"""
""" };\n"""
"""}\n"""
"""chrome.webRequest.onAuthRequired.addListener(\n"""
""" callbackFn,\n"""
""" {urls: ["<all_urls>"]},\n"""
""" ['blocking']\n"""
""");""" % (proxy_host, proxy_port, proxy_user, proxy_pass))
manifest_json = (
'''{\n'''
'''"version": "1.0.0",\n'''
'''"manifest_version": 2,\n'''
'''"name": "Chrome Proxy",\n'''
'''"permissions": [\n'''
''' "proxy",\n'''
''' "tabs",\n'''
''' "unlimitedStorage",\n'''
''' "storage",\n'''
''' "<all_urls>",\n'''
''' "webRequest",\n'''
''' "webRequestBlocking"\n'''
'''],\n'''
'''"background": {\n'''
''' "scripts": ["background.js"]\n'''
'''},\n'''
'''"minimum_chrome_version":"22.0.0"\n'''
'''}''')
lock = threading.RLock() # Support multi-threaded test runs with Pytest
with lock:
try:
zf = zipfile.ZipFile(PROXY_ZIP_PATH, mode='w')
except IOError:
# Handle "Permission denied" on the default proxy.zip path
abs_path = os.path.abspath('.')
downloads_path = os.path.join(abs_path, DOWNLOADS_DIR)
if not os.path.exists(downloads_path):
os.mkdir(downloads_path)
zf = zipfile.ZipFile(PROXY_ZIP_PATH_2, mode='w')
zf.writestr("background.js", background_js)
zf.writestr("manifest.json", manifest_json)
zf.close() | [
"def",
"create_proxy_zip",
"(",
"proxy_string",
",",
"proxy_user",
",",
"proxy_pass",
")",
":",
"proxy_host",
"=",
"proxy_string",
".",
"split",
"(",
"':'",
")",
"[",
"0",
"]",
"proxy_port",
"=",
"proxy_string",
".",
"split",
"(",
"':'",
")",
"[",
"1",
"]",
"background_js",
"=",
"(",
"\"\"\"var config = {\\n\"\"\"",
"\"\"\" mode: \"fixed_servers\",\\n\"\"\"",
"\"\"\" rules: {\\n\"\"\"",
"\"\"\" singleProxy: {\\n\"\"\"",
"\"\"\" scheme: \"http\",\\n\"\"\"",
"\"\"\" host: \"%s\",\\n\"\"\"",
"\"\"\" port: parseInt(\"%s\")\\n\"\"\"",
"\"\"\" },\\n\"\"\"",
"\"\"\" }\\n\"\"\"",
"\"\"\" };\\n\"\"\"",
"\"\"\"chrome.proxy.settings.set(\"\"\"",
"\"\"\"{value: config, scope: \"regular\"}, function() {\"\"\"",
"\"\"\"});\\n\"\"\"",
"\"\"\"function callbackFn(details) {\\n\"\"\"",
"\"\"\" return {\\n\"\"\"",
"\"\"\" authCredentials: {\\n\"\"\"",
"\"\"\" username: \"%s\",\\n\"\"\"",
"\"\"\" password: \"%s\"\\n\"\"\"",
"\"\"\" }\\n\"\"\"",
"\"\"\" };\\n\"\"\"",
"\"\"\"}\\n\"\"\"",
"\"\"\"chrome.webRequest.onAuthRequired.addListener(\\n\"\"\"",
"\"\"\" callbackFn,\\n\"\"\"",
"\"\"\" {urls: [\"<all_urls>\"]},\\n\"\"\"",
"\"\"\" ['blocking']\\n\"\"\"",
"\"\"\");\"\"\"",
"%",
"(",
"proxy_host",
",",
"proxy_port",
",",
"proxy_user",
",",
"proxy_pass",
")",
")",
"manifest_json",
"=",
"(",
"'''{\\n'''",
"'''\"version\": \"1.0.0\",\\n'''",
"'''\"manifest_version\": 2,\\n'''",
"'''\"name\": \"Chrome Proxy\",\\n'''",
"'''\"permissions\": [\\n'''",
"''' \"proxy\",\\n'''",
"''' \"tabs\",\\n'''",
"''' \"unlimitedStorage\",\\n'''",
"''' \"storage\",\\n'''",
"''' \"<all_urls>\",\\n'''",
"''' \"webRequest\",\\n'''",
"''' \"webRequestBlocking\"\\n'''",
"'''],\\n'''",
"'''\"background\": {\\n'''",
"''' \"scripts\": [\"background.js\"]\\n'''",
"'''},\\n'''",
"'''\"minimum_chrome_version\":\"22.0.0\"\\n'''",
"'''}'''",
")",
"lock",
"=",
"threading",
".",
"RLock",
"(",
")",
"# Support multi-threaded test runs with Pytest",
"with",
"lock",
":",
"try",
":",
"zf",
"=",
"zipfile",
".",
"ZipFile",
"(",
"PROXY_ZIP_PATH",
",",
"mode",
"=",
"'w'",
")",
"except",
"IOError",
":",
"# Handle \"Permission denied\" on the default proxy.zip path",
"abs_path",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"'.'",
")",
"downloads_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"abs_path",
",",
"DOWNLOADS_DIR",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"downloads_path",
")",
":",
"os",
".",
"mkdir",
"(",
"downloads_path",
")",
"zf",
"=",
"zipfile",
".",
"ZipFile",
"(",
"PROXY_ZIP_PATH_2",
",",
"mode",
"=",
"'w'",
")",
"zf",
".",
"writestr",
"(",
"\"background.js\"",
",",
"background_js",
")",
"zf",
".",
"writestr",
"(",
"\"manifest.json\"",
",",
"manifest_json",
")",
"zf",
".",
"close",
"(",
")"
] | [
11,
0
] | [
79,
18
] | python | en | ['en', 'en', 'en'] | True |
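Editor's note: a sketch of loading the extension zip produced by create_proxy_zip into Chrome with Selenium. The PROXY_ZIP_PATH value and the proxy host/credentials are assumptions; options.add_extension() and webdriver.Chrome() are standard Selenium API, but the wiring here is illustrative only.

from selenium import webdriver

PROXY_ZIP_PATH = "downloaded_files/proxy.zip"                  # assumed value of the module constant
create_proxy_zip("proxy.example.com:8080", "user", "secret")   # build the extension zip

options = webdriver.ChromeOptions()
options.add_extension(PROXY_ZIP_PATH)                          # Chrome-only, as the docstring notes
driver = webdriver.Chrome(options=options)
driver.get("https://httpbin.org/ip")                           # page should report the proxy address
driver.quit()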
remove_proxy_zip_if_present | () | Remove Chrome extension zip file used for proxy server authentication.
Used in the implementation of https://stackoverflow.com/a/35293284
for https://stackoverflow.com/questions/12848327/
| Remove Chrome extension zip file used for proxy server authentication.
Used in the implementation of https://stackoverflow.com/a/35293284
for https://stackoverflow.com/questions/12848327/
| def remove_proxy_zip_if_present():
""" Remove Chrome extension zip file used for proxy server authentication.
Used in the implementation of https://stackoverflow.com/a/35293284
for https://stackoverflow.com/questions/12848327/
"""
try:
if os.path.exists(PROXY_ZIP_PATH):
os.remove(PROXY_ZIP_PATH)
elif os.path.exists(PROXY_ZIP_PATH_2):
os.remove(PROXY_ZIP_PATH_2)
except Exception:
pass | [
"def",
"remove_proxy_zip_if_present",
"(",
")",
":",
"try",
":",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"PROXY_ZIP_PATH",
")",
":",
"os",
".",
"remove",
"(",
"PROXY_ZIP_PATH",
")",
"elif",
"os",
".",
"path",
".",
"exists",
"(",
"PROXY_ZIP_PATH_2",
")",
":",
"os",
".",
"remove",
"(",
"PROXY_ZIP_PATH_2",
")",
"except",
"Exception",
":",
"pass"
] | [
82,
0
] | [
93,
12
] | python | en | ['en', 'en', 'en'] | True |
RebolLexer.analyse_text | (text) |
Check if code contains a REBOL header and so is probably not R code
|
Check if code contains a REBOL header and so is probably not R code
| def analyse_text(text):
"""
Check if code contains a REBOL header and so is probably not R code
"""
if re.match(r'^\s*REBOL\s*\[', text, re.IGNORECASE):
# The code starts with REBOL header
return 1.0
elif re.search(r'\s*REBOL\s*[', text, re.IGNORECASE):
# The code contains REBOL header but also some text before it
return 0.5 | [
"def",
"analyse_text",
"(",
"text",
")",
":",
"if",
"re",
".",
"match",
"(",
"r'^\\s*REBOL\\s*\\['",
",",
"text",
",",
"re",
".",
"IGNORECASE",
")",
":",
"# The code starts with REBOL header",
"return",
"1.0",
"elif",
"re",
".",
"search",
"(",
"r'\\s*REBOL\\s*['",
",",
"text",
",",
"re",
".",
"IGNORECASE",
")",
":",
"# The code contains REBOL header but also some text before it",
"return",
"0.5"
] | [
234,
4
] | [
243,
22
] | python | en | ['en', 'error', 'th'] | False |
get_index | (subjects, ra) |
Usage: sort input poses by their distance to the [mean pose] of the training data,
sorted from largest to smallest
:param subjects: e.g. Test set
:return: Reversed Index in the Test set
|
Usage: sort input poses by their distance to the [mean pose] of the training data,
sorted from largest to smallest
:param subjects: e.g. Test set
:return: Reversed Index in the Test set
| def get_index(subjects, ra):
"""
Usage: sort input poses by their distance to the [mean pose] of the training data,
sorted from largest to smallest
:param subjects: e.g. Test set
:return: Reversed Index in the Test set
"""
train_pose_3d = []
for subject in subjects:
# print('subject',subject)
for action in dataset[subject].keys():
# print('action',action)
# poses_2d = keypoints[subject][action]
poses_3d = dataset[subject][action]['positions_3d']
# out = []
for i in range(len(poses_3d)):
# Remove global offset, but keep trajectory in first position
poses_3d[i] -= poses_3d[i][:, :1]
if cal_mean:
mean_3d_1 = np.mean(poses_3d[i], axis=0)
elif cal_distance:
ext_mean_pose = np.repeat(mean_pose[np.newaxis, :, :], poses_3d[i].shape[0], axis=0)
assert ext_mean_pose.shape == poses_3d[i].shape
pose_dis = np.linalg.norm((ext_mean_pose - poses_3d[i]), axis=-1)
pose_dis_mean = np.mean(pose_dis, axis=-1)
# out.append(pose_dis_mean)
train_pose_3d.append(pose_dis_mean)
# plot17j(out, subject, action, show_animation=False)
full_pose = np.concatenate(train_pose_3d, axis=0)
# Sorted from large to small distance
sorted_index = np.argsort(-full_pose)
full_pose.tolist()
# sorted_dis = sorted(full_pose, reverse=True)
# print('From large to small value:',sorted_dis)
print('index', sorted_index)
num = len(full_pose)
print('Total pose:', num)
ratio = ra
pick_num = int(ratio * num)
print('Picked number:', pick_num)
pick_index = sorted_index[:pick_num]
np.set_printoptions(threshold=np.inf)
# print(pick_index)
rerank = sorted(pick_index)
print('rerank', len(rerank))
return rerank | [
"def",
"get_index",
"(",
"subjects",
",",
"ra",
")",
":",
"train_pose_3d",
"=",
"[",
"]",
"for",
"subject",
"in",
"subjects",
":",
"# print('subject',subject)",
"for",
"action",
"in",
"dataset",
"[",
"subject",
"]",
".",
"keys",
"(",
")",
":",
"# print('action',action)",
"# poses_2d = keypoints[subject][action]",
"poses_3d",
"=",
"dataset",
"[",
"subject",
"]",
"[",
"action",
"]",
"[",
"'positions_3d'",
"]",
"# out = []",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"poses_3d",
")",
")",
":",
"# Remove global offset, but keep trajectory in first position",
"poses_3d",
"[",
"i",
"]",
"-=",
"poses_3d",
"[",
"i",
"]",
"[",
":",
",",
":",
"1",
"]",
"if",
"cal_mean",
":",
"mean_3d_1",
"=",
"np",
".",
"mean",
"(",
"poses_3d",
"[",
"i",
"]",
",",
"axis",
"=",
"0",
")",
"elif",
"cal_distance",
":",
"ext_mean_pose",
"=",
"np",
".",
"repeat",
"(",
"mean_pose",
"[",
"np",
".",
"newaxis",
",",
":",
",",
":",
"]",
",",
"poses_3d",
"[",
"i",
"]",
".",
"shape",
"[",
"0",
"]",
",",
"axis",
"=",
"0",
")",
"assert",
"ext_mean_pose",
".",
"shape",
"==",
"poses_3d",
"[",
"i",
"]",
".",
"shape",
"pose_dis",
"=",
"np",
".",
"linalg",
".",
"norm",
"(",
"(",
"ext_mean_pose",
"-",
"poses_3d",
"[",
"i",
"]",
")",
",",
"axis",
"=",
"-",
"1",
")",
"pose_dis_mean",
"=",
"np",
".",
"mean",
"(",
"pose_dis",
",",
"axis",
"=",
"-",
"1",
")",
"# out.append(pose_dis_mean)",
"train_pose_3d",
".",
"append",
"(",
"pose_dis_mean",
")",
"# plot17j(out, subject, action, show_animation=False)",
"full_pose",
"=",
"np",
".",
"concatenate",
"(",
"train_pose_3d",
",",
"axis",
"=",
"0",
")",
"# Sorted from large to small distance",
"sorted_index",
"=",
"np",
".",
"argsort",
"(",
"-",
"full_pose",
")",
"full_pose",
".",
"tolist",
"(",
")",
"# sorted_dis = sorted(full_pose, reverse=True)",
"# print('From large to small value:',sorted_dis)",
"print",
"(",
"'index'",
",",
"sorted_index",
")",
"num",
"=",
"len",
"(",
"full_pose",
")",
"print",
"(",
"'Total pose:'",
",",
"num",
")",
"ratio",
"=",
"ra",
"pick_num",
"=",
"int",
"(",
"ratio",
"*",
"num",
")",
"print",
"(",
"'Picked number:'",
",",
"pick_num",
")",
"pick_index",
"=",
"sorted_index",
"[",
":",
"pick_num",
"]",
"np",
".",
"set_printoptions",
"(",
"threshold",
"=",
"np",
".",
"inf",
")",
"# print(pick_index)",
"rerank",
"=",
"sorted",
"(",
"pick_index",
")",
"print",
"(",
"'rerank'",
",",
"len",
"(",
"rerank",
")",
")",
"return",
"rerank"
] | [
119,
0
] | [
165,
17
] | python | en | ['en', 'error', 'th'] | False |
split_data | (index) |
Partition the index into a list of groups, adding one more dimension
:param index: a long list of indices
:return out: the split index, type: List
|
Partition the index into a list of groups, adding one more dimension
:param index: a long list of indices
:return out: the split index, type: List
| def split_data(index):
"""
Partition the index into a list of groups, adding one more dimension
:param index: a long list of indices
:return out: the split index, type: List
"""
out = []
j = 0
for i in index:
if i < len(index) - 1:
if index[i + 1] - index[i] > 5:
print('Split index into smaller groups:', j, i)
out.append(index[j:(i + 1)])
j = i + 1
elif i == len(index) - 1:
out.append(index[j:])
print('Split group:', len(out))
return out | [
"def",
"split_data",
"(",
"index",
")",
":",
"out",
"=",
"[",
"]",
"j",
"=",
"0",
"for",
"i",
"in",
"index",
":",
"if",
"i",
"<",
"len",
"(",
"index",
")",
"-",
"1",
":",
"if",
"index",
"[",
"i",
"+",
"1",
"]",
"-",
"index",
"[",
"i",
"]",
">",
"5",
":",
"print",
"(",
"'Split index into smaller groups:'",
",",
"j",
",",
"i",
")",
"out",
".",
"append",
"(",
"index",
"[",
"j",
":",
"(",
"i",
"+",
"1",
")",
"]",
")",
"j",
"=",
"i",
"+",
"1",
"elif",
"i",
"==",
"len",
"(",
"index",
")",
"-",
"1",
":",
"out",
".",
"append",
"(",
"index",
"[",
"j",
":",
"]",
")",
"print",
"(",
"'Split group:'",
",",
"len",
"(",
"out",
")",
")",
"return",
"out"
] | [
187,
0
] | [
204,
14
] | python | en | ['en', 'error', 'th'] | False |
Clock.start_clock | (self) | Do any setup this clock might need.
Called at the beginning of the run.
| Do any setup this clock might need. | def start_clock(self):
"""Do any setup this clock might need.
Called at the beginning of the run.
""" | [
"def",
"start_clock",
"(",
"self",
")",
":"
] | [
15,
4
] | [
20,
11
] | python | en | ['en', 'mg', 'en'] | True |
Clock.current_time | (self) | Return the current time, according to this clock.
This is used to implement functions like :func:`trio.current_time` and
:func:`trio.move_on_after`.
Returns:
float: The current time.
| Return the current time, according to this clock. | def current_time(self):
"""Return the current time, according to this clock.
This is used to implement functions like :func:`trio.current_time` and
:func:`trio.move_on_after`.
Returns:
float: The current time.
""" | [
"def",
"current_time",
"(",
"self",
")",
":"
] | [
23,
4
] | [
32,
11
] | python | en | ['en', 'en', 'en'] | True |
Clock.deadline_to_sleep_time | (self, deadline) | Compute the real time until the given deadline.
This is called before we enter a system-specific wait function like
:func:`select.select`, to get the timeout to pass.
For a clock using wall-time, this should be something like::
return deadline - self.current_time()
but of course it may be different if you're implementing some kind of
virtual clock.
Args:
deadline (float): The absolute time of the next deadline,
according to this clock.
Returns:
float: The number of real seconds to sleep until the given
deadline. May be :data:`math.inf`.
| Compute the real time until the given deadline. | def deadline_to_sleep_time(self, deadline):
"""Compute the real time until the given deadline.
This is called before we enter a system-specific wait function like
:func:`select.select`, to get the timeout to pass.
For a clock using wall-time, this should be something like::
return deadline - self.current_time()
but of course it may be different if you're implementing some kind of
virtual clock.
Args:
deadline (float): The absolute time of the next deadline,
according to this clock.
Returns:
float: The number of real seconds to sleep until the given
deadline. May be :data:`math.inf`.
""" | [
"def",
"deadline_to_sleep_time",
"(",
"self",
",",
"deadline",
")",
":"
] | [
35,
4
] | [
56,
11
] | python | en | ['en', 'en', 'en'] | True |
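Editor's note: the three Clock records above (start_clock, current_time, deadline_to_sleep_time) describe trio's pluggable clock interface. Below is a minimal wall-clock implementation sketch; trio already ships an equivalent default clock, so this is purely illustrative.

import time
import trio

class WallClock(trio.abc.Clock):
    def start_clock(self):
        pass                                        # nothing to set up

    def current_time(self):
        return time.monotonic()                     # seconds from an arbitrary monotonic origin

    def deadline_to_sleep_time(self, deadline):
        return deadline - self.current_time()       # may be math.inf when there is no deadline

async def main():
    await trio.sleep(0.1)

trio.run(main, clock=WallClock())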
Instrument.before_run | (self) | Called at the beginning of :func:`trio.run`. | Called at the beginning of :func:`trio.run`. | def before_run(self):
"""Called at the beginning of :func:`trio.run`.""" | [
"def",
"before_run",
"(",
"self",
")",
":"
] | [
69,
4
] | [
70,
58
] | python | en | ['en', 'en', 'en'] | True |
Instrument.after_run | (self) | Called just before :func:`trio.run` returns. | Called just before :func:`trio.run` returns. | def after_run(self):
"""Called just before :func:`trio.run` returns.""" | [
"def",
"after_run",
"(",
"self",
")",
":"
] | [
72,
4
] | [
73,
58
] | python | en | ['en', 'la', 'en'] | True |
Instrument.task_spawned | (self, task) | Called when the given task is created.
Args:
task (trio.lowlevel.Task): The new task.
| Called when the given task is created. | def task_spawned(self, task):
"""Called when the given task is created.
Args:
task (trio.lowlevel.Task): The new task.
""" | [
"def",
"task_spawned",
"(",
"self",
",",
"task",
")",
":"
] | [
75,
4
] | [
81,
11
] | python | en | ['en', 'en', 'en'] | True |
Instrument.task_scheduled | (self, task) | Called when the given task becomes runnable.
It may still be some time before it actually runs, if there are other
runnable tasks ahead of it.
Args:
task (trio.lowlevel.Task): The task that became runnable.
| Called when the given task becomes runnable. | def task_scheduled(self, task):
"""Called when the given task becomes runnable.
It may still be some time before it actually runs, if there are other
runnable tasks ahead of it.
Args:
task (trio.lowlevel.Task): The task that became runnable.
""" | [
"def",
"task_scheduled",
"(",
"self",
",",
"task",
")",
":"
] | [
83,
4
] | [
92,
11
] | python | en | ['en', 'en', 'en'] | True |
Instrument.before_task_step | (self, task) | Called immediately before we resume running the given task.
Args:
task (trio.lowlevel.Task): The task that is about to run.
| Called immediately before we resume running the given task. | def before_task_step(self, task):
"""Called immediately before we resume running the given task.
Args:
task (trio.lowlevel.Task): The task that is about to run.
""" | [
"def",
"before_task_step",
"(",
"self",
",",
"task",
")",
":"
] | [
94,
4
] | [
100,
11
] | python | en | ['en', 'en', 'en'] | True |
Instrument.after_task_step | (self, task) | Called when we return to the main run loop after a task has yielded.
Args:
task (trio.lowlevel.Task): The task that just ran.
| Called when we return to the main run loop after a task has yielded. | def after_task_step(self, task):
"""Called when we return to the main run loop after a task has yielded.
Args:
task (trio.lowlevel.Task): The task that just ran.
""" | [
"def",
"after_task_step",
"(",
"self",
",",
"task",
")",
":"
] | [
102,
4
] | [
108,
11
] | python | en | ['en', 'en', 'en'] | True |
Instrument.task_exited | (self, task) | Called when the given task exits.
Args:
task (trio.lowlevel.Task): The finished task.
| Called when the given task exits. | def task_exited(self, task):
"""Called when the given task exits.
Args:
task (trio.lowlevel.Task): The finished task.
""" | [
"def",
"task_exited",
"(",
"self",
",",
"task",
")",
":"
] | [
110,
4
] | [
116,
11
] | python | en | ['en', 'en', 'en'] | True |
Instrument.before_io_wait | (self, timeout) | Called before blocking to wait for I/O readiness.
Args:
timeout (float): The number of seconds we are willing to wait.
| Called before blocking to wait for I/O readiness. | def before_io_wait(self, timeout):
"""Called before blocking to wait for I/O readiness.
Args:
timeout (float): The number of seconds we are willing to wait.
""" | [
"def",
"before_io_wait",
"(",
"self",
",",
"timeout",
")",
":"
] | [
118,
4
] | [
124,
11
] | python | en | ['en', 'en', 'en'] | True |
Instrument.after_io_wait | (self, timeout) | Called after handling pending I/O.
Args:
timeout (float): The number of seconds we were willing to
wait. This much time may or may not have elapsed, depending on
whether any I/O was ready.
| Called after handling pending I/O. | def after_io_wait(self, timeout):
"""Called after handling pending I/O.
Args:
timeout (float): The number of seconds we were willing to
wait. This much time may or may not have elapsed, depending on
whether any I/O was ready.
""" | [
"def",
"after_io_wait",
"(",
"self",
",",
"timeout",
")",
":"
] | [
126,
4
] | [
134,
11
] | python | da | ['da', 'da', 'en'] | True |
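Editor's note: the Instrument records above are optional hooks; a subclass may override any subset of them. The sketch below logs two of the hooks and passes the instrument to trio.run(); it is illustrative only and assumes trio is installed.

import trio

class StepLogger(trio.abc.Instrument):
    def task_scheduled(self, task):
        print(f"scheduled: {task.name}")

    def before_task_step(self, task):
        print(f"running:   {task.name}")

async def main():
    async with trio.open_nursery() as nursery:
        nursery.start_soon(trio.sleep, 0.01)

trio.run(main, instruments=[StepLogger()])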
HostnameResolver.getaddrinfo | (self, host, port, family=0, type=0, proto=0, flags=0) | A custom implementation of :func:`~trio.socket.getaddrinfo`.
Called by :func:`trio.socket.getaddrinfo`.
If ``host`` is given as a numeric IP address, then
:func:`~trio.socket.getaddrinfo` may handle the request itself rather
than calling this method.
Any required IDNA encoding is handled before calling this function;
your implementation can assume that it will never see U-labels like
``"café.com"``, and only needs to handle A-labels like
``b"xn--caf-dma.com"``.
| A custom implementation of :func:`~trio.socket.getaddrinfo`. | async def getaddrinfo(self, host, port, family=0, type=0, proto=0, flags=0):
"""A custom implementation of :func:`~trio.socket.getaddrinfo`.
Called by :func:`trio.socket.getaddrinfo`.
If ``host`` is given as a numeric IP address, then
:func:`~trio.socket.getaddrinfo` may handle the request itself rather
than calling this method.
Any required IDNA encoding is handled before calling this function;
your implementation can assume that it will never see U-labels like
``"café.com"``, and only needs to handle A-labels like
``b"xn--caf-dma.com"``.
""" | [
"async",
"def",
"getaddrinfo",
"(",
"self",
",",
"host",
",",
"port",
",",
"family",
"=",
"0",
",",
"type",
"=",
"0",
",",
"proto",
"=",
"0",
",",
"flags",
"=",
"0",
")",
":"
] | [
148,
4
] | [
162,
11
] | python | en | ['en', 'sv', 'en'] | True |
HostnameResolver.getnameinfo | (self, sockaddr, flags) | A custom implementation of :func:`~trio.socket.getnameinfo`.
Called by :func:`trio.socket.getnameinfo`.
| A custom implementation of :func:`~trio.socket.getnameinfo`. | async def getnameinfo(self, sockaddr, flags):
"""A custom implementation of :func:`~trio.socket.getnameinfo`.
Called by :func:`trio.socket.getnameinfo`.
""" | [
"async",
"def",
"getnameinfo",
"(",
"self",
",",
"sockaddr",
",",
"flags",
")",
":"
] | [
165,
4
] | [
170,
11
] | python | en | ['en', 'en', 'en'] | True |
SocketFactory.socket | (self, family=None, type=None, proto=None) | Create and return a socket object.
Your socket object must inherit from :class:`trio.socket.SocketType`,
which is an empty class whose only purpose is to "mark" which classes
should be considered valid Trio sockets.
Called by :func:`trio.socket.socket`.
Note that unlike :func:`trio.socket.socket`, this does not take a
``fileno=`` argument. If a ``fileno=`` is specified, then
:func:`trio.socket.socket` returns a regular Trio socket object
instead of calling this method.
| Create and return a socket object. | def socket(self, family=None, type=None, proto=None):
"""Create and return a socket object.
Your socket object must inherit from :class:`trio.socket.SocketType`,
which is an empty class whose only purpose is to "mark" which classes
should be considered valid Trio sockets.
Called by :func:`trio.socket.socket`.
Note that unlike :func:`trio.socket.socket`, this does not take a
``fileno=`` argument. If a ``fileno=`` is specified, then
:func:`trio.socket.socket` returns a regular Trio socket object
instead of calling this method.
""" | [
"def",
"socket",
"(",
"self",
",",
"family",
"=",
"None",
",",
"type",
"=",
"None",
",",
"proto",
"=",
"None",
")",
":"
] | [
182,
4
] | [
196,
11
] | python | en | ['en', 'ig', 'en'] | True |
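A sketch of a socket factory that enables SO_KEEPALIVE on every socket Trio creates. The fallback defaults for the None parameters and the use of trio.socket.from_stdlib_socket plus trio.socket.set_custom_socket_factory are assumptions made for the example:

    import socket as stdlib_socket
    import trio

    class KeepaliveSocketFactory(trio.abc.SocketFactory):
        """Create Trio sockets with SO_KEEPALIVE already enabled."""

        def socket(self, family=None, type=None, proto=None):
            # The abstract signature allows None; fill in conventional defaults.
            family = stdlib_socket.AF_INET if family is None else family
            type = stdlib_socket.SOCK_STREAM if type is None else type
            proto = 0 if proto is None else proto
            raw = stdlib_socket.socket(family, type, proto)
            raw.setsockopt(stdlib_socket.SOL_SOCKET, stdlib_socket.SO_KEEPALIVE, 1)
            # Wrap the stdlib socket so the result is a valid Trio socket object.
            return trio.socket.from_stdlib_socket(raw)

    trio.socket.set_custom_socket_factory(KeepaliveSocketFactory())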
AsyncResource.aclose | (self) | Close this resource, possibly blocking.
IMPORTANT: This method may block in order to perform a "graceful"
shutdown. But, if this fails, then it still *must* close any
underlying resources before returning. An error from this method
indicates a failure to achieve grace, *not* a failure to close the
connection.
For example, suppose we call :meth:`aclose` on a TLS-encrypted
connection. This requires sending a "goodbye" message; but if the peer
has become non-responsive, then our attempt to send this message might
block forever, and eventually time out and be cancelled. In this case
the :meth:`aclose` method on :class:`~trio.SSLStream` will
immediately close the underlying transport stream using
:func:`trio.aclose_forcefully` before raising :exc:`~trio.Cancelled`.
If the resource is already closed, then this method should silently
succeed.
Once this method completes, any other pending or future operations on
this resource should generally raise :exc:`~trio.ClosedResourceError`,
unless there's a good reason to do otherwise.
See also: :func:`trio.aclose_forcefully`.
| Close this resource, possibly blocking. | async def aclose(self):
"""Close this resource, possibly blocking.
IMPORTANT: This method may block in order to perform a "graceful"
shutdown. But, if this fails, then it still *must* close any
underlying resources before returning. An error from this method
indicates a failure to achieve grace, *not* a failure to close the
connection.
For example, suppose we call :meth:`aclose` on a TLS-encrypted
connection. This requires sending a "goodbye" message; but if the peer
has become non-responsive, then our attempt to send this message might
block forever, and eventually time out and be cancelled. In this case
the :meth:`aclose` method on :class:`~trio.SSLStream` will
immediately close the underlying transport stream using
:func:`trio.aclose_forcefully` before raising :exc:`~trio.Cancelled`.
If the resource is already closed, then this method should silently
succeed.
Once this method completes, any other pending or future operations on
this resource should generally raise :exc:`~trio.ClosedResourceError`,
unless there's a good reason to do otherwise.
See also: :func:`trio.aclose_forcefully`.
""" | [
"async",
"def",
"aclose",
"(",
"self",
")",
":"
] | [
228,
4
] | [
254,
11
] | python | en | ['en', 'en', 'en'] | True |
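In application code the aclose contract usually surfaces through async with, which closes the resource on exit and, if that close is cancelled, still tears down the underlying transport before the cancellation propagates. A small sketch; the host, port, and payload are arbitrary:

    import trio

    async def main():
        stream = await trio.open_tcp_stream("example.com", 80)
        async with stream:   # __aexit__ awaits stream.aclose()
            await stream.send_all(b"HEAD / HTTP/1.0\r\nHost: example.com\r\n\r\n")
            print(await stream.receive_some(1024))
        # If aclose had been cancelled (say, by a surrounding timeout), the stream
        # would still have been closed forcefully before Cancelled propagated.

    trio.run(main)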
SendStream.send_all | (self, data) | Sends the given data through the stream, blocking if necessary.
Args:
data (bytes, bytearray, or memoryview): The data to send.
Raises:
trio.BusyResourceError: if another task is already executing a
:meth:`send_all`, :meth:`wait_send_all_might_not_block`, or
:meth:`HalfCloseableStream.send_eof` on this stream.
trio.BrokenResourceError: if something has gone wrong, and the stream
is broken.
trio.ClosedResourceError: if you previously closed this stream
object, or if another task closes this stream object while
:meth:`send_all` is running.
Most low-level operations in Trio provide a guarantee: if they raise
:exc:`trio.Cancelled`, this means that they had no effect, so the
system remains in a known state. This is **not true** for
:meth:`send_all`. If this operation raises :exc:`trio.Cancelled` (or
any other exception for that matter), then it may have sent some, all,
or none of the requested data, and there is no way to know which.
| Sends the given data through the stream, blocking if necessary. | async def send_all(self, data):
"""Sends the given data through the stream, blocking if necessary.
Args:
data (bytes, bytearray, or memoryview): The data to send.
Raises:
trio.BusyResourceError: if another task is already executing a
:meth:`send_all`, :meth:`wait_send_all_might_not_block`, or
:meth:`HalfCloseableStream.send_eof` on this stream.
trio.BrokenResourceError: if something has gone wrong, and the stream
is broken.
trio.ClosedResourceError: if you previously closed this stream
object, or if another task closes this stream object while
:meth:`send_all` is running.
Most low-level operations in Trio provide a guarantee: if they raise
:exc:`trio.Cancelled`, this means that they had no effect, so the
system remains in a known state. This is **not true** for
:meth:`send_all`. If this operation raises :exc:`trio.Cancelled` (or
any other exception for that matter), then it may have sent some, all,
or none of the requested data, and there is no way to know which.
""" | [
"async",
"def",
"send_all",
"(",
"self",
",",
"data",
")",
":"
] | [
282,
4
] | [
305,
11
] | python | en | ['en', 'en', 'en'] | True |
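The cancellation caveat at the end of the send_all docstring means a timed-out send generally cannot be retried on the same stream, because an unknown prefix of the data may already be on the wire. A hedged sketch of the usual remedy, which is to abandon the stream:

    import trio

    async def send_with_deadline(stream, payload, seconds):
        """Send payload, but stop using the stream if the send cannot finish in time."""
        with trio.move_on_after(seconds) as scope:
            await stream.send_all(payload)
        if scope.cancelled_caught:
            # send_all was interrupted: some unknown prefix of payload may already
            # be on the wire, so the only safe option is to abandon the stream.
            await trio.aclose_forcefully(stream)
            raise RuntimeError("send timed out; stream was closed")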
SendStream.wait_send_all_might_not_block | (self) | Block until it's possible that :meth:`send_all` might not block.
This method may return early: it's possible that after it returns,
:meth:`send_all` will still block. (In the worst case, if no better
implementation is available, then it might always return immediately
without blocking. It's nice to do better than that when possible,
though.)
This method **must not** return *late*: if it's possible for
:meth:`send_all` to complete without blocking, then it must
return. When implementing it, err on the side of returning early.
Raises:
trio.BusyResourceError: if another task is already executing a
:meth:`send_all`, :meth:`wait_send_all_might_not_block`, or
:meth:`HalfCloseableStream.send_eof` on this stream.
trio.BrokenResourceError: if something has gone wrong, and the stream
is broken.
trio.ClosedResourceError: if you previously closed this stream
object, or if another task closes this stream object while
:meth:`wait_send_all_might_not_block` is running.
Note:
This method is intended to aid in implementing protocols that want
to delay choosing which data to send until the last moment. E.g.,
suppose you're working on an implementation of a remote display server
like `VNC
<https://en.wikipedia.org/wiki/Virtual_Network_Computing>`__, and
the network connection is currently backed up so that if you call
:meth:`send_all` now then it will sit for 0.5 seconds before actually
sending anything. In this case it doesn't make sense to take a
screenshot, then wait 0.5 seconds, and then send it, because the
screen will keep changing while you wait; it's better to wait 0.5
seconds, then take the screenshot, and then send it, because this
way the data you deliver will be more
up-to-date. Using :meth:`wait_send_all_might_not_block` makes it
possible to implement the better strategy.
If you use this method, you might also want to read up on
``TCP_NOTSENT_LOWAT``.
Further reading:
* `Prioritization Only Works When There's Pending Data to Prioritize
<https://insouciant.org/tech/prioritization-only-works-when-theres-pending-data-to-prioritize/>`__
* WWDC 2015: Your App and Next Generation Networks: `slides
<http://devstreaming.apple.com/videos/wwdc/2015/719ui2k57m/719/719_your_app_and_next_generation_networks.pdf?dl=1>`__,
`video and transcript
<https://developer.apple.com/videos/play/wwdc2015/719/>`__
| Block until it's possible that :meth:`send_all` might not block. | async def wait_send_all_might_not_block(self):
"""Block until it's possible that :meth:`send_all` might not block.
This method may return early: it's possible that after it returns,
:meth:`send_all` will still block. (In the worst case, if no better
implementation is available, then it might always return immediately
without blocking. It's nice to do better than that when possible,
though.)
This method **must not** return *late*: if it's possible for
:meth:`send_all` to complete without blocking, then it must
return. When implementing it, err on the side of returning early.
Raises:
trio.BusyResourceError: if another task is already executing a
:meth:`send_all`, :meth:`wait_send_all_might_not_block`, or
:meth:`HalfCloseableStream.send_eof` on this stream.
trio.BrokenResourceError: if something has gone wrong, and the stream
is broken.
trio.ClosedResourceError: if you previously closed this stream
object, or if another task closes this stream object while
:meth:`wait_send_all_might_not_block` is running.
Note:
This method is intended to aid in implementing protocols that want
to delay choosing which data to send until the last moment. E.g.,
suppose you're working on an implementation of a remote display server
like `VNC
<https://en.wikipedia.org/wiki/Virtual_Network_Computing>`__, and
the network connection is currently backed up so that if you call
:meth:`send_all` now then it will sit for 0.5 seconds before actually
sending anything. In this case it doesn't make sense to take a
screenshot, then wait 0.5 seconds, and then send it, because the
screen will keep changing while you wait; it's better to wait 0.5
seconds, then take the screenshot, and then send it, because this
way the data you deliver will be more
up-to-date. Using :meth:`wait_send_all_might_not_block` makes it
possible to implement the better strategy.
If you use this method, you might also want to read up on
``TCP_NOTSENT_LOWAT``.
Further reading:
* `Prioritization Only Works When There's Pending Data to Prioritize
<https://insouciant.org/tech/prioritization-only-works-when-theres-pending-data-to-prioritize/>`__
* WWDC 2015: Your App and Next Generation Networks: `slides
<http://devstreaming.apple.com/videos/wwdc/2015/719ui2k57m/719/719_your_app_and_next_generation_networks.pdf?dl=1>`__,
`video and transcript
<https://developer.apple.com/videos/play/wwdc2015/719/>`__
""" | [
"async",
"def",
"wait_send_all_might_not_block",
"(",
"self",
")",
":"
] | [
308,
4
] | [
361,
11
] | python | en | ['en', 'en', 'en'] | True |
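The screen-sharing note maps onto a short loop: wait until sending probably will not block, only then snapshot the freshest data, then send it. capture_frame below is a hypothetical placeholder for whatever produces the latest frame:

    import trio

    async def stream_frames(stream, capture_frame):
        """Send the most recent frame each time the transport can take more data.

        capture_frame is assumed to be a cheap synchronous callable returning bytes.
        """
        while True:
            # Delay choosing what to send until the last moment ...
            await stream.wait_send_all_might_not_block()
            frame = capture_frame()   # ... so this snapshot is as fresh as possible
            await stream.send_all(frame)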
ReceiveStream.receive_some | (self, max_bytes=None) | Wait until there is data available on this stream, and then return
some of it.
A return value of ``b""`` (an empty bytestring) indicates that the
stream has reached end-of-file. Implementations should be careful that
they return ``b""`` if, and only if, the stream has reached
end-of-file!
Args:
max_bytes (int): The maximum number of bytes to return. Must be
greater than zero. Optional; if omitted, then the stream object
is free to pick a reasonable default.
Returns:
bytes or bytearray: The data received.
Raises:
trio.BusyResourceError: if two tasks attempt to call
:meth:`receive_some` on the same stream at the same time.
trio.BrokenResourceError: if something has gone wrong, and the stream
is broken.
trio.ClosedResourceError: if you previously closed this stream
object, or if another task closes this stream object while
:meth:`receive_some` is running.
| Wait until there is data available on this stream, and then return
some of it. | async def receive_some(self, max_bytes=None):
"""Wait until there is data available on this stream, and then return
some of it.
A return value of ``b""`` (an empty bytestring) indicates that the
stream has reached end-of-file. Implementations should be careful that
they return ``b""`` if, and only if, the stream has reached
end-of-file!
Args:
max_bytes (int): The maximum number of bytes to return. Must be
greater than zero. Optional; if omitted, then the stream object
is free to pick a reasonable default.
Returns:
bytes or bytearray: The data received.
Raises:
trio.BusyResourceError: if two tasks attempt to call
:meth:`receive_some` on the same stream at the same time.
trio.BrokenResourceError: if something has gone wrong, and the stream
is broken.
trio.ClosedResourceError: if you previously closed this stream
object, or if another task closes this stream object while
:meth:`receive_some` is running.
""" | [
"async",
"def",
"receive_some",
"(",
"self",
",",
"max_bytes",
"=",
"None",
")",
":"
] | [
388,
4
] | [
414,
11
] | python | en | ['en', 'en', 'en'] | True |
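Since b"" is the end-of-file sentinel, draining a stream is a loop that stops on an empty chunk. A minimal sketch; the 16 KiB chunk size is an arbitrary choice:

    import trio

    async def read_all(stream, chunk_size=16384):
        """Collect everything the peer sends until it signals end-of-file."""
        chunks = []
        while True:
            chunk = await stream.receive_some(chunk_size)
            if not chunk:        # b"" means the peer is done sending
                break
            chunks.append(chunk)
        return b"".join(chunks)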
HalfCloseableStream.send_eof | (self) | Send an end-of-file indication on this stream, if possible.
The difference between :meth:`send_eof` and
:meth:`~AsyncResource.aclose` is that :meth:`send_eof` is a
*unidirectional* end-of-file indication. After you call this method,
you shouldn't try sending any more data on this stream, and your
remote peer should receive an end-of-file indication (eventually,
after receiving all the data you sent before that). But, they may
continue to send data to you, and you can continue to receive it by
calling :meth:`~ReceiveStream.receive_some`. You can think of it as
calling :meth:`~AsyncResource.aclose` on just the
:class:`SendStream` "half" of the stream object (and in fact that's
literally how :class:`trio.StapledStream` implements it).
Examples:
* On a socket, this corresponds to ``shutdown(..., SHUT_WR)`` (`man
page <https://linux.die.net/man/2/shutdown>`__).
* The SSH protocol provides the ability to multiplex bidirectional
"channels" on top of a single encrypted connection. A Trio
implementation of SSH could expose these channels as
:class:`HalfCloseableStream` objects, and calling :meth:`send_eof`
would send an ``SSH_MSG_CHANNEL_EOF`` request (see `RFC 4254 §5.3
<https://tools.ietf.org/html/rfc4254#section-5.3>`__).
* On an SSL/TLS-encrypted connection, the protocol doesn't provide any
way to do a unidirectional shutdown without closing the connection
entirely, so :class:`~trio.SSLStream` implements
:class:`Stream`, not :class:`HalfCloseableStream`.
If an EOF has already been sent, then this method should silently
succeed.
Raises:
trio.BusyResourceError: if another task is already executing a
:meth:`~SendStream.send_all`,
:meth:`~SendStream.wait_send_all_might_not_block`, or
:meth:`send_eof` on this stream.
trio.BrokenResourceError: if something has gone wrong, and the stream
is broken.
trio.ClosedResourceError: if you previously closed this stream
object, or if another task closes this stream object while
:meth:`send_eof` is running.
| Send an end-of-file indication on this stream, if possible. | async def send_eof(self):
"""Send an end-of-file indication on this stream, if possible.
The difference between :meth:`send_eof` and
:meth:`~AsyncResource.aclose` is that :meth:`send_eof` is a
*unidirectional* end-of-file indication. After you call this method,
you shouldn't try sending any more data on this stream, and your
remote peer should receive an end-of-file indication (eventually,
after receiving all the data you sent before that). But, they may
continue to send data to you, and you can continue to receive it by
calling :meth:`~ReceiveStream.receive_some`. You can think of it as
calling :meth:`~AsyncResource.aclose` on just the
:class:`SendStream` "half" of the stream object (and in fact that's
literally how :class:`trio.StapledStream` implements it).
Examples:
* On a socket, this corresponds to ``shutdown(..., SHUT_WR)`` (`man
page <https://linux.die.net/man/2/shutdown>`__).
* The SSH protocol provides the ability to multiplex bidirectional
"channels" on top of a single encrypted connection. A Trio
implementation of SSH could expose these channels as
:class:`HalfCloseableStream` objects, and calling :meth:`send_eof`
would send an ``SSH_MSG_CHANNEL_EOF`` request (see `RFC 4254 §5.3
<https://tools.ietf.org/html/rfc4254#section-5.3>`__).
* On an SSL/TLS-encrypted connection, the protocol doesn't provide any
way to do a unidirectional shutdown without closing the connection
entirely, so :class:`~trio.SSLStream` implements
:class:`Stream`, not :class:`HalfCloseableStream`.
If an EOF has already been sent, then this method should silently
succeed.
Raises:
trio.BusyResourceError: if another task is already executing a
:meth:`~SendStream.send_all`,
:meth:`~SendStream.wait_send_all_might_not_block`, or
:meth:`send_eof` on this stream.
trio.BrokenResourceError: if something has gone wrong, and the stream
is broken.
trio.ClosedResourceError: if you previously closed this stream
object, or if another task closes this stream object while
:meth:`send_eof` is running.
""" | [
"async",
"def",
"send_eof",
"(",
"self",
")",
":"
] | [
449,
4
] | [
495,
11
] | python | en | ['en', 'en', 'en'] | True |
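A typical use of send_eof is a request/response exchange over raw TCP: send the request, half-close so the peer sees end-of-file, then read the complete reply. trio.SocketStream, as returned by open_tcp_stream, implements HalfCloseableStream, so the following is a plausible sketch; the host, port, and buffer size are assumptions:

    import trio

    async def request_response(host, port, request):
        stream = await trio.open_tcp_stream(host, port)
        async with stream:
            await stream.send_all(request)
            await stream.send_eof()       # our half is done; the peer sees EOF
            reply = bytearray()
            while True:
                chunk = await stream.receive_some(16384)
                if not chunk:
                    break
                reply.extend(chunk)
            return bytes(reply)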
Listener.accept | (self) | Wait until an incoming connection arrives, and then return it.
Returns:
AsyncResource: An object representing the incoming connection. In
practice this is generally some kind of :class:`Stream`,
but in principle you could also define a :class:`Listener` that
returned, say, channel objects.
Raises:
trio.BusyResourceError: if two tasks attempt to call
:meth:`accept` on the same listener at the same time.
trio.ClosedResourceError: if you previously closed this listener
object, or if another task closes this listener object while
:meth:`accept` is running.
Listeners don't generally raise :exc:`~trio.BrokenResourceError`,
because for listeners there is no general condition of "the
network/remote peer broke the connection" that can be handled in a
generic way, like there is for streams. Other errors *can* occur and
be raised from :meth:`accept` – for example, if you run out of file
descriptors then you might get an :class:`OSError` with its errno set
to ``EMFILE``.
| Wait until an incoming connection arrives, and then return it. | async def accept(self):
"""Wait until an incoming connection arrives, and then return it.
Returns:
AsyncResource: An object representing the incoming connection. In
practice this is generally some kind of :class:`Stream`,
but in principle you could also define a :class:`Listener` that
returned, say, channel objects.
Raises:
trio.BusyResourceError: if two tasks attempt to call
:meth:`accept` on the same listener at the same time.
trio.ClosedResourceError: if you previously closed this listener
object, or if another task closes this listener object while
:meth:`accept` is running.
Listeners don't generally raise :exc:`~trio.BrokenResourceError`,
because for listeners there is no general condition of "the
network/remote peer broke the connection" that can be handled in a
generic way, like there is for streams. Other errors *can* occur and
be raised from :meth:`accept` – for example, if you run out of file
descriptors then you might get an :class:`OSError` with its errno set
to ``EMFILE``.
""" | [
"async",
"def",
"accept",
"(",
"self",
")",
":"
] | [
528,
4
] | [
552,
11
] | python | en | ['en', 'en', 'en'] | True |
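A hand-rolled accept loop built on this interface usually pairs accept with a nursery so each connection is handled concurrently (trio.serve_listeners wraps the same pattern). A sketch using open_tcp_listeners; the echo handler and port number are placeholders:

    import trio

    async def handle(stream):
        async with stream:
            data = await stream.receive_some(1024)
            await stream.send_all(data)          # trivial echo handler

    async def accept_loop(listener, nursery):
        async with listener:
            while True:
                stream = await listener.accept()
                nursery.start_soon(handle, stream)

    async def serve(port):
        listeners = await trio.open_tcp_listeners(port)
        async with trio.open_nursery() as nursery:
            for listener in listeners:
                nursery.start_soon(accept_loop, listener, nursery)

    trio.run(serve, 8080)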
SendChannel.send | (self, value: SendType) | Attempt to send an object through the channel, blocking if necessary.
Args:
value (object): The object to send.
Raises:
trio.BrokenResourceError: if something has gone wrong, and the
channel is broken. For example, you may get this if the receiver
has already been closed.
trio.ClosedResourceError: if you previously closed this
:class:`SendChannel` object, or if another task closes it while
:meth:`send` is running.
trio.BusyResourceError: some channels allow multiple tasks to call
`send` at the same time, but others don't. If you try to call
`send` simultaneously from multiple tasks on a channel that
doesn't support it, then you can get `~trio.BusyResourceError`.
| Attempt to send an object through the channel, blocking if necessary. | async def send(self, value: SendType) -> None:
"""Attempt to send an object through the channel, blocking if necessary.
Args:
value (object): The object to send.
Raises:
trio.BrokenResourceError: if something has gone wrong, and the
channel is broken. For example, you may get this if the receiver
has already been closed.
trio.ClosedResourceError: if you previously closed this
:class:`SendChannel` object, or if another task closes it while
:meth:`send` is running.
trio.BusyResourceError: some channels allow multiple tasks to call
`send` at the same time, but others don't. If you try to call
`send` simultaneously from multiple tasks on a channel that
doesn't support it, then you can get `~trio.BusyResourceError`.
""" | [
"async",
"def",
"send",
"(",
"self",
",",
"value",
":",
"SendType",
")",
"->",
"None",
":"
] | [
570,
4
] | [
588,
11
] | python | en | ['en', 'en', 'en'] | True |
ReceiveChannel.receive | (self) | Attempt to receive an incoming object, blocking if necessary.
Returns:
object: Whatever object was received.
Raises:
trio.EndOfChannel: if the sender has been closed cleanly, and no
more objects are coming. This is not an error condition.
trio.ClosedResourceError: if you previously closed this
:class:`ReceiveChannel` object.
trio.BrokenResourceError: if something has gone wrong, and the
channel is broken.
trio.BusyResourceError: some channels allow multiple tasks to call
`receive` at the same time, but others don't. If you try to call
`receive` simultaneously from multiple tasks on a channel that
doesn't support it, then you can get `~trio.BusyResourceError`.
| Attempt to receive an incoming object, blocking if necessary. | async def receive(self) -> ReceiveType:
"""Attempt to receive an incoming object, blocking if necessary.
Returns:
object: Whatever object was received.
Raises:
trio.EndOfChannel: if the sender has been closed cleanly, and no
more objects are coming. This is not an error condition.
trio.ClosedResourceError: if you previously closed this
:class:`ReceiveChannel` object.
trio.BrokenResourceError: if something has gone wrong, and the
channel is broken.
trio.BusyResourceError: some channels allow multiple tasks to call
`receive` at the same time, but others don't. If you try to call
`receive` simultaneously from multiple tasks on a channel that
doesn't support it, then you can get `~trio.BusyResourceError`.
""" | [
"async",
"def",
"receive",
"(",
"self",
")",
"->",
"ReceiveType",
":"
] | [
615,
4
] | [
633,
11
] | python | en | ['en', 'en', 'en'] | True |
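The SendChannel/ReceiveChannel pair documented in the two rows above is what trio.open_memory_channel returns, and the clean-shutdown story follows from the docstrings: closing the sender makes the receiver see EndOfChannel. A sketch of a producer/consumer pair; async for on the receive channel handles EndOfChannel implicitly:

    import trio

    async def producer(send_channel):
        async with send_channel:                  # closing tells the consumer we're done
            for i in range(5):
                await send_channel.send(i)

    async def consumer(receive_channel):
        async with receive_channel:
            async for value in receive_channel:   # stops cleanly on EndOfChannel
                print("got", value)

    async def main():
        send_channel, receive_channel = trio.open_memory_channel(0)
        async with trio.open_nursery() as nursery:
            nursery.start_soon(producer, send_channel)
            nursery.start_soon(consumer, receive_channel)

    trio.run(main)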
ExpectColumnMaxToBeBetween.validate_configuration | (self, configuration: Optional[ExpectationConfiguration]) |
Validates that a configuration has been set, and sets a configuration if it has yet to be set. Ensures that
necessary configuration arguments have been provided for the validation of the expectation.
Args:
configuration (OPTIONAL[ExpectationConfiguration]): \
An optional Expectation Configuration entry that will be used to configure the expectation
Returns:
True if the configuration has been validated successfully. Otherwise, raises an exception
|
Validates that a configuration has been set, and sets a configuration if it has yet to be set. Ensures that
necessary configuration arguments have been provided for the validation of the expectation. | def validate_configuration(self, configuration: Optional[ExpectationConfiguration]):
"""
Validates that a configuration has been set, and sets a configuration if it has yet to be set. Ensures that
necessary configuration arguments have been provided for the validation of the expectation.
Args:
configuration (OPTIONAL[ExpectationConfiguration]): \
An optional Expectation Configuration entry that will be used to configure the expectation
Returns:
True if the configuration has been validated successfully. Otherwise, raises an exception
"""
super().validate_configuration(configuration)
self.validate_metric_value_between_configuration(configuration=configuration) | [
"def",
"validate_configuration",
"(",
"self",
",",
"configuration",
":",
"Optional",
"[",
"ExpectationConfiguration",
"]",
")",
":",
"super",
"(",
")",
".",
"validate_configuration",
"(",
"configuration",
")",
"self",
".",
"validate_metric_value_between_configuration",
"(",
"configuration",
"=",
"configuration",
")"
] | [
119,
4
] | [
131,
85
] | python | en | ['en', 'error', 'th'] | False |
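validate_configuration is normally exercised indirectly, when an expectation configuration is built and added to a suite. A hedged sketch of such a configuration; the import path and the column/value choices are assumptions, and exact module locations vary between Great Expectations versions:

    # Assumed import path; in some versions this lives deeper under great_expectations.core.
    from great_expectations.core import ExpectationConfiguration

    config = ExpectationConfiguration(
        expectation_type="expect_column_max_to_be_between",
        kwargs={
            "column": "passenger_count",
            "min_value": 1,
            "max_value": 6,
        },
    )
    # ExpectColumnMaxToBeBetween().validate_configuration(config) raises if, for example,
    # min_value/max_value are malformed; otherwise the configuration is accepted.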
test_snowflake_user_password_credentials_exit | (empty_data_context) | Test an empty project and after adding a single datasource. | Test an empty project and after adding a single datasource. | def test_snowflake_user_password_credentials_exit(empty_data_context):
"""Test an empty project and after adding a single datasource."""
project_root_dir = empty_data_context.root_directory
context = DataContext(project_root_dir)
runner = CliRunner(mix_stderr=False)
result = runner.invoke(
cli,
["datasource", "new", "-d", project_root_dir],
catch_exceptions=False,
input="2\n4\nmy_snowflake_db\n1\nuser\nABCD.us-east-1\ndefault_db\ndefault_schema\nxsmall\npublic\npassword\nn\n",
)
stdout = result.stdout.strip()
assert "ok, exiting now" in stdout.lower() | [
"def",
"test_snowflake_user_password_credentials_exit",
"(",
"empty_data_context",
")",
":",
"project_root_dir",
"=",
"empty_data_context",
".",
"root_directory",
"context",
"=",
"DataContext",
"(",
"project_root_dir",
")",
"runner",
"=",
"CliRunner",
"(",
"mix_stderr",
"=",
"False",
")",
"result",
"=",
"runner",
".",
"invoke",
"(",
"cli",
",",
"[",
"\"datasource\"",
",",
"\"new\"",
",",
"\"-d\"",
",",
"project_root_dir",
"]",
",",
"catch_exceptions",
"=",
"False",
",",
"input",
"=",
"\"2\\n4\\nmy_snowflake_db\\n1\\nuser\\nABCD.us-east-1\\ndefault_db\\ndefault_schema\\nxsmall\\npublic\\npassword\\nn\\n\"",
",",
")",
"stdout",
"=",
"result",
".",
"stdout",
".",
"strip",
"(",
")",
"assert",
"\"ok, exiting now\"",
"in",
"stdout",
".",
"lower",
"(",
")"
] | [
13,
0
] | [
27,
46
] | python | en | ['en', 'en', 'en'] | True |
_execute_integration_test | (test_configuration, tmp_path) |
Prepare an environment and run integration tests from a list of tests.
Note that the only required parameter for a test in the matrix is
`user_flow_script` and that all other parameters are optional.
|
Prepare an environment and run integration tests from a list of tests. | def _execute_integration_test(test_configuration, tmp_path):
"""
Prepare an environment and run integration tests from a list of tests.
Note that the only required parameter for a test in the matrix is
`user_flow_script` and that all other parameters are optional.
"""
assert (
"user_flow_script" in test_configuration.keys()
), "a `user_flow_script` is required"
workdir = os.getcwd()
try:
base_dir = test_configuration.get(
"base_dir", file_relative_path(__file__, "../../")
)
os.chdir(tmp_path)
# Ensure GE is installed in our environment
ge_requirement = test_configuration.get("ge_requirement", "great_expectations")
execute_shell_command(f"pip install {ge_requirement}")
#
# Build test state
#
# DataContext
if test_configuration.get("data_context_dir"):
context_source_dir = os.path.join(
base_dir, test_configuration.get("data_context_dir")
)
test_context_dir = os.path.join(tmp_path, "great_expectations")
shutil.copytree(
context_source_dir,
test_context_dir,
)
# Test Data
if test_configuration.get("data_dir") is not None:
source_data_dir = os.path.join(base_dir, test_configuration.get("data_dir"))
test_data_dir = os.path.join(tmp_path, "data")
shutil.copytree(
source_data_dir,
test_data_dir,
)
# UAT Script
script_source = os.path.join(
base_dir,
test_configuration.get("user_flow_script"),
)
script_path = os.path.join(tmp_path, "test_script.py")
shutil.copyfile(script_source, script_path)
# Util Script
if test_configuration.get("util_script") is not None:
script_source = os.path.join(
base_dir,
test_configuration.get("util_script"),
)
util_script_path = os.path.join(tmp_path, "util.py")
shutil.copyfile(script_source, util_script_path)
# Check initial state
# Execute test
res = subprocess.run(["python", script_path], capture_output=True)
# Check final state
expected_stderrs = test_configuration.get("expected_stderrs")
expected_stdouts = test_configuration.get("expected_stdouts")
expected_failure = test_configuration.get("expected_failure")
outs = res.stdout.decode("utf-8")
errs = res.stderr.decode("utf-8")
print(outs)
print(errs)
if expected_stderrs:
assert expected_stderrs == errs
if expected_stdouts:
assert expected_stdouts == outs
if expected_failure:
assert res.returncode != 0
else:
assert res.returncode == 0
except:
raise
finally:
os.chdir(workdir) | [
"def",
"_execute_integration_test",
"(",
"test_configuration",
",",
"tmp_path",
")",
":",
"assert",
"(",
"\"user_flow_script\"",
"in",
"test_configuration",
".",
"keys",
"(",
")",
")",
",",
"\"a `user_flow_script` is required\"",
"workdir",
"=",
"os",
".",
"getcwd",
"(",
")",
"try",
":",
"base_dir",
"=",
"test_configuration",
".",
"get",
"(",
"\"base_dir\"",
",",
"file_relative_path",
"(",
"__file__",
",",
"\"../../\"",
")",
")",
"os",
".",
"chdir",
"(",
"tmp_path",
")",
"# Ensure GE is installed in our environment",
"ge_requirement",
"=",
"test_configuration",
".",
"get",
"(",
"\"ge_requirement\"",
",",
"\"great_expectations\"",
")",
"execute_shell_command",
"(",
"f\"pip install {ge_requirement}\"",
")",
"#",
"# Build test state",
"#",
"# DataContext",
"if",
"test_configuration",
".",
"get",
"(",
"\"data_context_dir\"",
")",
":",
"context_source_dir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"base_dir",
",",
"test_configuration",
".",
"get",
"(",
"\"data_context_dir\"",
")",
")",
"test_context_dir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"tmp_path",
",",
"\"great_expectations\"",
")",
"shutil",
".",
"copytree",
"(",
"context_source_dir",
",",
"test_context_dir",
",",
")",
"# Test Data",
"if",
"test_configuration",
".",
"get",
"(",
"\"data_dir\"",
")",
"is",
"not",
"None",
":",
"source_data_dir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"base_dir",
",",
"test_configuration",
".",
"get",
"(",
"\"data_dir\"",
")",
")",
"test_data_dir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"tmp_path",
",",
"\"data\"",
")",
"shutil",
".",
"copytree",
"(",
"source_data_dir",
",",
"test_data_dir",
",",
")",
"# UAT Script",
"script_source",
"=",
"os",
".",
"path",
".",
"join",
"(",
"base_dir",
",",
"test_configuration",
".",
"get",
"(",
"\"user_flow_script\"",
")",
",",
")",
"script_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"tmp_path",
",",
"\"test_script.py\"",
")",
"shutil",
".",
"copyfile",
"(",
"script_source",
",",
"script_path",
")",
"# Util Script",
"if",
"test_configuration",
".",
"get",
"(",
"\"util_script\"",
")",
"is",
"not",
"None",
":",
"script_source",
"=",
"os",
".",
"path",
".",
"join",
"(",
"base_dir",
",",
"test_configuration",
".",
"get",
"(",
"\"util_script\"",
")",
",",
")",
"util_script_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"tmp_path",
",",
"\"util.py\"",
")",
"shutil",
".",
"copyfile",
"(",
"script_source",
",",
"util_script_path",
")",
"# Check initial state",
"# Execute test",
"res",
"=",
"subprocess",
".",
"run",
"(",
"[",
"\"python\"",
",",
"script_path",
"]",
",",
"capture_output",
"=",
"True",
")",
"# Check final state",
"expected_stderrs",
"=",
"test_configuration",
".",
"get",
"(",
"\"expected_stderrs\"",
")",
"expected_stdouts",
"=",
"test_configuration",
".",
"get",
"(",
"\"expected_stdouts\"",
")",
"expected_failure",
"=",
"test_configuration",
".",
"get",
"(",
"\"expected_failure\"",
")",
"outs",
"=",
"res",
".",
"stdout",
".",
"decode",
"(",
"\"utf-8\"",
")",
"errs",
"=",
"res",
".",
"stderr",
".",
"decode",
"(",
"\"utf-8\"",
")",
"print",
"(",
"outs",
")",
"print",
"(",
"errs",
")",
"if",
"expected_stderrs",
":",
"assert",
"expected_stderrs",
"==",
"errs",
"if",
"expected_stdouts",
":",
"assert",
"expected_stdouts",
"==",
"outs",
"if",
"expected_failure",
":",
"assert",
"res",
".",
"returncode",
"!=",
"0",
"else",
":",
"assert",
"res",
".",
"returncode",
"==",
"0",
"except",
":",
"raise",
"finally",
":",
"os",
".",
"chdir",
"(",
"workdir",
")"
] | [
238,
0
] | [
325,
25
] | python | en | ['en', 'error', 'th'] | False |
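The heart of the helper above is: copy fixtures into a temporary directory, run the user-flow script in a subprocess, then assert on its output and exit code. A stripped-down sketch of that pattern using only the standard library; names and paths are illustrative:

    import shutil
    import subprocess
    from pathlib import Path

    def run_user_flow(script_source: Path, fixtures: Path, workdir: Path) -> None:
        """Copy fixtures next to the script, execute it, and fail loudly on error."""
        shutil.copytree(fixtures, workdir / "data")
        script_path = workdir / "test_script.py"
        shutil.copyfile(script_source, script_path)

        result = subprocess.run(
            ["python", str(script_path)],
            cwd=workdir,
            capture_output=True,
            text=True,
        )
        print(result.stdout)
        if result.returncode != 0:
            raise AssertionError(f"user flow failed:\n{result.stderr}")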
_check_for_skipped_tests | (pytest_args, test_configuration) | Enable scripts to be skipped based on pytest invocation flags. | Enable scripts to be skipped based on pytest invocation flags. | def _check_for_skipped_tests(pytest_args, test_configuration) -> None:
"""Enable scripts to be skipped based on pytest invocation flags."""
dependencies = test_configuration.get("extra_backend_dependencies", None)
if not dependencies:
return
elif dependencies == BackendDependencies.POSTGRESQL and (
pytest_args.no_postgresql or pytest_args.no_sqlalchemy
):
pytest.skip("Skipping postgres tests")
elif dependencies == BackendDependencies.MYSQL and (
not pytest_args.mysql or pytest_args.no_sqlalchemy
):
pytest.skip("Skipping mysql tests")
elif dependencies == BackendDependencies.MSSQL and (
pytest_args.no_mssql or pytest_args.no_sqlalchemy
):
pytest.skip("Skipping mssql tests")
elif dependencies == BackendDependencies.BIGQUERY and pytest_args.no_sqlalchemy:
pytest.skip("Skipping bigquery tests")
elif dependencies == BackendDependencies.REDSHIFT and pytest_args.no_sqlalchemy:
pytest.skip("Skipping redshift tests")
elif dependencies == BackendDependencies.SPARK and pytest_args.no_spark:
pytest.skip("Skipping spark tests")
elif dependencies == BackendDependencies.SNOWFLAKE and pytest_args.no_sqlalchemy:
pytest.skip("Skipping snowflake tests") | [
"def",
"_check_for_skipped_tests",
"(",
"pytest_args",
",",
"test_configuration",
")",
"->",
"None",
":",
"dependencies",
"=",
"test_configuration",
".",
"get",
"(",
"\"extra_backend_dependencies\"",
",",
"None",
")",
"if",
"not",
"dependencies",
":",
"return",
"elif",
"dependencies",
"==",
"BackendDependencies",
".",
"POSTGRESQL",
"and",
"(",
"pytest_args",
".",
"no_postgresql",
"or",
"pytest_args",
".",
"no_sqlalchemy",
")",
":",
"pytest",
".",
"skip",
"(",
"\"Skipping postgres tests\"",
")",
"elif",
"dependencies",
"==",
"BackendDependencies",
".",
"MYSQL",
"and",
"(",
"not",
"pytest_args",
".",
"mysql",
"or",
"pytest_args",
".",
"no_sqlalchemy",
")",
":",
"pytest",
".",
"skip",
"(",
"\"Skipping mysql tests\"",
")",
"elif",
"dependencies",
"==",
"BackendDependencies",
".",
"MSSQL",
"and",
"(",
"pytest_args",
".",
"no_mssql",
"or",
"pytest_args",
".",
"no_sqlalchemy",
")",
":",
"pytest",
".",
"skip",
"(",
"\"Skipping mssql tests\"",
")",
"elif",
"dependencies",
"==",
"BackendDependencies",
".",
"BIGQUERY",
"and",
"pytest_args",
".",
"no_sqlalchemy",
":",
"pytest",
".",
"skip",
"(",
"\"Skipping bigquery tests\"",
")",
"elif",
"dependencies",
"==",
"BackendDependencies",
".",
"REDSHIFT",
"and",
"pytest_args",
".",
"no_sqlalchemy",
":",
"pytest",
".",
"skip",
"(",
"\"Skipping redshift tests\"",
")",
"elif",
"dependencies",
"==",
"BackendDependencies",
".",
"SPARK",
"and",
"pytest_args",
".",
"no_spark",
":",
"pytest",
".",
"skip",
"(",
"\"Skipping spark tests\"",
")",
"elif",
"dependencies",
"==",
"BackendDependencies",
".",
"SNOWFLAKE",
"and",
"pytest_args",
".",
"no_sqlalchemy",
":",
"pytest",
".",
"skip",
"(",
"\"Skipping snowflake tests\"",
")"
] | [
328,
0
] | [
352,
47
] | python | en | ['en', 'en', 'en'] | True |
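The same skip-by-flag idea can be written table-driven rather than as a chain of elif branches; pytest.skip behaves the same either way. A sketch that assumes pytest_args exposes boolean attributes named after each flag:

    import pytest

    # (dependency name, predicate) pairs; the predicate decides whether to skip.
    _SKIP_RULES = [
        ("POSTGRESQL", lambda a: a.no_postgresql or a.no_sqlalchemy),
        ("MYSQL",      lambda a: not a.mysql or a.no_sqlalchemy),
        ("SPARK",      lambda a: a.no_spark),
    ]

    def skip_if_unsupported(dependency_name, pytest_args):
        for name, should_skip in _SKIP_RULES:
            if dependency_name == name and should_skip(pytest_args):
                pytest.skip(f"Skipping {name.lower()} tests")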
SimpleSemanticTypeColumnDomainBuilder.__init__ | (
self,
data_context: DataContext,
batch_request: Optional[Union[BatchRequest, dict]] = None,
semantic_types: Optional[
Union[str, SemanticDomainTypes, List[Union[str, SemanticDomainTypes]]]
] = None,
) |
Args:
data_context: DataContext
batch_request: specified in DomainBuilder configuration to get Batch objects for domain computation.
|
Args:
data_context: DataContext
batch_request: specified in DomainBuilder configuration to get Batch objects for domain computation.
| def __init__(
self,
data_context: DataContext,
batch_request: Optional[Union[BatchRequest, dict]] = None,
semantic_types: Optional[
Union[str, SemanticDomainTypes, List[Union[str, SemanticDomainTypes]]]
] = None,
):
"""
Args:
data_context: DataContext
batch_request: specified in DomainBuilder configuration to get Batch objects for domain computation.
"""
super().__init__(
data_context=data_context,
batch_request=batch_request,
)
if semantic_types is None:
semantic_types = []
self._semantic_types = semantic_types | [
"def",
"__init__",
"(",
"self",
",",
"data_context",
":",
"DataContext",
",",
"batch_request",
":",
"Optional",
"[",
"Union",
"[",
"BatchRequest",
",",
"dict",
"]",
"]",
"=",
"None",
",",
"semantic_types",
":",
"Optional",
"[",
"Union",
"[",
"str",
",",
"SemanticDomainTypes",
",",
"List",
"[",
"Union",
"[",
"str",
",",
"SemanticDomainTypes",
"]",
"]",
"]",
"]",
"=",
"None",
",",
")",
":",
"super",
"(",
")",
".",
"__init__",
"(",
"data_context",
"=",
"data_context",
",",
"batch_request",
"=",
"batch_request",
",",
")",
"if",
"semantic_types",
"is",
"None",
":",
"semantic_types",
"=",
"[",
"]",
"self",
".",
"_semantic_types",
"=",
"semantic_types"
] | [
22,
4
] | [
43,
45
] | python | en | ['en', 'error', 'th'] | False |
SimpleSemanticTypeColumnDomainBuilder._get_domains | (
self,
variables: Optional[ParameterContainer] = None,
) |
Find the semantic column type for each column and return all domains matching the specified type or types.
|
Find the semantic column type for each column and return all domains matching the specified type or types.
| def _get_domains(
self,
variables: Optional[ParameterContainer] = None,
) -> List[Domain]:
"""
Find the semantic column type for each column and return all domains matching the specified type or types.
"""
semantic_types: List[
SemanticDomainTypes
] = _parse_semantic_domain_type_argument(semantic_types=self._semantic_types)
batch_id: str = self.get_batch_id(variables=variables)
column_types_dict_list: List[Dict[str, Any]] = self.get_validator(
variables=variables
).get_metric(
metric=MetricConfiguration(
metric_name="table.column_types",
metric_domain_kwargs={
"batch_id": batch_id,
},
metric_value_kwargs={
"include_nested": True,
},
metric_dependencies=None,
)
)
table_column_names: List[str] = self.get_validator(
variables=variables
).get_metric(
metric=MetricConfiguration(
metric_name="table.columns",
metric_domain_kwargs={
"batch_id": batch_id,
},
metric_value_kwargs=None,
metric_dependencies=None,
)
)
column_name: str
# A semantic type is distinguished from the structured column type;
# An example structured column type would be "integer". The inferred semantic type would be "id".
table_column_name_to_inferred_semantic_domain_type_mapping: Dict[
str, SemanticDomainTypes
] = {
column_name: self.infer_semantic_domain_type_from_table_column_type(
column_types_dict_list=column_types_dict_list,
column_name=column_name,
).semantic_domain_type
for column_name in table_column_names
}
candidate_column_names: List[str] = list(
filter(
lambda candidate_column_name: table_column_name_to_inferred_semantic_domain_type_mapping[
candidate_column_name
]
in semantic_types,
table_column_names,
)
)
domains: List[Domain] = [
Domain(
domain_type=MetricDomainTypes.COLUMN,
domain_kwargs={
"column": column_name,
},
details={
"inferred_semantic_domain_type": table_column_name_to_inferred_semantic_domain_type_mapping[
column_name
],
},
)
for column_name in candidate_column_names
]
return domains | [
"def",
"_get_domains",
"(",
"self",
",",
"variables",
":",
"Optional",
"[",
"ParameterContainer",
"]",
"=",
"None",
",",
")",
"->",
"List",
"[",
"Domain",
"]",
":",
"semantic_types",
":",
"List",
"[",
"SemanticDomainTypes",
"]",
"=",
"_parse_semantic_domain_type_argument",
"(",
"semantic_types",
"=",
"self",
".",
"_semantic_types",
")",
"batch_id",
":",
"str",
"=",
"self",
".",
"get_batch_id",
"(",
"variables",
"=",
"variables",
")",
"column_types_dict_list",
":",
"List",
"[",
"Dict",
"[",
"str",
",",
"Any",
"]",
"]",
"=",
"self",
".",
"get_validator",
"(",
"variables",
"=",
"variables",
")",
".",
"get_metric",
"(",
"metric",
"=",
"MetricConfiguration",
"(",
"metric_name",
"=",
"\"table.column_types\"",
",",
"metric_domain_kwargs",
"=",
"{",
"\"batch_id\"",
":",
"batch_id",
",",
"}",
",",
"metric_value_kwargs",
"=",
"{",
"\"include_nested\"",
":",
"True",
",",
"}",
",",
"metric_dependencies",
"=",
"None",
",",
")",
")",
"table_column_names",
":",
"List",
"[",
"str",
"]",
"=",
"self",
".",
"get_validator",
"(",
"variables",
"=",
"variables",
")",
".",
"get_metric",
"(",
"metric",
"=",
"MetricConfiguration",
"(",
"metric_name",
"=",
"\"table.columns\"",
",",
"metric_domain_kwargs",
"=",
"{",
"\"batch_id\"",
":",
"batch_id",
",",
"}",
",",
"metric_value_kwargs",
"=",
"None",
",",
"metric_dependencies",
"=",
"None",
",",
")",
")",
"column_name",
":",
"str",
"# A semantic type is distinguished from the structured column type;",
"# An example structured column type would be \"integer\". The inferred semantic type would be \"id\".",
"table_column_name_to_inferred_semantic_domain_type_mapping",
":",
"Dict",
"[",
"str",
",",
"SemanticDomainTypes",
"]",
"=",
"{",
"column_name",
":",
"self",
".",
"infer_semantic_domain_type_from_table_column_type",
"(",
"column_types_dict_list",
"=",
"column_types_dict_list",
",",
"column_name",
"=",
"column_name",
",",
")",
".",
"semantic_domain_type",
"for",
"column_name",
"in",
"table_column_names",
"}",
"candidate_column_names",
":",
"List",
"[",
"str",
"]",
"=",
"list",
"(",
"filter",
"(",
"lambda",
"candidate_column_name",
":",
"table_column_name_to_inferred_semantic_domain_type_mapping",
"[",
"candidate_column_name",
"]",
"in",
"semantic_types",
",",
"table_column_names",
",",
")",
")",
"domains",
":",
"List",
"[",
"Domain",
"]",
"=",
"[",
"Domain",
"(",
"domain_type",
"=",
"MetricDomainTypes",
".",
"COLUMN",
",",
"domain_kwargs",
"=",
"{",
"\"column\"",
":",
"column_name",
",",
"}",
",",
"details",
"=",
"{",
"\"inferred_semantic_domain_type\"",
":",
"table_column_name_to_inferred_semantic_domain_type_mapping",
"[",
"column_name",
"]",
",",
"}",
",",
")",
"for",
"column_name",
"in",
"candidate_column_names",
"]",
"return",
"domains"
] | [
45,
4
] | [
123,
22
] | python | en | ['en', 'error', 'th'] | False |
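Stripped of the metric plumbing, _get_domains reduces to: infer a semantic type per column, keep only the columns whose inferred type is in the requested set, and wrap each survivor as a column domain. A plain-Python sketch of that filtering step; the mapping and the requested types are made-up inputs, not the Great Expectations API:

    # Hypothetical inferred types for a table's columns.
    inferred = {
        "passenger_id": "id",
        "fare_amount": "numeric",
        "pickup_datetime": "datetime",
        "store_and_fwd_flag": "logic",
    }
    requested = {"numeric", "datetime"}

    candidate_columns = [name for name, kind in inferred.items() if kind in requested]
    domains = [
        {"domain_type": "column",
         "domain_kwargs": {"column": name},
         "details": {"inferred_semantic_domain_type": inferred[name]}}
        for name in candidate_columns
    ]
    # -> domains for fare_amount and pickup_datetime only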
FARMReader.__init__ | (
self,
model_name_or_path: Union[str, Path],
model_version: Optional[str] = None,
context_window_size: int = 150,
batch_size: int = 50,
use_gpu: bool = True,
no_ans_boost: float = 0.0,
return_no_answer: bool = False,
top_k_per_candidate: int = 3,
top_k_per_sample: int = 1,
num_processes: Optional[int] = None,
max_seq_len: int = 256,
doc_stride: int = 128,
progress_bar: bool = True
) |
:param model_name_or_path: Directory of a saved model or the name of a public model e.g. 'bert-base-cased',
'deepset/bert-base-cased-squad2', 'distilbert-base-uncased-distilled-squad'.
See https://huggingface.co/models for full list of available models.
:param model_version: The version of model to use from the HuggingFace model hub. Can be tag name, branch name, or commit hash.
:param context_window_size: The size, in characters, of the window around the answer span that is used when
displaying the context around the answer.
:param batch_size: Number of samples the model receives in one batch for inference.
Memory consumption is much lower in inference mode. Recommendation: Increase the batch size
to a value so only a single batch is used.
:param use_gpu: Whether to use GPU (if available)
:param no_ans_boost: How much the no_answer logit is boosted/increased.
If set to 0 (default), the no_answer logit is not changed.
If a negative number, there is a lower chance of "no_answer" being predicted.
If a positive number, there is an increased chance of "no_answer"
:param return_no_answer: Whether to include no_answer predictions in the results.
:param top_k_per_candidate: How many answers to extract for each candidate doc that is coming from the retriever (might be a long text).
Note that this is not the number of "final answers" you will receive
(see `top_k` in FARMReader.predict() or Finder.get_answers() for that)
and that FARM includes no_answer in the sorted list of predictions.
:param top_k_per_sample: How many answers to extract from each small text passage that the model can process at once
(one "candidate doc" is usually split into many smaller "passages").
You usually want a very small value here, as it slows down inference
and you don't gain much of quality by having multiple answers from one passage.
Note that this is not the number of "final answers" you will receive
(see `top_k` in FARMReader.predict() or Finder.get_answers() for that)
and that FARM includes no_answer in the sorted list of predictions.
:param num_processes: The number of processes for `multiprocessing.Pool`. Set to value of 0 to disable
multiprocessing. Set to None to let Inferencer determine optimum number. If you
want to debug the Language Model, you might need to disable multiprocessing!
:param max_seq_len: Max sequence length of one input text for the model
:param doc_stride: Length of striding window for splitting long texts (used if ``len(text) > max_seq_len``)
:param progress_bar: Whether to show a tqdm progress bar or not.
Can be helpful to disable in production deployments to keep the logs clean.
|
:param model_name_or_path: Directory of a saved model or the name of a public model e.g. 'bert-base-cased',
'deepset/bert-base-cased-squad2', 'distilbert-base-uncased-distilled-squad'.
See https://huggingface.co/models for full list of available models.
:param model_version: The version of model to use from the HuggingFace model hub. Can be tag name, branch name, or commit hash.
:param context_window_size: The size, in characters, of the window around the answer span that is used when
displaying the context around the answer.
:param batch_size: Number of samples the model receives in one batch for inference.
Memory consumption is much lower in inference mode. Recommendation: Increase the batch size
to a value so only a single batch is used.
:param use_gpu: Whether to use GPU (if available)
:param no_ans_boost: How much the no_answer logit is boosted/increased.
If set to 0 (default), the no_answer logit is not changed.
If a negative number, there is a lower chance of "no_answer" being predicted.
If a positive number, there is an increased chance of "no_answer"
:param return_no_answer: Whether to include no_answer predictions in the results.
:param top_k_per_candidate: How many answers to extract for each candidate doc that is coming from the retriever (might be a long text).
Note that this is not the number of "final answers" you will receive
(see `top_k` in FARMReader.predict() or Finder.get_answers() for that)
and that FARM includes no_answer in the sorted list of predictions.
:param top_k_per_sample: How many answers to extract from each small text passage that the model can process at once
(one "candidate doc" is usually split into many smaller "passages").
You usually want a very small value here, as it slows down inference
and you don't gain much of quality by having multiple answers from one passage.
Note that this is not the number of "final answers" you will receive
(see `top_k` in FARMReader.predict() or Finder.get_answers() for that)
and that FARM includes no_answer in the sorted list of predictions.
:param num_processes: The number of processes for `multiprocessing.Pool`. Set to value of 0 to disable
multiprocessing. Set to None to let Inferencer determine optimum number. If you
want to debug the Language Model, you might need to disable multiprocessing!
:param max_seq_len: Max sequence length of one input text for the model
:param doc_stride: Length of striding window for splitting long texts (used if ``len(text) > max_seq_len``)
:param progress_bar: Whether to show a tqdm progress bar or not.
Can be helpful to disable in production deployments to keep the logs clean.
| def __init__(
self,
model_name_or_path: Union[str, Path],
model_version: Optional[str] = None,
context_window_size: int = 150,
batch_size: int = 50,
use_gpu: bool = True,
no_ans_boost: float = 0.0,
return_no_answer: bool = False,
top_k_per_candidate: int = 3,
top_k_per_sample: int = 1,
num_processes: Optional[int] = None,
max_seq_len: int = 256,
doc_stride: int = 128,
progress_bar: bool = True
):
"""
:param model_name_or_path: Directory of a saved model or the name of a public model e.g. 'bert-base-cased',
'deepset/bert-base-cased-squad2', 'distilbert-base-uncased-distilled-squad'.
See https://huggingface.co/models for full list of available models.
:param model_version: The version of model to use from the HuggingFace model hub. Can be tag name, branch name, or commit hash.
:param context_window_size: The size, in characters, of the window around the answer span that is used when
displaying the context around the answer.
:param batch_size: Number of samples the model receives in one batch for inference.
Memory consumption is much lower in inference mode. Recommendation: Increase the batch size
to a value so only a single batch is used.
:param use_gpu: Whether to use GPU (if available)
:param no_ans_boost: How much the no_answer logit is boosted/increased.
If set to 0 (default), the no_answer logit is not changed.
If a negative number, there is a lower chance of "no_answer" being predicted.
If a positive number, there is an increased chance of "no_answer"
:param return_no_answer: Whether to include no_answer predictions in the results.
:param top_k_per_candidate: How many answers to extract for each candidate doc that is coming from the retriever (might be a long text).
Note that this is not the number of "final answers" you will receive
(see `top_k` in FARMReader.predict() or Finder.get_answers() for that)
and that FARM includes no_answer in the sorted list of predictions.
:param top_k_per_sample: How many answers to extract from each small text passage that the model can process at once
(one "candidate doc" is usually split into many smaller "passages").
You usually want a very small value here, as it slows down inference
and you don't gain much of quality by having multiple answers from one passage.
Note that this is not the number of "final answers" you will receive
(see `top_k` in FARMReader.predict() or Finder.get_answers() for that)
and that FARM includes no_answer in the sorted list of predictions.
:param num_processes: The number of processes for `multiprocessing.Pool`. Set to value of 0 to disable
multiprocessing. Set to None to let Inferencer determine optimum number. If you
want to debug the Language Model, you might need to disable multiprocessing!
:param max_seq_len: Max sequence length of one input text for the model
:param doc_stride: Length of striding window for splitting long texts (used if ``len(text) > max_seq_len``)
:param progress_bar: Whether to show a tqdm progress bar or not.
Can be helpful to disable in production deployments to keep the logs clean.
"""
self.return_no_answers = return_no_answer
self.top_k_per_candidate = top_k_per_candidate
self.inferencer = QAInferencer.load(model_name_or_path, batch_size=batch_size, gpu=use_gpu,
task_type="question_answering", max_seq_len=max_seq_len,
doc_stride=doc_stride, num_processes=num_processes, revision=model_version,
disable_tqdm=not progress_bar)
self.inferencer.model.prediction_heads[0].context_window_size = context_window_size
self.inferencer.model.prediction_heads[0].no_ans_boost = no_ans_boost
self.inferencer.model.prediction_heads[0].n_best = top_k_per_candidate + 1 # including possible no_answer
try:
self.inferencer.model.prediction_heads[0].n_best_per_sample = top_k_per_sample
except:
logger.warning("Could not set `top_k_per_sample` in FARM. Please update FARM version.")
self.max_seq_len = max_seq_len
self.use_gpu = use_gpu
self.progress_bar = progress_bar | [
"def",
"__init__",
"(",
"self",
",",
"model_name_or_path",
":",
"Union",
"[",
"str",
",",
"Path",
"]",
",",
"model_version",
":",
"Optional",
"[",
"str",
"]",
"=",
"None",
",",
"context_window_size",
":",
"int",
"=",
"150",
",",
"batch_size",
":",
"int",
"=",
"50",
",",
"use_gpu",
":",
"bool",
"=",
"True",
",",
"no_ans_boost",
":",
"float",
"=",
"0.0",
",",
"return_no_answer",
":",
"bool",
"=",
"False",
",",
"top_k_per_candidate",
":",
"int",
"=",
"3",
",",
"top_k_per_sample",
":",
"int",
"=",
"1",
",",
"num_processes",
":",
"Optional",
"[",
"int",
"]",
"=",
"None",
",",
"max_seq_len",
":",
"int",
"=",
"256",
",",
"doc_stride",
":",
"int",
"=",
"128",
",",
"progress_bar",
":",
"bool",
"=",
"True",
")",
":",
"self",
".",
"return_no_answers",
"=",
"return_no_answer",
"self",
".",
"top_k_per_candidate",
"=",
"top_k_per_candidate",
"self",
".",
"inferencer",
"=",
"QAInferencer",
".",
"load",
"(",
"model_name_or_path",
",",
"batch_size",
"=",
"batch_size",
",",
"gpu",
"=",
"use_gpu",
",",
"task_type",
"=",
"\"question_answering\"",
",",
"max_seq_len",
"=",
"max_seq_len",
",",
"doc_stride",
"=",
"doc_stride",
",",
"num_processes",
"=",
"num_processes",
",",
"revision",
"=",
"model_version",
",",
"disable_tqdm",
"=",
"not",
"progress_bar",
")",
"self",
".",
"inferencer",
".",
"model",
".",
"prediction_heads",
"[",
"0",
"]",
".",
"context_window_size",
"=",
"context_window_size",
"self",
".",
"inferencer",
".",
"model",
".",
"prediction_heads",
"[",
"0",
"]",
".",
"no_ans_boost",
"=",
"no_ans_boost",
"self",
".",
"inferencer",
".",
"model",
".",
"prediction_heads",
"[",
"0",
"]",
".",
"n_best",
"=",
"top_k_per_candidate",
"+",
"1",
"# including possible no_answer",
"try",
":",
"self",
".",
"inferencer",
".",
"model",
".",
"prediction_heads",
"[",
"0",
"]",
".",
"n_best_per_sample",
"=",
"top_k_per_sample",
"except",
":",
"logger",
".",
"warning",
"(",
"\"Could not set `top_k_per_sample` in FARM. Please update FARM version.\"",
")",
"self",
".",
"max_seq_len",
"=",
"max_seq_len",
"self",
".",
"use_gpu",
"=",
"use_gpu",
"self",
".",
"progress_bar",
"=",
"progress_bar"
] | [
40,
4
] | [
108,
40
] | python | en | ['en', 'error', 'th'] | False |
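A typical instantiation matching these parameters, as it looked in Haystack 0.x; the import path and the model name are assumptions, since both have moved between releases:

    # Import path used by older Haystack releases; newer ones reorganize the package.
    from haystack.reader.farm import FARMReader

    reader = FARMReader(
        model_name_or_path="deepset/roberta-base-squad2",  # any SQuAD-style QA model
        use_gpu=False,
        context_window_size=150,
        top_k_per_candidate=3,
        return_no_answer=False,
    )
    # reader.predict(question=..., documents=[...]) then returns ranked answers.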
FARMReader.train | (
self,
data_dir: str,
train_filename: str,
dev_filename: Optional[str] = None,
test_filename: Optional[str] = None,
use_gpu: Optional[bool] = None,
batch_size: int = 10,
n_epochs: int = 2,
learning_rate: float = 1e-5,
max_seq_len: Optional[int] = None,
warmup_proportion: float = 0.2,
dev_split: float = 0,
evaluate_every: int = 300,
save_dir: Optional[str] = None,
num_processes: Optional[int] = None,
use_amp: str = None,
) |
Fine-tune a model on a QA dataset. Options:
- Take a plain language model (e.g. `bert-base-cased`) and train it for QA (e.g. on SQuAD data)
- Take a QA model (e.g. `deepset/bert-base-cased-squad2`) and fine-tune it for your domain (e.g. using your labels collected via the haystack annotation tool)
:param data_dir: Path to directory containing your training data in SQuAD style
:param train_filename: Filename of training data
:param dev_filename: Filename of dev / eval data
:param test_filename: Filename of test data
:param dev_split: Instead of specifying a dev_filename, you can also specify a ratio (e.g. 0.1) here
that gets split off from training data for eval.
:param use_gpu: Whether to use GPU (if available)
:param batch_size: Number of samples the model receives in one batch for training
:param n_epochs: Number of iterations on the whole training data set
:param learning_rate: Learning rate of the optimizer
:param max_seq_len: Maximum text length (in tokens). Everything longer gets cut down.
:param warmup_proportion: Proportion of training steps until maximum learning rate is reached.
Until that point LR is increasing linearly. After that it's decreasing again linearly.
Options for different schedules are available in FARM.
:param evaluate_every: Evaluate the model every X steps on the hold-out eval dataset
:param save_dir: Path to store the final model
:param num_processes: The number of processes for `multiprocessing.Pool` during preprocessing.
Set to value of 1 to disable multiprocessing. When set to 1, you cannot split away a dev set from train set.
Set to None to use all CPU cores minus one.
:param use_amp: Optimization level of NVIDIA's automatic mixed precision (AMP). The higher the level, the faster the model.
Available options:
None (Don't use AMP)
"O0" (Normal FP32 training)
"O1" (Mixed Precision => Recommended)
"O2" (Almost FP16)
"O3" (Pure FP16).
See details on: https://nvidia.github.io/apex/amp.html
:return: None
|
Fine-tune a model on a QA dataset. Options: | def train(
self,
data_dir: str,
train_filename: str,
dev_filename: Optional[str] = None,
test_filename: Optional[str] = None,
use_gpu: Optional[bool] = None,
batch_size: int = 10,
n_epochs: int = 2,
learning_rate: float = 1e-5,
max_seq_len: Optional[int] = None,
warmup_proportion: float = 0.2,
dev_split: float = 0,
evaluate_every: int = 300,
save_dir: Optional[str] = None,
num_processes: Optional[int] = None,
use_amp: str = None,
):
"""
Fine-tune a model on a QA dataset. Options:
- Take a plain language model (e.g. `bert-base-cased`) and train it for QA (e.g. on SQuAD data)
- Take a QA model (e.g. `deepset/bert-base-cased-squad2`) and fine-tune it for your domain (e.g. using your labels collected via the haystack annotation tool)
:param data_dir: Path to directory containing your training data in SQuAD style
:param train_filename: Filename of training data
:param dev_filename: Filename of dev / eval data
:param test_filename: Filename of test data
:param dev_split: Instead of specifying a dev_filename, you can also specify a ratio (e.g. 0.1) here
that gets split off from training data for eval.
:param use_gpu: Whether to use GPU (if available)
:param batch_size: Number of samples the model receives in one batch for training
:param n_epochs: Number of iterations on the whole training data set
:param learning_rate: Learning rate of the optimizer
:param max_seq_len: Maximum text length (in tokens). Everything longer gets cut down.
:param warmup_proportion: Proportion of training steps until maximum learning rate is reached.
Until that point LR is increasing linearly. After that it's decreasing again linearly.
Options for different schedules are available in FARM.
:param evaluate_every: Evaluate the model every X steps on the hold-out eval dataset
:param save_dir: Path to store the final model
:param num_processes: The number of processes for `multiprocessing.Pool` during preprocessing.
Set to a value of 1 to disable multiprocessing. When set to 1, you cannot split away a dev set from the train set.
Set to None to use all CPU cores minus one.
:param use_amp: Optimization level of NVIDIA's automatic mixed precision (AMP). The higher the level, the faster the model.
Available options:
None (Don't use AMP)
"O0" (Normal FP32 training)
"O1" (Mixed Precision => Recommended)
"O2" (Almost FP16)
"O3" (Pure FP16).
See details on: https://nvidia.github.io/apex/amp.html
:return: None
"""
if dev_filename:
dev_split = 0
if num_processes is None:
num_processes = multiprocessing.cpu_count() - 1 or 1
set_all_seeds(seed=42)
# For these variables, by default, we use the value set when initializing the FARMReader.
# These can also be manually set when train() is called if you want a different value at train vs inference
if use_gpu is None:
use_gpu = self.use_gpu
if max_seq_len is None:
max_seq_len = self.max_seq_len
device, n_gpu = initialize_device_settings(use_cuda=use_gpu,use_amp=use_amp)
if not save_dir:
save_dir = f"../../saved_models/{self.inferencer.model.language_model.name}"
# 1. Create a DataProcessor that handles all the conversion from raw text into a pytorch Dataset
label_list = ["start_token", "end_token"]
metric = "squad"
processor = SquadProcessor(
tokenizer=self.inferencer.processor.tokenizer,
max_seq_len=max_seq_len,
label_list=label_list,
metric=metric,
train_filename=train_filename,
dev_filename=dev_filename,
dev_split=dev_split,
test_filename=test_filename,
data_dir=Path(data_dir),
)
# 2. Create a DataSilo that loads several datasets (train/dev/test), provides DataLoaders for them
# and calculates a few descriptive statistics of our datasets
data_silo = DataSilo(processor=processor, batch_size=batch_size, distributed=False, max_processes=num_processes)
# Quick-fix until this is fixed upstream in FARM:
# We must avoid applying DataParallel twice (once when loading the inferencer,
# once when calling initialize_optimizer)
self.inferencer.model.save("tmp_model")
model = BaseAdaptiveModel.load(load_dir="tmp_model", device=device, strict=True)
shutil.rmtree('tmp_model')
# 3. Create an optimizer and pass the already initialized model
model, optimizer, lr_schedule = initialize_optimizer(
model=model,
# model=self.inferencer.model,
learning_rate=learning_rate,
schedule_opts={"name": "LinearWarmup", "warmup_proportion": warmup_proportion},
n_batches=len(data_silo.loaders["train"]),
n_epochs=n_epochs,
device=device,
use_amp=use_amp,
)
# 4. Feed everything to the Trainer, which takes care of growing our model and evaluates it from time to time
trainer = Trainer(
model=model,
optimizer=optimizer,
data_silo=data_silo,
epochs=n_epochs,
n_gpu=n_gpu,
lr_schedule=lr_schedule,
evaluate_every=evaluate_every,
device=device,
use_amp=use_amp,
disable_tqdm=not self.progress_bar
)
# 5. Let it grow!
self.inferencer.model = trainer.train()
self.save(Path(save_dir)) | [
"def",
"train",
"(",
"self",
",",
"data_dir",
":",
"str",
",",
"train_filename",
":",
"str",
",",
"dev_filename",
":",
"Optional",
"[",
"str",
"]",
"=",
"None",
",",
"test_filename",
":",
"Optional",
"[",
"str",
"]",
"=",
"None",
",",
"use_gpu",
":",
"Optional",
"[",
"bool",
"]",
"=",
"None",
",",
"batch_size",
":",
"int",
"=",
"10",
",",
"n_epochs",
":",
"int",
"=",
"2",
",",
"learning_rate",
":",
"float",
"=",
"1e-5",
",",
"max_seq_len",
":",
"Optional",
"[",
"int",
"]",
"=",
"None",
",",
"warmup_proportion",
":",
"float",
"=",
"0.2",
",",
"dev_split",
":",
"float",
"=",
"0",
",",
"evaluate_every",
":",
"int",
"=",
"300",
",",
"save_dir",
":",
"Optional",
"[",
"str",
"]",
"=",
"None",
",",
"num_processes",
":",
"Optional",
"[",
"int",
"]",
"=",
"None",
",",
"use_amp",
":",
"str",
"=",
"None",
",",
")",
":",
"if",
"dev_filename",
":",
"dev_split",
"=",
"0",
"if",
"num_processes",
"is",
"None",
":",
"num_processes",
"=",
"multiprocessing",
".",
"cpu_count",
"(",
")",
"-",
"1",
"or",
"1",
"set_all_seeds",
"(",
"seed",
"=",
"42",
")",
"# For these variables, by default, we use the value set when initializing the FARMReader.",
"# These can also be manually set when train() is called if you want a different value at train vs inference",
"if",
"use_gpu",
"is",
"None",
":",
"use_gpu",
"=",
"self",
".",
"use_gpu",
"if",
"max_seq_len",
"is",
"None",
":",
"max_seq_len",
"=",
"self",
".",
"max_seq_len",
"device",
",",
"n_gpu",
"=",
"initialize_device_settings",
"(",
"use_cuda",
"=",
"use_gpu",
",",
"use_amp",
"=",
"use_amp",
")",
"if",
"not",
"save_dir",
":",
"save_dir",
"=",
"f\"../../saved_models/{self.inferencer.model.language_model.name}\"",
"# 1. Create a DataProcessor that handles all the conversion from raw text into a pytorch Dataset",
"label_list",
"=",
"[",
"\"start_token\"",
",",
"\"end_token\"",
"]",
"metric",
"=",
"\"squad\"",
"processor",
"=",
"SquadProcessor",
"(",
"tokenizer",
"=",
"self",
".",
"inferencer",
".",
"processor",
".",
"tokenizer",
",",
"max_seq_len",
"=",
"max_seq_len",
",",
"label_list",
"=",
"label_list",
",",
"metric",
"=",
"metric",
",",
"train_filename",
"=",
"train_filename",
",",
"dev_filename",
"=",
"dev_filename",
",",
"dev_split",
"=",
"dev_split",
",",
"test_filename",
"=",
"test_filename",
",",
"data_dir",
"=",
"Path",
"(",
"data_dir",
")",
",",
")",
"# 2. Create a DataSilo that loads several datasets (train/dev/test), provides DataLoaders for them",
"# and calculates a few descriptive statistics of our datasets",
"data_silo",
"=",
"DataSilo",
"(",
"processor",
"=",
"processor",
",",
"batch_size",
"=",
"batch_size",
",",
"distributed",
"=",
"False",
",",
"max_processes",
"=",
"num_processes",
")",
"# Quick-fix until this is fixed upstream in FARM:",
"# We must avoid applying DataParallel twice (once when loading the inferencer,",
"# once when calling initalize_optimizer)",
"self",
".",
"inferencer",
".",
"model",
".",
"save",
"(",
"\"tmp_model\"",
")",
"model",
"=",
"BaseAdaptiveModel",
".",
"load",
"(",
"load_dir",
"=",
"\"tmp_model\"",
",",
"device",
"=",
"device",
",",
"strict",
"=",
"True",
")",
"shutil",
".",
"rmtree",
"(",
"'tmp_model'",
")",
"# 3. Create an optimizer and pass the already initialized model",
"model",
",",
"optimizer",
",",
"lr_schedule",
"=",
"initialize_optimizer",
"(",
"model",
"=",
"model",
",",
"# model=self.inferencer.model,",
"learning_rate",
"=",
"learning_rate",
",",
"schedule_opts",
"=",
"{",
"\"name\"",
":",
"\"LinearWarmup\"",
",",
"\"warmup_proportion\"",
":",
"warmup_proportion",
"}",
",",
"n_batches",
"=",
"len",
"(",
"data_silo",
".",
"loaders",
"[",
"\"train\"",
"]",
")",
",",
"n_epochs",
"=",
"n_epochs",
",",
"device",
"=",
"device",
",",
"use_amp",
"=",
"use_amp",
",",
")",
"# 4. Feed everything to the Trainer, which keeps care of growing our model and evaluates it from time to time",
"trainer",
"=",
"Trainer",
"(",
"model",
"=",
"model",
",",
"optimizer",
"=",
"optimizer",
",",
"data_silo",
"=",
"data_silo",
",",
"epochs",
"=",
"n_epochs",
",",
"n_gpu",
"=",
"n_gpu",
",",
"lr_schedule",
"=",
"lr_schedule",
",",
"evaluate_every",
"=",
"evaluate_every",
",",
"device",
"=",
"device",
",",
"use_amp",
"=",
"use_amp",
",",
"disable_tqdm",
"=",
"not",
"self",
".",
"progress_bar",
")",
"# 5. Let it grow!",
"self",
".",
"inferencer",
".",
"model",
"=",
"trainer",
".",
"train",
"(",
")",
"self",
".",
"save",
"(",
"Path",
"(",
"save_dir",
")",
")"
] | [
110,
4
] | [
238,
33
] | python | en | ['en', 'error', 'th'] | False |
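A minimal, hedged usage sketch for the `train` record above — the model name, data directory, and file names are illustrative assumptions, not values taken from the dataset:
```python
# Hedged sketch: fine-tune a Haystack FARMReader on SQuAD-style annotations.
from haystack.reader.farm import FARMReader

reader = FARMReader(model_name_or_path="deepset/roberta-base-squad2", use_gpu=True)
reader.train(
    data_dir="data/squad_style",    # folder containing the SQuAD-style JSON file
    train_filename="answers.json",  # e.g. exported from the Haystack annotation tool
    n_epochs=2,
    batch_size=10,
    save_dir="saved_models/my_reader",
)
```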
FARMReader.update_parameters | (
self,
context_window_size: Optional[int] = None,
no_ans_boost: Optional[float] = None,
return_no_answer: Optional[bool] = None,
max_seq_len: Optional[int] = None,
doc_stride: Optional[int] = None,
) |
Hot update parameters of a loaded Reader. It may not be safe when processing concurrent requests.
|
Hot update parameters of a loaded Reader. It may not be safe when processing concurrent requests.
| def update_parameters(
self,
context_window_size: Optional[int] = None,
no_ans_boost: Optional[float] = None,
return_no_answer: Optional[bool] = None,
max_seq_len: Optional[int] = None,
doc_stride: Optional[int] = None,
):
"""
Hot update parameters of a loaded Reader. It may not be safe when processing concurrent requests.
"""
if no_ans_boost is not None:
self.inferencer.model.prediction_heads[0].no_ans_boost = no_ans_boost
if return_no_answer is not None:
self.return_no_answers = return_no_answer
if doc_stride is not None:
self.inferencer.processor.doc_stride = doc_stride
if context_window_size is not None:
self.inferencer.model.prediction_heads[0].context_window_size = context_window_size
if max_seq_len is not None:
self.inferencer.processor.max_seq_len = max_seq_len
self.max_seq_len = max_seq_len | [
"def",
"update_parameters",
"(",
"self",
",",
"context_window_size",
":",
"Optional",
"[",
"int",
"]",
"=",
"None",
",",
"no_ans_boost",
":",
"Optional",
"[",
"float",
"]",
"=",
"None",
",",
"return_no_answer",
":",
"Optional",
"[",
"bool",
"]",
"=",
"None",
",",
"max_seq_len",
":",
"Optional",
"[",
"int",
"]",
"=",
"None",
",",
"doc_stride",
":",
"Optional",
"[",
"int",
"]",
"=",
"None",
",",
")",
":",
"if",
"no_ans_boost",
"is",
"not",
"None",
":",
"self",
".",
"inferencer",
".",
"model",
".",
"prediction_heads",
"[",
"0",
"]",
".",
"no_ans_boost",
"=",
"no_ans_boost",
"if",
"return_no_answer",
"is",
"not",
"None",
":",
"self",
".",
"return_no_answers",
"=",
"return_no_answer",
"if",
"doc_stride",
"is",
"not",
"None",
":",
"self",
".",
"inferencer",
".",
"processor",
".",
"doc_stride",
"=",
"doc_stride",
"if",
"context_window_size",
"is",
"not",
"None",
":",
"self",
".",
"inferencer",
".",
"model",
".",
"prediction_heads",
"[",
"0",
"]",
".",
"context_window_size",
"=",
"context_window_size",
"if",
"max_seq_len",
"is",
"not",
"None",
":",
"self",
".",
"inferencer",
".",
"processor",
".",
"max_seq_len",
"=",
"max_seq_len",
"self",
".",
"max_seq_len",
"=",
"max_seq_len"
] | [
240,
4
] | [
261,
42
] | python | en | ['en', 'error', 'th'] | False |
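A short, hedged sketch of hot-updating a loaded reader; the parameter values below are illustrative only:
```python
from haystack.reader.farm import FARMReader

reader = FARMReader(model_name_or_path="deepset/roberta-base-squad2")
# Adjust inference behaviour in place; per the docstring above this may not be
# safe while concurrent requests are being served.
reader.update_parameters(
    context_window_size=100,
    no_ans_boost=-10,
    return_no_answer=True,
    max_seq_len=384,
    doc_stride=128,
)
```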
FARMReader.save | (self, directory: Path) |
Saves the Reader model so that it can be reused at a later point in time.
:param directory: Directory where the Reader model should be saved
|
Saves the Reader model so that it can be reused at a later point in time. | def save(self, directory: Path):
"""
Saves the Reader model so that it can be reused at a later point in time.
:param directory: Directory where the Reader model should be saved
"""
logger.info(f"Saving reader model to {directory}")
self.inferencer.model.save(directory)
self.inferencer.processor.save(directory) | [
"def",
"save",
"(",
"self",
",",
"directory",
":",
"Path",
")",
":",
"logger",
".",
"info",
"(",
"f\"Saving reader model to {directory}\"",
")",
"self",
".",
"inferencer",
".",
"model",
".",
"save",
"(",
"directory",
")",
"self",
".",
"inferencer",
".",
"processor",
".",
"save",
"(",
"directory",
")"
] | [
263,
4
] | [
271,
49
] | python | en | ['en', 'error', 'th'] | False |
FARMReader.predict_batch | (self, query_doc_list: List[dict], top_k: int = None, batch_size: int = None) |
Use loaded QA model to find answers for a list of queries in each query's supplied list of Document.
Returns list of dictionaries containing answers sorted by (desc.) probability
:param query_doc_list: List of dictionaries containing queries with their retrieved documents
:param top_k: The maximum number of answers to return for each query
:param batch_size: Number of samples the model receives in one batch for inference
:return: List of dictionaries containing query and answers
|
Use loaded QA model to find answers for a list of queries in each query's supplied list of Document. | def predict_batch(self, query_doc_list: List[dict], top_k: int = None, batch_size: int = None):
"""
Use loaded QA model to find answers for a list of queries in each query's supplied list of Document.
Returns list of dictionaries containing answers sorted by (desc.) probability
:param query_doc_list: List of dictionaries containing queries with their retrieved documents
:param top_k: The maximum number of answers to return for each query
:param batch_size: Number of samples the model receives in one batch for inference
:return: List of dictionaries containing query and answers
"""
# convert input to FARM format
inputs = []
number_of_docs = []
labels = []
# build input objects for inference_from_objects
for query_with_docs in query_doc_list:
documents = query_with_docs["docs"]
query = query_with_docs["question"]
labels.append(query)
number_of_docs.append(len(documents))
for doc in documents:
cur = QAInput(doc_text=doc.text,
questions=Question(text=query.question,
uid=doc.id))
inputs.append(cur)
self.inferencer.batch_size = batch_size
# make predictions on all document-query pairs
predictions = self.inferencer.inference_from_objects(
objects=inputs, return_json=False, multiprocessing_chunksize=1
)
# group predictions together
grouped_predictions = []
left_idx = 0
right_idx = 0
for number in number_of_docs:
right_idx = left_idx + number
grouped_predictions.append(predictions[left_idx:right_idx])
left_idx = right_idx
result = []
for idx, group in enumerate(grouped_predictions):
answers, max_no_ans_gap = self._extract_answers_of_predictions(group, top_k)
query = group[0].question
cur_label = labels[idx]
result.append({
"query": query,
"no_ans_gap": max_no_ans_gap,
"answers": answers,
"label": cur_label
})
return result | [
"def",
"predict_batch",
"(",
"self",
",",
"query_doc_list",
":",
"List",
"[",
"dict",
"]",
",",
"top_k",
":",
"int",
"=",
"None",
",",
"batch_size",
":",
"int",
"=",
"None",
")",
":",
"# convert input to FARM format",
"inputs",
"=",
"[",
"]",
"number_of_docs",
"=",
"[",
"]",
"labels",
"=",
"[",
"]",
"# build input objects for inference_from_objects",
"for",
"query_with_docs",
"in",
"query_doc_list",
":",
"documents",
"=",
"query_with_docs",
"[",
"\"docs\"",
"]",
"query",
"=",
"query_with_docs",
"[",
"\"question\"",
"]",
"labels",
".",
"append",
"(",
"query",
")",
"number_of_docs",
".",
"append",
"(",
"len",
"(",
"documents",
")",
")",
"for",
"doc",
"in",
"documents",
":",
"cur",
"=",
"QAInput",
"(",
"doc_text",
"=",
"doc",
".",
"text",
",",
"questions",
"=",
"Question",
"(",
"text",
"=",
"query",
".",
"question",
",",
"uid",
"=",
"doc",
".",
"id",
")",
")",
"inputs",
".",
"append",
"(",
"cur",
")",
"self",
".",
"inferencer",
".",
"batch_size",
"=",
"batch_size",
"# make predictions on all document-query pairs",
"predictions",
"=",
"self",
".",
"inferencer",
".",
"inference_from_objects",
"(",
"objects",
"=",
"inputs",
",",
"return_json",
"=",
"False",
",",
"multiprocessing_chunksize",
"=",
"1",
")",
"# group predictions together",
"grouped_predictions",
"=",
"[",
"]",
"left_idx",
"=",
"0",
"right_idx",
"=",
"0",
"for",
"number",
"in",
"number_of_docs",
":",
"right_idx",
"=",
"left_idx",
"+",
"number",
"grouped_predictions",
".",
"append",
"(",
"predictions",
"[",
"left_idx",
":",
"right_idx",
"]",
")",
"left_idx",
"=",
"right_idx",
"result",
"=",
"[",
"]",
"for",
"idx",
",",
"group",
"in",
"enumerate",
"(",
"grouped_predictions",
")",
":",
"answers",
",",
"max_no_ans_gap",
"=",
"self",
".",
"_extract_answers_of_predictions",
"(",
"group",
",",
"top_k",
")",
"query",
"=",
"group",
"[",
"0",
"]",
".",
"question",
"cur_label",
"=",
"labels",
"[",
"idx",
"]",
"result",
".",
"append",
"(",
"{",
"\"query\"",
":",
"query",
",",
"\"no_ans_gap\"",
":",
"max_no_ans_gap",
",",
"\"answers\"",
":",
"answers",
",",
"\"label\"",
":",
"cur_label",
"}",
")",
"return",
"result"
] | [
273,
4
] | [
330,
21
] | python | en | ['en', 'error', 'th'] | False |
FARMReader.predict | (self, query: str, documents: List[Document], top_k: Optional[int] = None) |
Use loaded QA model to find answers for a query in the supplied list of Document.
Returns dictionaries containing answers sorted by (desc.) probability.
Example:
```python
|{
| 'query': 'Who is the father of Arya Stark?',
| 'answers':[
| {'answer': 'Eddard,',
| 'context': " She travels with her father, Eddard, to King's Landing when he is ",
| 'offset_answer_start': 147,
| 'offset_answer_end': 154,
| 'probability': 0.9787139466668613,
| 'score': None,
| 'document_id': '1337'
| },...
| ]
|}
```
:param query: Query string
:param documents: List of Document in which to search for the answer
:param top_k: The maximum number of answers to return
:return: Dict containing query and answers
|
Use loaded QA model to find answers for a query in the supplied list of Document. | def predict(self, query: str, documents: List[Document], top_k: Optional[int] = None):
"""
Use loaded QA model to find answers for a query in the supplied list of Document.
Returns dictionaries containing answers sorted by (desc.) probability.
Example:
```python
|{
| 'query': 'Who is the father of Arya Stark?',
| 'answers':[
| {'answer': 'Eddard,',
| 'context': " She travels with her father, Eddard, to King's Landing when he is ",
| 'offset_answer_start': 147,
| 'offset_answer_end': 154,
| 'probability': 0.9787139466668613,
| 'score': None,
| 'document_id': '1337'
| },...
| ]
|}
```
:param query: Query string
:param documents: List of Document in which to search for the answer
:param top_k: The maximum number of answers to return
:return: Dict containing query and answers
"""
# convert input to FARM format
inputs = []
for doc in documents:
cur = QAInput(doc_text=doc.text,
questions=Question(text=query,
uid=doc.id))
inputs.append(cur)
# get answers from QA model
# TODO: Need fix in FARM's `to_dict` function of `QAInput` class
predictions = self.inferencer.inference_from_objects(
objects=inputs, return_json=False, multiprocessing_chunksize=1
)
# assemble answers from all the different documents & format them.
answers, max_no_ans_gap = self._extract_answers_of_predictions(predictions, top_k)
result = {"query": query,
"no_ans_gap": max_no_ans_gap,
"answers": answers}
return result | [
"def",
"predict",
"(",
"self",
",",
"query",
":",
"str",
",",
"documents",
":",
"List",
"[",
"Document",
"]",
",",
"top_k",
":",
"Optional",
"[",
"int",
"]",
"=",
"None",
")",
":",
"# convert input to FARM format",
"inputs",
"=",
"[",
"]",
"for",
"doc",
"in",
"documents",
":",
"cur",
"=",
"QAInput",
"(",
"doc_text",
"=",
"doc",
".",
"text",
",",
"questions",
"=",
"Question",
"(",
"text",
"=",
"query",
",",
"uid",
"=",
"doc",
".",
"id",
")",
")",
"inputs",
".",
"append",
"(",
"cur",
")",
"# get answers from QA model",
"# TODO: Need fix in FARM's `to_dict` function of `QAInput` class",
"predictions",
"=",
"self",
".",
"inferencer",
".",
"inference_from_objects",
"(",
"objects",
"=",
"inputs",
",",
"return_json",
"=",
"False",
",",
"multiprocessing_chunksize",
"=",
"1",
")",
"# assemble answers from all the different documents & format them.",
"answers",
",",
"max_no_ans_gap",
"=",
"self",
".",
"_extract_answers_of_predictions",
"(",
"predictions",
",",
"top_k",
")",
"result",
"=",
"{",
"\"query\"",
":",
"query",
",",
"\"no_ans_gap\"",
":",
"max_no_ans_gap",
",",
"\"answers\"",
":",
"answers",
"}",
"return",
"result"
] | [
332,
4
] | [
379,
21
] | python | en | ['en', 'error', 'th'] | False |
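A hedged usage sketch for `predict`; the `Document` import path differs between Haystack releases, so treat it as an assumption:
```python
from haystack.reader.farm import FARMReader
from haystack import Document  # in some releases: from haystack.schema import Document

reader = FARMReader(model_name_or_path="deepset/roberta-base-squad2")
docs = [Document(text="Arya Stark is a daughter of Lord Eddard Stark of Winterfell.")]
prediction = reader.predict(query="Who is the father of Arya Stark?", documents=docs, top_k=3)
for answer in prediction["answers"]:
    # each answer is a dict shaped like the example in the docstring above
    print(answer["answer"], answer["probability"])
```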
FARMReader.eval_on_file | (self, data_dir: str, test_filename: str, device: str) |
Performs evaluation on a SQuAD-formatted file.
Returns a dict containing the following metrics:
- "EM": exact match score
- "f1": F1-Score
- "top_n_accuracy": Proportion of predicted answers that overlap with correct answer
:param data_dir: The directory in which the test set can be found
:type data_dir: Path or str
:param test_filename: The name of the file containing the test data in SQuAD format.
:type test_filename: str
:param device: The device on which the tensors should be processed. Choose from "cpu" and "cuda".
:type device: str
|
Performs evaluation on a SQuAD-formatted file.
Returns a dict containing the following metrics:
- "EM": exact match score
- "f1": F1-Score
- "top_n_accuracy": Proportion of predicted answers that overlap with correct answer | def eval_on_file(self, data_dir: str, test_filename: str, device: str):
"""
Performs evaluation on a SQuAD-formatted file.
Returns a dict containing the following metrics:
- "EM": exact match score
- "f1": F1-Score
- "top_n_accuracy": Proportion of predicted answers that overlap with correct answer
:param data_dir: The directory in which the test set can be found
:type data_dir: Path or str
:param test_filename: The name of the file containing the test data in SQuAD format.
:type test_filename: str
:param device: The device on which the tensors should be processed. Choose from "cpu" and "cuda".
:type device: str
"""
eval_processor = SquadProcessor(
tokenizer=self.inferencer.processor.tokenizer,
max_seq_len=self.inferencer.processor.max_seq_len,
label_list=self.inferencer.processor.tasks["question_answering"]["label_list"],
metric=self.inferencer.processor.tasks["question_answering"]["metric"],
train_filename=None,
dev_filename=None,
dev_split=0,
test_filename=test_filename,
data_dir=Path(data_dir),
)
data_silo = DataSilo(processor=eval_processor, batch_size=self.inferencer.batch_size, distributed=False)
data_loader = data_silo.get_data_loader("test")
evaluator = Evaluator(data_loader=data_loader, tasks=eval_processor.tasks, device=device)
eval_results = evaluator.eval(self.inferencer.model)
results = {
"EM": eval_results[0]["EM"],
"f1": eval_results[0]["f1"],
"top_n_accuracy": eval_results[0]["top_n_accuracy"]
}
return results | [
"def",
"eval_on_file",
"(",
"self",
",",
"data_dir",
":",
"str",
",",
"test_filename",
":",
"str",
",",
"device",
":",
"str",
")",
":",
"eval_processor",
"=",
"SquadProcessor",
"(",
"tokenizer",
"=",
"self",
".",
"inferencer",
".",
"processor",
".",
"tokenizer",
",",
"max_seq_len",
"=",
"self",
".",
"inferencer",
".",
"processor",
".",
"max_seq_len",
",",
"label_list",
"=",
"self",
".",
"inferencer",
".",
"processor",
".",
"tasks",
"[",
"\"question_answering\"",
"]",
"[",
"\"label_list\"",
"]",
",",
"metric",
"=",
"self",
".",
"inferencer",
".",
"processor",
".",
"tasks",
"[",
"\"question_answering\"",
"]",
"[",
"\"metric\"",
"]",
",",
"train_filename",
"=",
"None",
",",
"dev_filename",
"=",
"None",
",",
"dev_split",
"=",
"0",
",",
"test_filename",
"=",
"test_filename",
",",
"data_dir",
"=",
"Path",
"(",
"data_dir",
")",
",",
")",
"data_silo",
"=",
"DataSilo",
"(",
"processor",
"=",
"eval_processor",
",",
"batch_size",
"=",
"self",
".",
"inferencer",
".",
"batch_size",
",",
"distributed",
"=",
"False",
")",
"data_loader",
"=",
"data_silo",
".",
"get_data_loader",
"(",
"\"test\"",
")",
"evaluator",
"=",
"Evaluator",
"(",
"data_loader",
"=",
"data_loader",
",",
"tasks",
"=",
"eval_processor",
".",
"tasks",
",",
"device",
"=",
"device",
")",
"eval_results",
"=",
"evaluator",
".",
"eval",
"(",
"self",
".",
"inferencer",
".",
"model",
")",
"results",
"=",
"{",
"\"EM\"",
":",
"eval_results",
"[",
"0",
"]",
"[",
"\"EM\"",
"]",
",",
"\"f1\"",
":",
"eval_results",
"[",
"0",
"]",
"[",
"\"f1\"",
"]",
",",
"\"top_n_accuracy\"",
":",
"eval_results",
"[",
"0",
"]",
"[",
"\"top_n_accuracy\"",
"]",
"}",
"return",
"results"
] | [
381,
4
] | [
419,
22
] | python | en | ['en', 'error', 'th'] | False |
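A hedged sketch of running `eval_on_file` against a local SQuAD dev file (paths are placeholders):
```python
from haystack.reader.farm import FARMReader

reader = FARMReader(model_name_or_path="deepset/roberta-base-squad2")
metrics = reader.eval_on_file(data_dir="data/squad20", test_filename="dev-v2.0.json", device="cpu")
print(metrics)  # expected keys per the record above: "EM", "f1", "top_n_accuracy"
```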
FARMReader.eval | (
self,
document_store: BaseDocumentStore,
device: str,
label_index: str = "label",
doc_index: str = "eval_document",
label_origin: str = "gold_label",
) |
Performs evaluation on evaluation documents in the DocumentStore.
Returns a dict containing the following metrics:
- "EM": Proportion of exact matches of predicted answers with their corresponding correct answers
- "f1": Average overlap between predicted answers and their corresponding correct answers
- "top_n_accuracy": Proportion of predicted answers that overlap with correct answer
:param document_store: DocumentStore containing the evaluation documents
:param device: The device on which the tensors should be processed. Choose from "cpu" and "cuda".
:param label_index: Index/Table name where labeled questions are stored
:param doc_index: Index/Table name where documents that are used for evaluation are stored
|
Performs evaluation on evaluation documents in the DocumentStore.
Returns a dict containing the following metrics:
- "EM": Proportion of exact matches of predicted answers with their corresponding correct answers
- "f1": Average overlap between predicted answers and their corresponding correct answers
- "top_n_accuracy": Proportion of predicted answers that overlap with correct answer | def eval(
self,
document_store: BaseDocumentStore,
device: str,
label_index: str = "label",
doc_index: str = "eval_document",
label_origin: str = "gold_label",
):
"""
Performs evaluation on evaluation documents in the DocumentStore.
Returns a dict containing the following metrics:
- "EM": Proportion of exact matches of predicted answers with their corresponding correct answers
- "f1": Average overlap between predicted answers and their corresponding correct answers
- "top_n_accuracy": Proportion of predicted answers that overlap with correct answer
:param document_store: DocumentStore containing the evaluation documents
:param device: The device on which the tensors should be processed. Choose from "cpu" and "cuda".
:param label_index: Index/Table name where labeled questions are stored
:param doc_index: Index/Table name where documents that are used for evaluation are stored
"""
if self.top_k_per_candidate != 4:
logger.info(f"Performing Evaluation using top_k_per_candidate = {self.top_k_per_candidate} \n"
f"and consequently, QuestionAnsweringPredictionHead.n_best = {self.top_k_per_candidate + 1}. \n"
f"This deviates from FARM's default where QuestionAnsweringPredictionHead.n_best = 5")
# extract all questions for evaluation
filters = {"origin": [label_origin]}
labels = document_store.get_all_labels(index=label_index, filters=filters)
# Aggregate all answer labels per question
aggregated_per_doc = defaultdict(list)
for label in labels:
if not label.document_id:
logger.error(f"Label does not contain a document_id")
continue
aggregated_per_doc[label.document_id].append(label)
# Create squad style dicts
d: Dict[str, Any] = {}
all_doc_ids = [x.id for x in document_store.get_all_documents(doc_index)]
for doc_id in all_doc_ids:
doc = document_store.get_document_by_id(doc_id, index=doc_index)
if not doc:
logger.error(f"Document with the ID '{doc_id}' is not present in the document store.")
continue
d[str(doc_id)] = {
"context": doc.text
}
# get all questions / answers
aggregated_per_question: Dict[str, Any] = defaultdict(list)
if doc_id in aggregated_per_doc:
for label in aggregated_per_doc[doc_id]:
# add to existing answers
if label.question in aggregated_per_question.keys():
if label.offset_start_in_doc == 0 and label.answer == "":
continue
else:
# Hack to fix problem where duplicate questions are merged by doc_store processing creating a QA example with 8 annotations > 6 annotation max
if len(aggregated_per_question[label.question]["answers"]) >= 6:
continue
aggregated_per_question[label.question]["answers"].append({
"text": label.answer,
"answer_start": label.offset_start_in_doc})
aggregated_per_question[label.question]["is_impossible"] = False
# create new one
else:
# We don't need to create an answer dict if is_impossible / no_answer
if label.offset_start_in_doc == 0 and label.answer == "":
aggregated_per_question[label.question] = {
"id": str(hash(str(doc_id) + label.question)),
"question": label.question,
"answers": [],
"is_impossible": True
}
else:
aggregated_per_question[label.question] = {
"id": str(hash(str(doc_id)+label.question)),
"question": label.question,
"answers": [{
"text": label.answer,
"answer_start": label.offset_start_in_doc}],
"is_impossible": False
}
# Get rid of the question key again (after we aggregated we don't need it anymore)
d[str(doc_id)]["qas"] = [v for v in aggregated_per_question.values()]
# Convert input format for FARM
farm_input = [v for v in d.values()]
n_queries = len([y for x in farm_input for y in x["qas"]])
# Create DataLoader that can be passed to the Evaluator
tic = perf_counter()
indices = range(len(farm_input))
dataset, tensor_names, problematic_ids = self.inferencer.processor.dataset_from_dicts(farm_input, indices=indices)
data_loader = NamedDataLoader(dataset=dataset, batch_size=self.inferencer.batch_size, tensor_names=tensor_names)
evaluator = Evaluator(data_loader=data_loader, tasks=self.inferencer.processor.tasks, device=device)
eval_results = evaluator.eval(self.inferencer.model)
toc = perf_counter()
reader_time = toc - tic
results = {
"EM": eval_results[0]["EM"] * 100,
"f1": eval_results[0]["f1"] * 100,
"top_n_accuracy": eval_results[0]["top_n_accuracy"] * 100,
"top_n": self.inferencer.model.prediction_heads[0].n_best,
"reader_time": reader_time,
"seconds_per_query": reader_time / n_queries
}
return results | [
"def",
"eval",
"(",
"self",
",",
"document_store",
":",
"BaseDocumentStore",
",",
"device",
":",
"str",
",",
"label_index",
":",
"str",
"=",
"\"label\"",
",",
"doc_index",
":",
"str",
"=",
"\"eval_document\"",
",",
"label_origin",
":",
"str",
"=",
"\"gold_label\"",
",",
")",
":",
"if",
"self",
".",
"top_k_per_candidate",
"!=",
"4",
":",
"logger",
".",
"info",
"(",
"f\"Performing Evaluation using top_k_per_candidate = {self.top_k_per_candidate} \\n\"",
"f\"and consequently, QuestionAnsweringPredictionHead.n_best = {self.top_k_per_candidate + 1}. \\n\"",
"f\"This deviates from FARM's default where QuestionAnsweringPredictionHead.n_best = 5\"",
")",
"# extract all questions for evaluation",
"filters",
"=",
"{",
"\"origin\"",
":",
"[",
"label_origin",
"]",
"}",
"labels",
"=",
"document_store",
".",
"get_all_labels",
"(",
"index",
"=",
"label_index",
",",
"filters",
"=",
"filters",
")",
"# Aggregate all answer labels per question",
"aggregated_per_doc",
"=",
"defaultdict",
"(",
"list",
")",
"for",
"label",
"in",
"labels",
":",
"if",
"not",
"label",
".",
"document_id",
":",
"logger",
".",
"error",
"(",
"f\"Label does not contain a document_id\"",
")",
"continue",
"aggregated_per_doc",
"[",
"label",
".",
"document_id",
"]",
".",
"append",
"(",
"label",
")",
"# Create squad style dicts",
"d",
":",
"Dict",
"[",
"str",
",",
"Any",
"]",
"=",
"{",
"}",
"all_doc_ids",
"=",
"[",
"x",
".",
"id",
"for",
"x",
"in",
"document_store",
".",
"get_all_documents",
"(",
"doc_index",
")",
"]",
"for",
"doc_id",
"in",
"all_doc_ids",
":",
"doc",
"=",
"document_store",
".",
"get_document_by_id",
"(",
"doc_id",
",",
"index",
"=",
"doc_index",
")",
"if",
"not",
"doc",
":",
"logger",
".",
"error",
"(",
"f\"Document with the ID '{doc_id}' is not present in the document store.\"",
")",
"continue",
"d",
"[",
"str",
"(",
"doc_id",
")",
"]",
"=",
"{",
"\"context\"",
":",
"doc",
".",
"text",
"}",
"# get all questions / answers",
"aggregated_per_question",
":",
"Dict",
"[",
"str",
",",
"Any",
"]",
"=",
"defaultdict",
"(",
"list",
")",
"if",
"doc_id",
"in",
"aggregated_per_doc",
":",
"for",
"label",
"in",
"aggregated_per_doc",
"[",
"doc_id",
"]",
":",
"# add to existing answers",
"if",
"label",
".",
"question",
"in",
"aggregated_per_question",
".",
"keys",
"(",
")",
":",
"if",
"label",
".",
"offset_start_in_doc",
"==",
"0",
"and",
"label",
".",
"answer",
"==",
"\"\"",
":",
"continue",
"else",
":",
"# Hack to fix problem where duplicate questions are merged by doc_store processing creating a QA example with 8 annotations > 6 annotation max",
"if",
"len",
"(",
"aggregated_per_question",
"[",
"label",
".",
"question",
"]",
"[",
"\"answers\"",
"]",
")",
">=",
"6",
":",
"continue",
"aggregated_per_question",
"[",
"label",
".",
"question",
"]",
"[",
"\"answers\"",
"]",
".",
"append",
"(",
"{",
"\"text\"",
":",
"label",
".",
"answer",
",",
"\"answer_start\"",
":",
"label",
".",
"offset_start_in_doc",
"}",
")",
"aggregated_per_question",
"[",
"label",
".",
"question",
"]",
"[",
"\"is_impossible\"",
"]",
"=",
"False",
"# create new one",
"else",
":",
"# We don't need to create an answer dict if is_impossible / no_answer",
"if",
"label",
".",
"offset_start_in_doc",
"==",
"0",
"and",
"label",
".",
"answer",
"==",
"\"\"",
":",
"aggregated_per_question",
"[",
"label",
".",
"question",
"]",
"=",
"{",
"\"id\"",
":",
"str",
"(",
"hash",
"(",
"str",
"(",
"doc_id",
")",
"+",
"label",
".",
"question",
")",
")",
",",
"\"question\"",
":",
"label",
".",
"question",
",",
"\"answers\"",
":",
"[",
"]",
",",
"\"is_impossible\"",
":",
"True",
"}",
"else",
":",
"aggregated_per_question",
"[",
"label",
".",
"question",
"]",
"=",
"{",
"\"id\"",
":",
"str",
"(",
"hash",
"(",
"str",
"(",
"doc_id",
")",
"+",
"label",
".",
"question",
")",
")",
",",
"\"question\"",
":",
"label",
".",
"question",
",",
"\"answers\"",
":",
"[",
"{",
"\"text\"",
":",
"label",
".",
"answer",
",",
"\"answer_start\"",
":",
"label",
".",
"offset_start_in_doc",
"}",
"]",
",",
"\"is_impossible\"",
":",
"False",
"}",
"# Get rid of the question key again (after we aggregated we don't need it anymore)",
"d",
"[",
"str",
"(",
"doc_id",
")",
"]",
"[",
"\"qas\"",
"]",
"=",
"[",
"v",
"for",
"v",
"in",
"aggregated_per_question",
".",
"values",
"(",
")",
"]",
"# Convert input format for FARM",
"farm_input",
"=",
"[",
"v",
"for",
"v",
"in",
"d",
".",
"values",
"(",
")",
"]",
"n_queries",
"=",
"len",
"(",
"[",
"y",
"for",
"x",
"in",
"farm_input",
"for",
"y",
"in",
"x",
"[",
"\"qas\"",
"]",
"]",
")",
"# Create DataLoader that can be passed to the Evaluator",
"tic",
"=",
"perf_counter",
"(",
")",
"indices",
"=",
"range",
"(",
"len",
"(",
"farm_input",
")",
")",
"dataset",
",",
"tensor_names",
",",
"problematic_ids",
"=",
"self",
".",
"inferencer",
".",
"processor",
".",
"dataset_from_dicts",
"(",
"farm_input",
",",
"indices",
"=",
"indices",
")",
"data_loader",
"=",
"NamedDataLoader",
"(",
"dataset",
"=",
"dataset",
",",
"batch_size",
"=",
"self",
".",
"inferencer",
".",
"batch_size",
",",
"tensor_names",
"=",
"tensor_names",
")",
"evaluator",
"=",
"Evaluator",
"(",
"data_loader",
"=",
"data_loader",
",",
"tasks",
"=",
"self",
".",
"inferencer",
".",
"processor",
".",
"tasks",
",",
"device",
"=",
"device",
")",
"eval_results",
"=",
"evaluator",
".",
"eval",
"(",
"self",
".",
"inferencer",
".",
"model",
")",
"toc",
"=",
"perf_counter",
"(",
")",
"reader_time",
"=",
"toc",
"-",
"tic",
"results",
"=",
"{",
"\"EM\"",
":",
"eval_results",
"[",
"0",
"]",
"[",
"\"EM\"",
"]",
"*",
"100",
",",
"\"f1\"",
":",
"eval_results",
"[",
"0",
"]",
"[",
"\"f1\"",
"]",
"*",
"100",
",",
"\"top_n_accuracy\"",
":",
"eval_results",
"[",
"0",
"]",
"[",
"\"top_n_accuracy\"",
"]",
"*",
"100",
",",
"\"top_n\"",
":",
"self",
".",
"inferencer",
".",
"model",
".",
"prediction_heads",
"[",
"0",
"]",
".",
"n_best",
",",
"\"reader_time\"",
":",
"reader_time",
",",
"\"seconds_per_query\"",
":",
"reader_time",
"/",
"n_queries",
"}",
"return",
"results"
] | [
421,
4
] | [
533,
22
] | python | en | ['en', 'error', 'th'] | False |
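A hedged end-to-end sketch for `eval`; it assumes a running Elasticsearch instance and that `add_eval_data` is available on the document store in the targeted Haystack version:
```python
from haystack.document_store.elasticsearch import ElasticsearchDocumentStore
from haystack.reader.farm import FARMReader

document_store = ElasticsearchDocumentStore()
# Load SQuAD-style evaluation documents and labels into the dedicated indices.
document_store.add_eval_data("data/squad20/dev-v2.0.json",
                             doc_index="eval_document", label_index="label")
reader = FARMReader(model_name_or_path="deepset/roberta-base-squad2")
metrics = reader.eval(document_store=document_store, device="cpu",
                      label_index="label", doc_index="eval_document")
print(metrics["EM"], metrics["f1"], metrics["top_n_accuracy"])
```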
FARMReader.predict_on_texts | (self, question: str, texts: List[str], top_k: Optional[int] = None) |
Use loaded QA model to find answers for a question in the supplied list of Document.
Returns dictionaries containing answers sorted by (desc.) probability.
Example:
```python
|{
| 'question': 'Who is the father of Arya Stark?',
| 'answers':[
| {'answer': 'Eddard,',
| 'context': " She travels with her father, Eddard, to King's Landing when he is ",
| 'offset_answer_start': 147,
| 'offset_answer_end': 154,
| 'probability': 0.9787139466668613,
| 'score': None,
| 'document_id': '1337'
| },...
| ]
|}
```
:param question: Question string
:param texts: List of document texts (plain strings)
:param top_k: The maximum number of answers to return
:return: Dict containing question and answers
|
Use loaded QA model to find answers for a question in the supplied list of Document.
Returns dictionaries containing answers sorted by (desc.) probability.
Example:
```python
|{
| 'question': 'Who is the father of Arya Stark?',
| 'answers':[
| {'answer': 'Eddard,',
| 'context': " She travels with her father, Eddard, to King's Landing when he is ",
| 'offset_answer_start': 147,
| 'offset_answer_end': 154,
| 'probability': 0.9787139466668613,
| 'score': None,
| 'document_id': '1337'
| },...
| ]
|}
``` | def predict_on_texts(self, question: str, texts: List[str], top_k: Optional[int] = None):
"""
Use loaded QA model to find answers for a question in the supplied list of Document.
Returns dictionaries containing answers sorted by (desc.) probability.
Example:
```python
|{
| 'question': 'Who is the father of Arya Stark?',
| 'answers':[
| {'answer': 'Eddard,',
| 'context': " She travels with her father, Eddard, to King's Landing when he is ",
| 'offset_answer_start': 147,
| 'offset_answer_end': 154,
| 'probability': 0.9787139466668613,
| 'score': None,
| 'document_id': '1337'
| },...
| ]
|}
```
:param question: Question string
:param texts: List of document texts (plain strings)
:param top_k: The maximum number of answers to return
:return: Dict containing question and answers
"""
documents = []
for text in texts:
documents.append(
Document(
text=text
)
)
predictions = self.predict(question, documents, top_k)
return predictions | [
"def",
"predict_on_texts",
"(",
"self",
",",
"question",
":",
"str",
",",
"texts",
":",
"List",
"[",
"str",
"]",
",",
"top_k",
":",
"Optional",
"[",
"int",
"]",
"=",
"None",
")",
":",
"documents",
"=",
"[",
"]",
"for",
"text",
"in",
"texts",
":",
"documents",
".",
"append",
"(",
"Document",
"(",
"text",
"=",
"text",
")",
")",
"predictions",
"=",
"self",
".",
"predict",
"(",
"question",
",",
"documents",
",",
"top_k",
")",
"return",
"predictions"
] | [
597,
4
] | [
631,
26
] | python | en | ['en', 'error', 'th'] | False |
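A hedged sketch for `predict_on_texts`, which wraps plain strings into `Document` objects internally:
```python
from haystack.reader.farm import FARMReader

reader = FARMReader(model_name_or_path="deepset/roberta-base-squad2")
result = reader.predict_on_texts(
    question="Who is the father of Arya Stark?",
    texts=[
        "Arya Stark is a daughter of Lord Eddard Stark.",
        "Eddard Stark is the Lord of Winterfell.",
    ],
    top_k=1,
)
print(result["answers"][0]["answer"])
```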
FARMReader.convert_to_onnx | (
cls,
model_name: str,
output_path: Path,
convert_to_float16: bool = False,
quantize: bool = False,
task_type: str = "question_answering",
opset_version: int = 11
) |
Convert a PyTorch BERT model to ONNX format and write to ./onnx-export dir. The converted ONNX model
can be loaded in the `FARMReader` using the export path as the `model_name_or_path` param.
Usage:
`from haystack.reader.farm import FARMReader
from pathlib import Path
onnx_model_path = Path("roberta-onnx-model")
FARMReader.convert_to_onnx(model_name="deepset/bert-base-cased-squad2", output_path=onnx_model_path)
reader = FARMReader(onnx_model_path)`
:param model_name: transformers model name
:param output_path: Path to output the converted model
:param convert_to_float16: Many models use float32 precision by default. With the half precision of float16,
inference is faster on Nvidia GPUs with Tensor core like T4 or V100. On older GPUs,
float32 could still be more performant.
:param quantize: convert floating point number to integers
:param task_type: Type of task for the model. Available options: "question_answering" or "embeddings".
:param opset_version: ONNX opset version
|
Convert a PyTorch BERT model to ONNX format and write to ./onnx-export dir. The converted ONNX model
can be loaded in the `FARMReader` using the export path as the `model_name_or_path` param.
cls,
model_name: str,
output_path: Path,
convert_to_float16: bool = False,
quantize: bool = False,
task_type: str = "question_answering",
opset_version: int = 11
):
"""
Convert a PyTorch BERT model to ONNX format and write to ./onnx-export dir. The converted ONNX model
can be loaded in the `FARMReader` using the export path as the `model_name_or_path` param.
Usage:
`from haystack.reader.farm import FARMReader
from pathlib import Path
onnx_model_path = Path("roberta-onnx-model")
FARMReader.convert_to_onnx(model_name="deepset/bert-base-cased-squad2", output_path=onnx_model_path)
reader = FARMReader(onnx_model_path)`
:param model_name: transformers model name
:param output_path: Path to output the converted model
:param convert_to_float16: Many models use float32 precision by default. With the half precision of float16,
inference is faster on Nvidia GPUs with Tensor core like T4 or V100. On older GPUs,
float32 could still be more performant.
:param quantize: convert floating point number to integers
:param task_type: Type of task for the model. Available options: "question_answering" or "embeddings".
:param opset_version: ONNX opset version
"""
AdaptiveModel.convert_to_onnx(
model_name=model_name,
output_path=output_path,
task_type=task_type,
convert_to_float16=convert_to_float16,
quantize=quantize,
opset_version=opset_version
) | [
"def",
"convert_to_onnx",
"(",
"cls",
",",
"model_name",
":",
"str",
",",
"output_path",
":",
"Path",
",",
"convert_to_float16",
":",
"bool",
"=",
"False",
",",
"quantize",
":",
"bool",
"=",
"False",
",",
"task_type",
":",
"str",
"=",
"\"question_answering\"",
",",
"opset_version",
":",
"int",
"=",
"11",
")",
":",
"AdaptiveModel",
".",
"convert_to_onnx",
"(",
"model_name",
"=",
"model_name",
",",
"output_path",
"=",
"output_path",
",",
"task_type",
"=",
"task_type",
",",
"convert_to_float16",
"=",
"convert_to_float16",
",",
"quantize",
"=",
"quantize",
",",
"opset_version",
"=",
"opset_version",
")"
] | [
634,
4
] | [
671,
9
] | python | en | ['en', 'error', 'th'] | False |
ExpectColumnMeanToBeBetween.validate_configuration | (self, configuration: Optional[ExpectationConfiguration]) |
Validates that a configuration has been set, and sets a configuration if it has yet to be set. Ensures that
necessary configuration arguments have been provided for the validation of the expectation.
Args:
configuration (OPTIONAL[ExpectationConfiguration]): \
An optional Expectation Configuration entry that will be used to configure the expectation
Returns:
True if the configuration has been validated successfully. Otherwise, raises an exception
|
Validates that a configuration has been set, and sets a configuration if it has yet to be set. Ensures that
necessary configuration arguments have been provided for the validation of the expectation. | def validate_configuration(self, configuration: Optional[ExpectationConfiguration]):
"""
Validates that a configuration has been set, and sets a configuration if it has yet to be set. Ensures that
necessary configuration arguments have been provided for the validation of the expectation.
Args:
configuration (OPTIONAL[ExpectationConfiguration]): \
An optional Expectation Configuration entry that will be used to configure the expectation
Returns:
True if the configuration has been validated successfully. Otherwise, raises an exception
"""
super().validate_configuration(configuration)
self.validate_metric_value_between_configuration(configuration=configuration) | [
"def",
"validate_configuration",
"(",
"self",
",",
"configuration",
":",
"Optional",
"[",
"ExpectationConfiguration",
"]",
")",
":",
"super",
"(",
")",
".",
"validate_configuration",
"(",
"configuration",
")",
"self",
".",
"validate_metric_value_between_configuration",
"(",
"configuration",
"=",
"configuration",
")"
] | [
100,
4
] | [
112,
85
] | python | en | ['en', 'error', 'th'] | False |
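For context, a hedged sketch of the expectation this `validate_configuration` method guards, using Great Expectations' Pandas convenience API (column name and bounds are illustrative):
```python
import pandas as pd
import great_expectations as ge

# Wrap a toy DataFrame so expectation methods become available on it.
df = ge.from_pandas(pd.DataFrame({"price": [10, 12, 14, 16]}))
result = df.expect_column_mean_to_be_between("price", min_value=5, max_value=20)
print(result.success)  # True for this toy frame: the mean is 13
```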
get_domain_url | (url) |
Use this to convert a url like this:
https://blog.xkcd.com/2014/07/22/what-if-book-tour/
Into this:
https://blog.xkcd.com
|
Use this to convert a url like this:
https://blog.xkcd.com/2014/07/22/what-if-book-tour/
Into this:
https://blog.xkcd.com
| def get_domain_url(url):
"""
Use this to convert a url like this:
https://blog.xkcd.com/2014/07/22/what-if-book-tour/
Into this:
https://blog.xkcd.com
"""
if not url.startswith("http://") and not url.startswith("https://"):
return url
url_header = url.split('://')[0]
simple_url = url.split('://')[1]
base_url = simple_url.split('/')[0]
domain_url = url_header + '://' + base_url
return domain_url | [
"def",
"get_domain_url",
"(",
"url",
")",
":",
"if",
"not",
"url",
".",
"startswith",
"(",
"\"http://\"",
")",
"and",
"not",
"url",
".",
"startswith",
"(",
"\"https://\"",
")",
":",
"return",
"url",
"url_header",
"=",
"url",
".",
"split",
"(",
"'://'",
")",
"[",
"0",
"]",
"simple_url",
"=",
"url",
".",
"split",
"(",
"'://'",
")",
"[",
"1",
"]",
"base_url",
"=",
"simple_url",
".",
"split",
"(",
"'/'",
")",
"[",
"0",
"]",
"domain_url",
"=",
"url_header",
"+",
"'://'",
"+",
"base_url",
"return",
"domain_url"
] | [
8,
0
] | [
21,
21
] | python | en | ['en', 'error', 'th'] | False |
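A quick check of the helper's behaviour, assuming `get_domain_url` from the record above is in scope:
```python
print(get_domain_url("https://blog.xkcd.com/2014/07/22/what-if-book-tour/"))
# -> https://blog.xkcd.com
print(get_domain_url("ftp://example.com/pub/file.txt"))
# -> returned unchanged: the helper only rewrites http(s) URLs
```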
is_xpath_selector | (selector) |
A basic method to determine if a selector is an xpath selector.
|
A basic method to determine if a selector is an xpath selector.
| def is_xpath_selector(selector):
"""
A basic method to determine if a selector is an xpath selector.
"""
if (selector.startswith('/') or selector.startswith('./') or (
selector.startswith('('))):
return True
return False | [
"def",
"is_xpath_selector",
"(",
"selector",
")",
":",
"if",
"(",
"selector",
".",
"startswith",
"(",
"'/'",
")",
"or",
"selector",
".",
"startswith",
"(",
"'./'",
")",
"or",
"(",
"selector",
".",
"startswith",
"(",
"'('",
")",
")",
")",
":",
"return",
"True",
"return",
"False"
] | [
24,
0
] | [
31,
16
] | python | en | ['en', 'error', 'th'] | False |
is_link_text_selector | (selector) |
A basic method to determine if a selector is a link text selector.
|
A basic method to determine if a selector is a link text selector.
| def is_link_text_selector(selector):
"""
A basic method to determine if a selector is a link text selector.
"""
if (selector.startswith('link=') or selector.startswith('link_text=') or (
selector.startswith('text='))):
return True
return False | [
"def",
"is_link_text_selector",
"(",
"selector",
")",
":",
"if",
"(",
"selector",
".",
"startswith",
"(",
"'link='",
")",
"or",
"selector",
".",
"startswith",
"(",
"'link_text='",
")",
"or",
"(",
"selector",
".",
"startswith",
"(",
"'text='",
")",
")",
")",
":",
"return",
"True",
"return",
"False"
] | [
34,
0
] | [
41,
16
] | python | en | ['en', 'error', 'th'] | False |
is_partial_link_text_selector | (selector) |
A basic method to determine if a selector is a partial link text selector.
|
A basic method to determine if a selector is a partial link text selector.
| def is_partial_link_text_selector(selector):
"""
A basic method to determine if a selector is a partial link text selector.
"""
if (selector.startswith('partial_link=') or (
selector.startswith('partial_link_text=') or (
selector.startswith('partial_text=')))):
return True
return False | [
"def",
"is_partial_link_text_selector",
"(",
"selector",
")",
":",
"if",
"(",
"selector",
".",
"startswith",
"(",
"'partial_link='",
")",
"or",
"(",
"selector",
".",
"startswith",
"(",
"'partial_link_text='",
")",
"or",
"(",
"selector",
".",
"startswith",
"(",
"'partial_text='",
")",
")",
")",
")",
":",
"return",
"True",
"return",
"False"
] | [
44,
0
] | [
52,
16
] | python | en | ['en', 'error', 'th'] | False |
is_name_selector | (selector) |
A basic method to determine if a selector is a name selector.
|
A basic method to determine if a selector is a name selector.
| def is_name_selector(selector):
"""
A basic method to determine if a selector is a name selector.
"""
if selector.startswith('name='):
return True
return False | [
"def",
"is_name_selector",
"(",
"selector",
")",
":",
"if",
"selector",
".",
"startswith",
"(",
"'name='",
")",
":",
"return",
"True",
"return",
"False"
] | [
55,
0
] | [
61,
16
] | python | en | ['en', 'error', 'th'] | False |
get_link_text_from_selector | (selector) |
A basic method to get the link text from a link text selector.
|
A basic method to get the link text from a link text selector.
| def get_link_text_from_selector(selector):
"""
A basic method to get the link text from a link text selector.
"""
if selector.startswith('link='):
return selector.split('link=')[1]
elif selector.startswith('link_text='):
return selector.split('link_text=')[1]
elif selector.startswith('text='):
return selector.split('text=')[1]
return selector | [
"def",
"get_link_text_from_selector",
"(",
"selector",
")",
":",
"if",
"selector",
".",
"startswith",
"(",
"'link='",
")",
":",
"return",
"selector",
".",
"split",
"(",
"'link='",
")",
"[",
"1",
"]",
"elif",
"selector",
".",
"startswith",
"(",
"'link_text='",
")",
":",
"return",
"selector",
".",
"split",
"(",
"'link_text='",
")",
"[",
"1",
"]",
"elif",
"selector",
".",
"startswith",
"(",
"'text='",
")",
":",
"return",
"selector",
".",
"split",
"(",
"'text='",
")",
"[",
"1",
"]",
"return",
"selector"
] | [
64,
0
] | [
74,
19
] | python | en | ['en', 'error', 'th'] | False |
get_partial_link_text_from_selector | (selector) |
A basic method to get the partial link text from a partial link selector.
|
A basic method to get the partial link text from a partial link selector.
| def get_partial_link_text_from_selector(selector):
"""
A basic method to get the partial link text from a partial link selector.
"""
if selector.startswith('partial_link='):
return selector.split('partial_link=')[1]
elif selector.startswith('partial_link_text='):
return selector.split('partial_link_text=')[1]
elif selector.startswith('partial_text='):
return selector.split('partial_text=')[1]
return selector | [
"def",
"get_partial_link_text_from_selector",
"(",
"selector",
")",
":",
"if",
"selector",
".",
"startswith",
"(",
"'partial_link='",
")",
":",
"return",
"selector",
".",
"split",
"(",
"'partial_link='",
")",
"[",
"1",
"]",
"elif",
"selector",
".",
"startswith",
"(",
"'partial_link_text='",
")",
":",
"return",
"selector",
".",
"split",
"(",
"'partial_link_text='",
")",
"[",
"1",
"]",
"elif",
"selector",
".",
"startswith",
"(",
"'partial_text='",
")",
":",
"return",
"selector",
".",
"split",
"(",
"'partial_text='",
")",
"[",
"1",
"]",
"return",
"selector"
] | [
77,
0
] | [
87,
19
] | python | en | ['en', 'error', 'th'] | False |
get_name_from_selector | (selector) |
A basic method to get the name from a name selector.
|
A basic method to get the name from a name selector.
| def get_name_from_selector(selector):
"""
A basic method to get the name from a name selector.
"""
if selector.startswith('name='):
return selector.split('name=')[1]
return selector | [
"def",
"get_name_from_selector",
"(",
"selector",
")",
":",
"if",
"selector",
".",
"startswith",
"(",
"'name='",
")",
":",
"return",
"selector",
".",
"split",
"(",
"'name='",
")",
"[",
"1",
"]",
"return",
"selector"
] | [
90,
0
] | [
96,
19
] | python | en | ['en', 'error', 'th'] | False |
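A hedged sketch tying the selector helpers above together, assuming they are imported from the same module:
```python
# Route each selector to the matching parser, mirroring the checks defined above.
for selector in ("link=Click here", "partial_link=Click", "name=username", "//div[@id='x']"):
    if is_link_text_selector(selector):
        print("link text:", get_link_text_from_selector(selector))
    elif is_partial_link_text_selector(selector):
        print("partial link text:", get_partial_link_text_from_selector(selector))
    elif is_name_selector(selector):
        print("name:", get_name_from_selector(selector))
    elif is_xpath_selector(selector):
        print("xpath:", selector)
```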
_get_unique_links | (page_url, soup) |
Returns all unique links.
Includes:
"a"->"href", "img"->"src", "link"->"href", and "script"->"src" links.
|
Returns all unique links.
Includes:
"a"->"href", "img"->"src", "link"->"href", and "script"->"src" links.
| def _get_unique_links(page_url, soup):
"""
Returns all unique links.
Includes:
"a"->"href", "img"->"src", "link"->"href", and "script"->"src" links.
"""
if not page_url.startswith("http://") and (
not page_url.startswith("https://")):
return []
prefix = 'http:'
if page_url.startswith('https:'):
prefix = 'https:'
simple_url = page_url.split('://')[1]
base_url = simple_url.split('/')[0]
full_base_url = prefix + "//" + base_url
raw_links = []
raw_unique_links = []
# Get "href" from all "a" tags
links = soup.find_all('a')
for link in links:
raw_links.append(link.get('href'))
# Get "src" from all "img" tags
img_links = soup.find_all('img')
for img_link in img_links:
raw_links.append(img_link.get('src'))
# Get "href" from all "link" tags
links = soup.find_all('link')
for link in links:
raw_links.append(link.get('href'))
# Get "src" from all "script" tags
img_links = soup.find_all('script')
for img_link in img_links:
raw_links.append(img_link.get('src'))
for link in raw_links:
if link not in raw_unique_links:
raw_unique_links.append(link)
unique_links = []
for link in raw_unique_links:
if link and len(link) > 1:
if link.startswith('//'):
link = prefix + link
elif link.startswith('/'):
link = full_base_url + link
elif link.startswith('./'):
link = full_base_url + link[1:]
elif link.startswith('#'):
link = full_base_url + link
elif '//' not in link:
link = full_base_url + "/" + link
else:
pass
unique_links.append(link)
return unique_links | [
"def",
"_get_unique_links",
"(",
"page_url",
",",
"soup",
")",
":",
"if",
"not",
"page_url",
".",
"startswith",
"(",
"\"http://\"",
")",
"and",
"(",
"not",
"page_url",
".",
"startswith",
"(",
"\"https://\"",
")",
")",
":",
"return",
"[",
"]",
"prefix",
"=",
"'http:'",
"if",
"page_url",
".",
"startswith",
"(",
"'https:'",
")",
":",
"prefix",
"=",
"'https:'",
"simple_url",
"=",
"page_url",
".",
"split",
"(",
"'://'",
")",
"[",
"1",
"]",
"base_url",
"=",
"simple_url",
".",
"split",
"(",
"'/'",
")",
"[",
"0",
"]",
"full_base_url",
"=",
"prefix",
"+",
"\"//\"",
"+",
"base_url",
"raw_links",
"=",
"[",
"]",
"raw_unique_links",
"=",
"[",
"]",
"# Get \"href\" from all \"a\" tags",
"links",
"=",
"soup",
".",
"find_all",
"(",
"'a'",
")",
"for",
"link",
"in",
"links",
":",
"raw_links",
".",
"append",
"(",
"link",
".",
"get",
"(",
"'href'",
")",
")",
"# Get \"src\" from all \"img\" tags",
"img_links",
"=",
"soup",
".",
"find_all",
"(",
"'img'",
")",
"for",
"img_link",
"in",
"img_links",
":",
"raw_links",
".",
"append",
"(",
"img_link",
".",
"get",
"(",
"'src'",
")",
")",
"# Get \"href\" from all \"link\" tags",
"links",
"=",
"soup",
".",
"find_all",
"(",
"'link'",
")",
"for",
"link",
"in",
"links",
":",
"raw_links",
".",
"append",
"(",
"link",
".",
"get",
"(",
"'href'",
")",
")",
"# Get \"src\" from all \"script\" tags",
"img_links",
"=",
"soup",
".",
"find_all",
"(",
"'script'",
")",
"for",
"img_link",
"in",
"img_links",
":",
"raw_links",
".",
"append",
"(",
"img_link",
".",
"get",
"(",
"'src'",
")",
")",
"for",
"link",
"in",
"raw_links",
":",
"if",
"link",
"not",
"in",
"raw_unique_links",
":",
"raw_unique_links",
".",
"append",
"(",
"link",
")",
"unique_links",
"=",
"[",
"]",
"for",
"link",
"in",
"raw_unique_links",
":",
"if",
"link",
"and",
"len",
"(",
"link",
")",
">",
"1",
":",
"if",
"link",
".",
"startswith",
"(",
"'//'",
")",
":",
"link",
"=",
"prefix",
"+",
"link",
"elif",
"link",
".",
"startswith",
"(",
"'/'",
")",
":",
"link",
"=",
"full_base_url",
"+",
"link",
"elif",
"link",
".",
"startswith",
"(",
"'./'",
")",
":",
"link",
"=",
"full_base_url",
"+",
"link",
"[",
"1",
":",
"]",
"elif",
"link",
".",
"startswith",
"(",
"'#'",
")",
":",
"link",
"=",
"full_base_url",
"+",
"link",
"elif",
"'//'",
"not",
"in",
"link",
":",
"link",
"=",
"full_base_url",
"+",
"\"/\"",
"+",
"link",
"else",
":",
"pass",
"unique_links",
".",
"append",
"(",
"link",
")",
"return",
"unique_links"
] | [
116,
0
] | [
176,
23
] | python | en | ['en', 'error', 'th'] | False |
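A hedged sketch that feeds a live page into `_get_unique_links`; it assumes `requests` and `beautifulsoup4` are installed and that network access is available:
```python
import requests
from bs4 import BeautifulSoup

page_url = "https://example.com/"
soup = BeautifulSoup(requests.get(page_url, timeout=5).text, "html.parser")
for link in _get_unique_links(page_url, soup):
    print(link)
```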
_get_link_status_code | (link, allow_redirects=False, timeout=5) | Get the status code of a link.
If the timeout is exceeded, will return a 404.
For a list of available status codes, see:
https://en.wikipedia.org/wiki/List_of_HTTP_status_codes
| Get the status code of a link.
If the timeout is exceeded, will return a 404.
For a list of available status codes, see:
https://en.wikipedia.org/wiki/List_of_HTTP_status_codes
| def _get_link_status_code(link, allow_redirects=False, timeout=5):
""" Get the status code of a link.
If the timeout is exceeded, will return a 404.
For a list of available status codes, see:
https://en.wikipedia.org/wiki/List_of_HTTP_status_codes
"""
status_code = None
try:
response = requests.get(
link, allow_redirects=allow_redirects, timeout=timeout)
status_code = response.status_code
except Exception:
status_code = 404
return status_code | [
"def",
"_get_link_status_code",
"(",
"link",
",",
"allow_redirects",
"=",
"False",
",",
"timeout",
"=",
"5",
")",
":",
"status_code",
"=",
"None",
"try",
":",
"response",
"=",
"requests",
".",
"get",
"(",
"link",
",",
"allow_redirects",
"=",
"allow_redirects",
",",
"timeout",
"=",
"timeout",
")",
"status_code",
"=",
"response",
".",
"status_code",
"except",
"Exception",
":",
"status_code",
"=",
"404",
"return",
"status_code"
] | [
179,
0
] | [
192,
22
] | python | en | ['en', 'en', 'en'] | True |
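Illustrative calls to `_get_link_status_code`; note the 404 fallback described in the record above:
```python
print(_get_link_status_code("https://example.com/"))           # e.g. 200 when reachable
print(_get_link_status_code("https://no-such-host.invalid/"))  # -> 404 (exception fallback)
```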
_print_unique_links_with_status_codes | (page_url, soup) | Finds all unique links in the html of the page source
and then prints out those links with their status codes.
Format: ["link" -> "status_code"] (per line)
Page links include those obtained from:
"a"->"href", "img"->"src", "link"->"href", and "script"->"src".
| Finds all unique links in the html of the page source
and then prints out those links with their status codes.
Format: ["link" -> "status_code"] (per line)
Page links include those obtained from:
"a"->"href", "img"->"src", "link"->"href", and "script"->"src".
| def _print_unique_links_with_status_codes(page_url, soup):
""" Finds all unique links in the html of the page source
and then prints out those links with their status codes.
Format: ["link" -> "status_code"] (per line)
Page links include those obtained from:
"a"->"href", "img"->"src", "link"->"href", and "script"->"src".
"""
links = _get_unique_links(page_url, soup)
for link in links:
status_code = _get_link_status_code(link)
print(link, " -> ", status_code) | [
"def",
"_print_unique_links_with_status_codes",
"(",
"page_url",
",",
"soup",
")",
":",
"links",
"=",
"_get_unique_links",
"(",
"page_url",
",",
"soup",
")",
"for",
"link",
"in",
"links",
":",
"status_code",
"=",
"_get_link_status_code",
"(",
"link",
")",
"print",
"(",
"link",
",",
"\" -> \"",
",",
"status_code",
")"
] | [
195,
0
] | [
205,
40
] | python | en | ['en', 'en', 'en'] | True |