Dataset columns:
- identifier: string, length 1 to 155
- parameters: string, length 2 to 6.09k
- docstring: string, length 11 to 63.4k
- docstring_summary: string, length 0 to 63.4k
- function: string, length 29 to 99.8k
- function_tokens: sequence
- start_point: sequence
- end_point: sequence
- language: string, 1 class
- docstring_language: string, length 2 to 7
- docstring_language_predictions: string, length 18 to 23
- is_langid_reliable: string, 2 classes
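The rows that follow are code-function records rendered one field per line. A record set with this schema can be loaded and inspected with the Hugging Face datasets library; the file name below is a placeholder, since the dataset's actual path is not given here:

from datasets import load_dataset

# "functions.jsonl" is a stand-in path; substitute the real data file.
ds = load_dataset("json", data_files="functions.jsonl", split="train")
row = ds[0]
print(row["identifier"], row["language"])
print(row["start_point"], row["end_point"])  # (line, column) span of the function in its source file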
identifier: test_expect_column_values_to_be_in_type_list
parameters: (spark_session, test_dataframe)
docstring: data asset expectation
function:
def test_expect_column_values_to_be_in_type_list(spark_session, test_dataframe):
    """
    data asset expectation
    """
    from pyspark.sql.utils import AnalysisException

    assert test_dataframe.expect_column_values_to_be_in_type_list(
        "address.street", ["StringType", "IntegerType"]
    ).success
    assert test_dataframe.expect_column_values_to_be_in_type_list(
        "`non.nested`", ["StringType", "IntegerType"]
    ).success
    assert test_dataframe.expect_column_values_to_be_in_type_list(
        "name", ["StringType", "IntegerType"]
    ).success
    with pytest.raises(AnalysisException):
        test_dataframe.expect_column_values_to_be_of_type("non.nested", "StringType")
[ "def", "test_expect_column_values_to_be_in_type_list", "(", "spark_session", ",", "test_dataframe", ")", ":", "from", "pyspark", ".", "sql", ".", "utils", "import", "AnalysisException", "assert", "test_dataframe", ".", "expect_column_values_to_be_in_type_list", "(", "\"address.street\"", ",", "[", "\"StringType\"", ",", "\"IntegerType\"", "]", ")", ".", "success", "assert", "test_dataframe", ".", "expect_column_values_to_be_in_type_list", "(", "\"`non.nested`\"", ",", "[", "\"StringType\"", ",", "\"IntegerType\"", "]", ")", ".", "success", "assert", "test_dataframe", ".", "expect_column_values_to_be_in_type_list", "(", "\"name\"", ",", "[", "\"StringType\"", ",", "\"IntegerType\"", "]", ")", ".", "success", "with", "pytest", ".", "raises", "(", "AnalysisException", ")", ":", "test_dataframe", ".", "expect_column_values_to_be_of_type", "(", "\"non.nested\"", ",", "\"StringType\"", ")" ]
start_point: [168, 0]; end_point: [184, 85]
language: python; docstring_language: en; docstring_language_predictions: ['en', 'error', 'th']; is_langid_reliable: False
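A recurring point in these tests is how Spark parses dotted column names: a dot means struct-field access, so a flat column whose name contains a dot must be wrapped in backticks. A self-contained sketch (the schema here is made up, not the test fixture's):

from pyspark.sql import SparkSession
from pyspark.sql.types import StructType, StructField, StringType

spark = SparkSession.builder.master("local[1]").appName("dotted-columns").getOrCreate()
schema = StructType([
    StructField("name", StringType()),
    StructField("address", StructType([StructField("street", StringType())])),
    StructField("non.nested", StringType()),  # a flat column with a dot in its name
])
df = spark.createDataFrame([("Alice", ("Main St",), "flat value")], schema)

df.select("address.street").show()  # dot notation reaches into the struct
df.select("`non.nested`").show()    # backticks treat the dotted name as one flat column
# df.select("non.nested")           # raises AnalysisException: Spark looks for a struct "non"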
identifier: test_expect_column_pair_values_to_be_equal
parameters: (spark_session, test_dataframe)
docstring: column_pair_map_expectation
function:
def test_expect_column_pair_values_to_be_equal(spark_session, test_dataframe):
    """
    column_pair_map_expectation
    """
    from pyspark.sql.utils import AnalysisException

    assert test_dataframe.expect_column_pair_values_to_be_equal(
        "name", "name_duplicate"
    ).success
    assert not test_dataframe.expect_column_pair_values_to_be_equal(
        "name", "address.street"
    ).success
    assert not test_dataframe.expect_column_pair_values_to_be_equal(
        "name", "`non.nested`"
    ).success
    # Expectation should fail when no `` surround a non-nested column with dot notation
    with pytest.raises(AnalysisException):
        test_dataframe.expect_column_pair_values_to_be_equal("name", "non.nested")
[ "def", "test_expect_column_pair_values_to_be_equal", "(", "spark_session", ",", "test_dataframe", ")", ":", "from", "pyspark", ".", "sql", ".", "utils", "import", "AnalysisException", "assert", "test_dataframe", ".", "expect_column_pair_values_to_be_equal", "(", "\"name\"", ",", "\"name_duplicate\"", ")", ".", "success", "assert", "not", "test_dataframe", ".", "expect_column_pair_values_to_be_equal", "(", "\"name\"", ",", "\"address.street\"", ")", ".", "success", "assert", "not", "test_dataframe", ".", "expect_column_pair_values_to_be_equal", "(", "\"name\"", ",", "\"`non.nested`\"", ")", ".", "success", "# Expectation should fail when no `` surround a non-nested column with dot notation", "with", "pytest", ".", "raises", "(", "AnalysisException", ")", ":", "test_dataframe", ".", "expect_column_pair_values_to_be_equal", "(", "\"name\"", ",", "\"non.nested\"", ")" ]
start_point: [191, 0]; end_point: [209, 82]
language: python; docstring_language: en; docstring_language_predictions: ['en', 'error', 'th']; is_langid_reliable: False
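These expect_* calls come straight off the fixture object. That API shape matches great_expectations' legacy SparkDFDataset wrapper, so the fixture presumably wraps a Spark DataFrame in it; a sketch under that assumption (the import path and wrapper are the assumption, and the data is made up):

from pyspark.sql import SparkSession
from great_expectations.dataset import SparkDFDataset  # assumption: legacy GE dataset API

spark = SparkSession.builder.master("local[1]").appName("ge-wrapper").getOrCreate()
spark_df = spark.createDataFrame(
    [("Alice", "Alice"), ("Bob", "Bob")], "name string, name_duplicate string"
)

# Wrapping the DataFrame adds the expect_* methods used by the tests above.
gdf = SparkDFDataset(spark_df)
print(gdf.expect_column_pair_values_to_be_equal("name", "name_duplicate").success)  # True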
identifier: test_expect_column_pair_values_A_to_be_greater_than_B
parameters: (spark_session, test_dataframe)
docstring: column_pair_map_expectation
function:
def test_expect_column_pair_values_A_to_be_greater_than_B(
    spark_session, test_dataframe
):
    """
    column_pair_map_expectation
    """
    assert test_dataframe.expect_column_pair_values_A_to_be_greater_than_B(
        "address.house_number", "age"
    ).success
    assert test_dataframe.expect_column_pair_values_A_to_be_greater_than_B(
        "age", "age", or_equal=True
    ).success
[ "def", "test_expect_column_pair_values_A_to_be_greater_than_B", "(", "spark_session", ",", "test_dataframe", ")", ":", "assert", "test_dataframe", ".", "expect_column_pair_values_A_to_be_greater_than_B", "(", "\"address.house_number\"", ",", "\"age\"", ")", ".", "success", "assert", "test_dataframe", ".", "expect_column_pair_values_A_to_be_greater_than_B", "(", "\"age\"", ",", "\"age\"", ",", "or_equal", "=", "True", ")", ".", "success" ]
start_point: [216, 0]; end_point: [227, 13]
language: python; docstring_language: en; docstring_language_predictions: ['en', 'error', 'th']; is_langid_reliable: False
identifier: test_expect_select_column_values_to_be_unique_within_record
parameters: (spark_session, test_dataframe)
docstring: multicolumn_map_expectation
function:
def test_expect_select_column_values_to_be_unique_within_record(
    spark_session, test_dataframe
):
    """
    multicolumn_map_expectation
    """
    from pyspark.sql.utils import AnalysisException

    assert test_dataframe.expect_select_column_values_to_be_unique_within_record(
        ["name", "age"]
    ).success
    assert test_dataframe.expect_select_column_values_to_be_unique_within_record(
        ["address.street", "name"]
    ).success
    assert test_dataframe.expect_select_column_values_to_be_unique_within_record(
        ["address.street", "`non.nested`"]
    ).success
    # Expectation should fail when no `` surround a non-nested column with dot notation
    with pytest.raises(AnalysisException):
        test_dataframe.expect_select_column_values_to_be_unique_within_record(
            ["address.street", "non.nested"]
        )
[ "def", "test_expect_select_column_values_to_be_unique_within_record", "(", "spark_session", ",", "test_dataframe", ")", ":", "from", "pyspark", ".", "sql", ".", "utils", "import", "AnalysisException", "assert", "test_dataframe", ".", "expect_select_column_values_to_be_unique_within_record", "(", "[", "\"name\"", ",", "\"age\"", "]", ")", ".", "success", "assert", "test_dataframe", ".", "expect_select_column_values_to_be_unique_within_record", "(", "[", "\"address.street\"", ",", "\"name\"", "]", ")", ".", "success", "assert", "test_dataframe", ".", "expect_select_column_values_to_be_unique_within_record", "(", "[", "\"address.street\"", ",", "\"`non.nested`\"", "]", ")", ".", "success", "# Expectation should fail when no `` surround a non-nested column with dot notation", "with", "pytest", ".", "raises", "(", "AnalysisException", ")", ":", "test_dataframe", ".", "expect_select_column_values_to_be_unique_within_record", "(", "[", "\"address.street\"", ",", "\"non.nested\"", "]", ")" ]
start_point: [234, 0]; end_point: [256, 9]
language: python; docstring_language: en; docstring_language_predictions: ['en', 'error', 'th']; is_langid_reliable: False
identifier: test_expect_compound_columns_to_be_unique
parameters: (spark_session, test_dataframe)
docstring: multicolumn_map_expectation
function:
def test_expect_compound_columns_to_be_unique(spark_session, test_dataframe):
    """
    multicolumn_map_expectation
    """
    from pyspark.sql.utils import AnalysisException

    # Positive tests
    assert test_dataframe.expect_compound_columns_to_be_unique(["name", "age"]).success
    assert test_dataframe.expect_compound_columns_to_be_unique(
        ["address.street", "name"]
    ).success
    assert test_dataframe.expect_compound_columns_to_be_unique(
        ["address.street", "address.city"]
    ).success
    assert test_dataframe.expect_compound_columns_to_be_unique(
        ["name_with_duplicates", "age_with_duplicates", "name"]
    ).success
    assert test_dataframe.expect_compound_columns_to_be_unique(
        ["address.street", "`non.nested`"]
    ).success
    assert test_dataframe.expect_compound_columns_to_be_unique(
        ["name", "name_with_duplicates"]
    ).success
    assert test_dataframe.expect_compound_columns_to_be_unique(
        [
            "name",
            "name_with_duplicates",
            "address_with_duplicates.street",
            "address_with_duplicates.city",
            "address_with_duplicates.house_number",
        ]
    ).success

    # Negative tests
    assert not test_dataframe.expect_compound_columns_to_be_unique(
        ["address_with_duplicates.city", "address_with_duplicates.house_number"]
    ).success
    assert not test_dataframe.expect_compound_columns_to_be_unique(
        ["name_with_duplicates"]
    ).success
    assert not test_dataframe.expect_compound_columns_to_be_unique(
        ["name_with_duplicates", "address_with_duplicates.street"]
    ).success
    assert not test_dataframe.expect_compound_columns_to_be_unique(
        [
            "name_with_duplicates",
            "address_with_duplicates.street",
            "address_with_duplicates.house_number",
        ]
    ).success

    # Expectation should fail when no `` surround a non-nested column with dot notation
    with pytest.raises(AnalysisException):
        test_dataframe.expect_compound_columns_to_be_unique(
            ["address.street", "non.nested"]
        )
[ "def", "test_expect_compound_columns_to_be_unique", "(", "spark_session", ",", "test_dataframe", ")", ":", "from", "pyspark", ".", "sql", ".", "utils", "import", "AnalysisException", "# Positive tests", "assert", "test_dataframe", ".", "expect_compound_columns_to_be_unique", "(", "[", "\"name\"", ",", "\"age\"", "]", ")", ".", "success", "assert", "test_dataframe", ".", "expect_compound_columns_to_be_unique", "(", "[", "\"address.street\"", ",", "\"name\"", "]", ")", ".", "success", "assert", "test_dataframe", ".", "expect_compound_columns_to_be_unique", "(", "[", "\"address.street\"", ",", "\"address.city\"", "]", ")", ".", "success", "assert", "test_dataframe", ".", "expect_compound_columns_to_be_unique", "(", "[", "\"name_with_duplicates\"", ",", "\"age_with_duplicates\"", ",", "\"name\"", "]", ")", ".", "success", "assert", "test_dataframe", ".", "expect_compound_columns_to_be_unique", "(", "[", "\"address.street\"", ",", "\"`non.nested`\"", "]", ")", ".", "success", "assert", "test_dataframe", ".", "expect_compound_columns_to_be_unique", "(", "[", "\"name\"", ",", "\"name_with_duplicates\"", "]", ")", ".", "success", "assert", "test_dataframe", ".", "expect_compound_columns_to_be_unique", "(", "[", "\"name\"", ",", "\"name_with_duplicates\"", ",", "\"address_with_duplicates.street\"", ",", "\"address_with_duplicates.city\"", ",", "\"address_with_duplicates.house_number\"", ",", "]", ")", ".", "success", "# Negative tests", "assert", "not", "test_dataframe", ".", "expect_compound_columns_to_be_unique", "(", "[", "\"address_with_duplicates.city\"", ",", "\"address_with_duplicates.house_number\"", "]", ")", ".", "success", "assert", "not", "test_dataframe", ".", "expect_compound_columns_to_be_unique", "(", "[", "\"name_with_duplicates\"", "]", ")", ".", "success", "assert", "not", "test_dataframe", ".", "expect_compound_columns_to_be_unique", "(", "[", "\"name_with_duplicates\"", ",", "\"address_with_duplicates.street\"", "]", ")", ".", "success", "assert", "not", "test_dataframe", ".", "expect_compound_columns_to_be_unique", "(", "[", "\"name_with_duplicates\"", ",", "\"address_with_duplicates.street\"", ",", "\"address_with_duplicates.house_number\"", ",", "]", ")", ".", "success", "# Expectation should fail when no `` surround a non-nested column with dot notation", "with", "pytest", ".", "raises", "(", "AnalysisException", ")", ":", "test_dataframe", ".", "expect_compound_columns_to_be_unique", "(", "[", "\"address.street\"", ",", "\"non.nested\"", "]", ")" ]
start_point: [263, 0]; end_point: [318, 9]
language: python; docstring_language: en; docstring_language_predictions: ['en', 'error', 'th']; is_langid_reliable: False
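Compound uniqueness asserts that the combination of columns is unique per row even when each column has duplicates on its own, which is exactly what the positive and negative cases above exercise. A plain-Spark sketch of the underlying check (not great_expectations' actual implementation):

from pyspark.sql import SparkSession

spark = SparkSession.builder.master("local[1]").appName("compound-unique").getOrCreate()
people = spark.createDataFrame(
    [("Alice", 30), ("Alice", 31), ("Bob", 30)], "name string, age int"
)

# "name" alone has duplicates and so does "age", but the (name, age) pair is unique:
dupes = people.groupBy("name", "age").count().filter("count > 1")
print(dupes.count() == 0)  # True -> the compound key is unique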
identifier: test_expect_column_values_to_be_unique
parameters: (spark_session, test_dataframe)
docstring: column_map_expectation
function:
def test_expect_column_values_to_be_unique(spark_session, test_dataframe):
    """
    column_map_expectation
    """
    from pyspark.sql.utils import AnalysisException

    assert test_dataframe.expect_column_values_to_be_unique("name").success
    assert not test_dataframe.expect_column_values_to_be_unique("address.city").success
    assert test_dataframe.expect_column_values_to_be_unique("`non.nested`").success
    # Expectation should fail when no `` surround a non-nested column with dot notation
    with pytest.raises(AnalysisException):
        test_dataframe.expect_column_values_to_be_unique("non.nested")
[ "def", "test_expect_column_values_to_be_unique", "(", "spark_session", ",", "test_dataframe", ")", ":", "from", "pyspark", ".", "sql", ".", "utils", "import", "AnalysisException", "assert", "test_dataframe", ".", "expect_column_values_to_be_unique", "(", "\"name\"", ")", ".", "success", "assert", "not", "test_dataframe", ".", "expect_column_values_to_be_unique", "(", "\"address.city\"", ")", ".", "success", "assert", "test_dataframe", ".", "expect_column_values_to_be_unique", "(", "\"`non.nested`\"", ")", ".", "success", "# Expectation should fail when no `` surround a non-nested column with dot notation", "with", "pytest", ".", "raises", "(", "AnalysisException", ")", ":", "test_dataframe", ".", "expect_column_values_to_be_unique", "(", "\"non.nested\"", ")" ]
start_point: [325, 0]; end_point: [337, 70]
language: python; docstring_language: en; docstring_language_predictions: ['en', 'error', 'th']; is_langid_reliable: False
identifier: test_expect_column_value_lengths_to_be_between
parameters: (spark_session, test_dataframe)
docstring: column_map_expectation
function:
def test_expect_column_value_lengths_to_be_between(spark_session, test_dataframe):
    """
    column_map_expectation
    """
    assert test_dataframe.expect_column_value_lengths_to_be_between(
        "name", 3, 7
    ).success
    assert test_dataframe.expect_column_value_lengths_to_be_between(
        "address.street", 1, 10
    ).success
[ "def", "test_expect_column_value_lengths_to_be_between", "(", "spark_session", ",", "test_dataframe", ")", ":", "assert", "test_dataframe", ".", "expect_column_value_lengths_to_be_between", "(", "\"name\"", ",", "3", ",", "7", ")", ".", "success", "assert", "test_dataframe", ".", "expect_column_value_lengths_to_be_between", "(", "\"address.street\"", ",", "1", ",", "10", ")", ".", "success" ]
start_point: [344, 0]; end_point: [353, 13]
language: python; docstring_language: en; docstring_language_predictions: ['en', 'error', 'th']; is_langid_reliable: False
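Under the hood, a length expectation is just a filter on the string length. A plain-Spark sketch of that idea with made-up data (again, not great_expectations' implementation):

from pyspark.sql import SparkSession
from pyspark.sql import functions as F

spark = SparkSession.builder.master("local[1]").appName("length-check").getOrCreate()
names = spark.createDataFrame([("Alice",), ("Bob",)], "name string")

# Count rows whose name length falls outside [3, 7]; zero violations means
# the expectation would pass.
violations = names.filter(~F.length("name").between(3, 7)).count()
print(violations == 0)  # True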
identifier: test_expect_column_value_lengths_to_equal
parameters: (spark_session, test_dataframe)
docstring: column_map_expectation
function:
def test_expect_column_value_lengths_to_equal(spark_session, test_dataframe):
    """
    column_map_expectation
    """
    assert test_dataframe.expect_column_value_lengths_to_equal("age", 1).success
    assert test_dataframe.expect_column_value_lengths_to_equal(
        "address.street", 8
    ).success
[ "def", "test_expect_column_value_lengths_to_equal", "(", "spark_session", ",", "test_dataframe", ")", ":", "assert", "test_dataframe", ".", "expect_column_value_lengths_to_equal", "(", "\"age\"", ",", "1", ")", ".", "success", "assert", "test_dataframe", ".", "expect_column_value_lengths_to_equal", "(", "\"address.street\"", ",", "8", ")", ".", "success" ]
start_point: [360, 0]; end_point: [367, 13]
language: python; docstring_language: en; docstring_language_predictions: ['en', 'error', 'th']; is_langid_reliable: False
identifier: BaseTranslator.translate
parameters: (self, query: Optional[str] = None, documents: Optional[Union[List[Document], List[str], List[Dict[str, Any]]]] = None, dict_key: Optional[str] = None, **kwargs)
docstring: Translate the passed query or a list of documents from language A to B.
function:
def translate(
    self,
    query: Optional[str] = None,
    documents: Optional[Union[List[Document], List[str], List[Dict[str, Any]]]] = None,
    dict_key: Optional[str] = None,
    **kwargs
) -> Union[str, List[Document], List[str], List[Dict[str, Any]]]:
    """
    Translate the passed query or a list of documents from language A to B.
    """
    pass
[ "def", "translate", "(", "self", ",", "query", ":", "Optional", "[", "str", "]", "=", "None", ",", "documents", ":", "Optional", "[", "Union", "[", "List", "[", "Document", "]", ",", "List", "[", "str", "]", ",", "List", "[", "Dict", "[", "str", ",", "Any", "]", "]", "]", "]", "=", "None", ",", "dict_key", ":", "Optional", "[", "str", "]", "=", "None", ",", "*", "*", "kwargs", ")", "->", "Union", "[", "str", ",", "List", "[", "Document", "]", ",", "List", "[", "str", "]", ",", "List", "[", "Dict", "[", "str", ",", "Any", "]", "]", "]", ":", "pass" ]
start_point: [14, 4]; end_point: [24, 12]
language: python; docstring_language: en; docstring_language_predictions: ['en', 'error', 'th']; is_langid_reliable: False
identifier: BaseTranslator.run
parameters: (self, query: Optional[str] = None, documents: Optional[Union[List[Document], List[str], List[Dict[str, Any]]]] = None, answers: Optional[Union[Dict[str, Any], List[Dict[str, Any]]]] = None, dict_key: Optional[str] = None, **kwargs)
docstring: Method that gets executed when this class is used as a Node in a Haystack Pipeline
function:
def run(
    self,
    query: Optional[str] = None,
    documents: Optional[Union[List[Document], List[str], List[Dict[str, Any]]]] = None,
    answers: Optional[Union[Dict[str, Any], List[Dict[str, Any]]]] = None,
    dict_key: Optional[str] = None,
    **kwargs
):
    """Method that gets executed when this class is used as a Node in a Haystack Pipeline"""
    results: Dict = {**kwargs}

    # This will cover input query stage
    if query:
        results["query"] = self.translate(query=query)

    # This will cover retriever and summarizer
    if documents:
        dict_key = dict_key or "text"
        results["documents"] = self.translate(documents=documents, dict_key=dict_key)

    if answers:
        dict_key = dict_key or "answer"
        if isinstance(answers, Mapping):
            # This will cover reader
            results["answers"] = self.translate(documents=answers["answers"], dict_key=dict_key)
        else:
            # This will cover generator
            results["answers"] = self.translate(documents=answers, dict_key=dict_key)

    return results, "output_1"
[ "def", "run", "(", "self", ",", "query", ":", "Optional", "[", "str", "]", "=", "None", ",", "documents", ":", "Optional", "[", "Union", "[", "List", "[", "Document", "]", ",", "List", "[", "str", "]", ",", "List", "[", "Dict", "[", "str", ",", "Any", "]", "]", "]", "]", "=", "None", ",", "answers", ":", "Optional", "[", "Union", "[", "Dict", "[", "str", ",", "Any", "]", ",", "List", "[", "Dict", "[", "str", ",", "Any", "]", "]", "]", "]", "=", "None", ",", "dict_key", ":", "Optional", "[", "str", "]", "=", "None", ",", "*", "*", "kwargs", ")", ":", "results", ":", "Dict", "=", "{", "*", "*", "kwargs", "}", "# This will cover input query stage", "if", "query", ":", "results", "[", "\"query\"", "]", "=", "self", ".", "translate", "(", "query", "=", "query", ")", "# This will cover retriever and summarizer", "if", "documents", ":", "dict_key", "=", "dict_key", "or", "\"text\"", "results", "[", "\"documents\"", "]", "=", "self", ".", "translate", "(", "documents", "=", "documents", ",", "dict_key", "=", "dict_key", ")", "if", "answers", ":", "dict_key", "=", "dict_key", "or", "\"answer\"", "if", "isinstance", "(", "answers", ",", "Mapping", ")", ":", "# This will cover reader", "results", "[", "\"answers\"", "]", "=", "self", ".", "translate", "(", "documents", "=", "answers", "[", "\"answers\"", "]", ",", "dict_key", "=", "dict_key", ")", "else", ":", "# This will cover generator", "results", "[", "\"answers\"", "]", "=", "self", ".", "translate", "(", "documents", "=", "answers", ",", "dict_key", "=", "dict_key", ")", "return", "results", ",", "\"output_1\"" ]
start_point: [26, 4]; end_point: [57, 34]
language: python; docstring_language: en; docstring_language_predictions: ['en', 'en', 'en']; is_langid_reliable: True
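A minimal stand-in shows how run routes each input through translate. The class below is a hypothetical toy (string reversal instead of real translation, plain-string documents only, no answers handling), not part of the Haystack API:

from typing import Dict

class ReverseTranslator:
    # Toy "translator": reverses strings, mirroring BaseTranslator's contract.
    def translate(self, query=None, documents=None, dict_key=None, **kwargs):
        if query is not None:
            return query[::-1]
        return [doc[::-1] for doc in documents]

    def run(self, query=None, documents=None, answers=None, dict_key=None, **kwargs):
        results: Dict = {**kwargs}
        if query:
            results["query"] = self.translate(query=query)
        if documents:
            results["documents"] = self.translate(documents=documents, dict_key=dict_key or "text")
        return results, "output_1"

node = ReverseTranslator()
print(node.run(query="hello", documents=["abc", "xyz"]))
# -> ({'query': 'olleh', 'documents': ['cba', 'zyx']}, 'output_1')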
identifier: pytest_addoption
parameters: (parser)
docstring: This parser plugin includes the following command-line options for pytest (the full option list is reproduced in the function's docstring below).
function:
def pytest_addoption(parser):
    """
    This parser plugin includes the following command-line options for pytest:
    --browser=BROWSER (The web browser to use.)
    --cap-file=FILE (The web browser's desired capabilities to use.)
    --cap-string=STRING (The web browser's desired capabilities to use.)
    --settings-file=FILE (Overrides SeleniumBase settings.py values.)
    --env=ENV (Set a test environment. Use "self.env" to use this in tests.)
    --data=DATA (Extra data to pass to tests. Use "self.data" in tests.)
    --var1=DATA (Extra data to pass to tests. Use "self.var1" in tests.)
    --var2=DATA (Extra data to pass to tests. Use "self.var2" in tests.)
    --var3=DATA (Extra data to pass to tests. Use "self.var3" in tests.)
    --user-data-dir=DIR (Set the Chrome user data directory to use.)
    --server=SERVER (The server / IP address used by the tests.)
    --port=PORT (The port that's used by the test server.)
    --proxy=SERVER:PORT (This is the proxy server:port combo used by tests.)
    --agent=STRING (This designates the web browser's User Agent to use.)
    --mobile (The option to use the mobile emulator while running tests.)
    --metrics=STRING ("CSSWidth,Height,PixelRatio" for mobile emulator tests.)
    --extension-zip=ZIP (Load a Chrome Extension .zip file, comma-separated.)
    --extension-dir=DIR (Load a Chrome Extension directory, comma-separated.)
    --headless (The option to run tests headlessly. The default on Linux OS.)
    --headed (The option to run tests with a GUI on Linux OS.)
    --start-page=URL (The starting URL for the web browser when tests begin.)
    --archive-logs (Archive old log files instead of deleting them.)
    --time-limit=SECONDS (Safely fail any test that exceeds the time limit.)
    --slow (The option to slow down the automation.)
    --demo (The option to visually see test actions as they occur.)
    --demo-sleep=SECONDS (The option to wait longer after Demo Mode actions.)
    --highlights=NUM (Number of highlight animations for Demo Mode actions.)
    --message-duration=SECONDS (The time length for Messenger alerts.)
    --check-js (The option to check for JavaScript errors after page loads.)
    --ad-block (The option to block some display ads after page loads.)
    --verify-delay=SECONDS (The delay before MasterQA verification checks.)
    --disable-csp (This disables the Content Security Policy of websites.)
    --enable-sync (The option to enable "Chrome Sync".)
    --use-auto-ext (The option to use Chrome's automation extension.)
    --incognito (The option to enable Chrome's Incognito mode.)
    --guest (The option to enable Chrome's Guest mode.)
    --devtools (The option to open Chrome's DevTools when the browser opens.)
    --reuse-session (The option to reuse the browser session between tests.)
    --crumbs (Option to delete all cookies between tests reusing a session.)
    --maximize (The option to start with the web browser maximized.)
    --save-screenshot (The option to save a screenshot after each test.)
    --visual-baseline (Set the visual baseline for Visual/Layout tests.)
    --timeout-multiplier=MULTIPLIER (Multiplies the default timeout values.)
    """
    parser = parser.getgroup(
        'SeleniumBase', 'SeleniumBase specific configuration options')
    parser.addoption('--browser', action="store", dest='browser',
                     type=str.lower,
                     choices=constants.ValidBrowsers.valid_browsers,
                     default=constants.Browser.GOOGLE_CHROME,
                     help="""Specifies the web browser to use. Default: Chrome.
                          If you want to use Firefox, explicitly indicate that.
                          Example: (--browser=firefox)""")
    parser.addoption('--with-selenium', action="store_true",
                     dest='with_selenium', default=True,
                     help="Use if tests need to be run with a web browser.")
    parser.addoption('--env', action='store', dest='environment',
                     type=str.lower,
                     choices=(constants.Environment.QA,
                              constants.Environment.STAGING,
                              constants.Environment.DEVELOP,
                              constants.Environment.PRODUCTION,
                              constants.Environment.MASTER,
                              constants.Environment.LOCAL,
                              constants.Environment.TEST),
                     default=constants.Environment.TEST,
                     help="The environment to run the tests in.")
    parser.addoption('--data', dest='data', default=None,
                     help='Extra data to pass to tests from the command line.')
    parser.addoption('--var1', dest='var1', default=None,
                     help='Extra data to pass to tests from the command line.')
    parser.addoption('--var2', dest='var2', default=None,
                     help='Extra data to pass to tests from the command line.')
    parser.addoption('--var3', dest='var3', default=None,
                     help='Extra data to pass to tests from the command line.')
    parser.addoption('--cap_file', '--cap-file', dest='cap_file', default=None,
                     help="""The file that stores browser desired capabilities
                          for BrowserStack, Sauce Labs, and other
                          remote web drivers to use.""")
    parser.addoption('--cap_string', '--cap-string', dest='cap_string',
                     default=None,
                     help="""The string that stores browser desired capabilities
                          for BrowserStack, Sauce Labs, and other
                          remote web drivers to use.
                          Enclose cap-string in single quotes.
                          Enclose parameter keys in double quotes.
                          Example: --cap-string='{"name":"test1","v":"42"}'""")
    parser.addoption('--settings_file', '--settings-file', '--settings',
                     action='store', dest='settings_file', default=None,
                     help="""The file that stores key/value pairs for
                          overriding values in the
                          seleniumbase/config/settings.py file.""")
    parser.addoption('--user_data_dir', '--user-data-dir',
                     dest='user_data_dir', default=None,
                     help="""The Chrome User Data Directory to use. (Profile)
                          If the directory doesn't exist, it'll be created.""")
    parser.addoption('--with-testing_base', '--with-testing-base',
                     action="store_true", dest='with_testing_base',
                     default=True,
                     help="""Use to save logs and screenshots when tests fail.
                          The following options are now active by default
                          with --with-testing_base (which is on by default):
                          --with-screen_shots ,
                          --with-basic_test_info ,
                          --with-page_source""")
    parser.addoption('--log_path', '--log-path', dest='log_path',
                     default='latest_logs/',
                     help='Where log files are saved. (No longer editable!)')
    parser.addoption('--archive_logs', '--archive-logs', action="store_true",
                     dest='archive_logs', default=False,
                     help="Archive old log files instead of deleting them.")
    parser.addoption('--with-db_reporting', '--with-db-reporting',
                     action="store_true", dest='with_db_reporting',
                     default=False,
                     help="Use to record test data in the MySQL database.")
    parser.addoption('--database_env', '--database-env', action='store',
                     dest='database_env',
                     choices=(constants.Environment.QA,
                              constants.Environment.STAGING,
                              constants.Environment.DEVELOP,
                              constants.Environment.PRODUCTION,
                              constants.Environment.MASTER,
                              constants.Environment.LOCAL,
                              constants.Environment.TEST),
                     default=constants.Environment.TEST,
                     help="The database environment to run the tests in.")
    parser.addoption('--with-s3_logging', '--with-s3-logging',
                     action="store_true", dest='with_s3_logging',
                     default=False,
                     help="Use to save test log files in Amazon S3.")
    parser.addoption('--with-screen_shots', '--with-screen-shots',
                     action="store_true", dest='with_screen_shots',
                     default=False,
                     help="""Use to save screenshots on test failure.
                          (Automatically on when using --with-testing_base)""")
    parser.addoption('--with-basic_test_info', '--with-basic-test-info',
                     action="store_true", dest='with_basic_test_info',
                     default=False,
                     help="""Use to save basic test info on test failure.
                          (Automatically on when using --with-testing_base)""")
    parser.addoption('--with-page_source', '--with-page-source',
                     action="store_true", dest='with_page_source',
                     default=False,
                     help="""Use to save page source on test failure.
                          (Automatically on when using --with-testing_base)""")
    parser.addoption('--server', action='store', dest='servername',
                     default='localhost',
                     help="""Designates the Selenium Grid server to use.
                          Use "127.0.0.1" to connect to a localhost Grid.
                          If unset or set to "localhost", Grid isn't used.
                          Default: "localhost".""")
    parser.addoption('--port', action='store', dest='port', default='4444',
                     help="""Designates the Selenium Grid port to use.
                          Default: 4444.""")
    parser.addoption('--proxy', action='store', dest='proxy_string',
                     default=None,
                     help="""Designates the proxy server:port to use.
                          Format: servername:port. OR
                          username:password@servername:port OR
                          A dict key from proxy_list.PROXY_LIST
                          Default: None.""")
    parser.addoption('--agent', '--user-agent', '--user_agent',
                     action='store', dest='user_agent', default=None,
                     help="""Designates the User-Agent for the browser to use.
                          Format: A string.
                          Default: None.""")
    parser.addoption('--mobile', '--mobile-emulator', '--mobile_emulator',
                     action="store_true", dest='mobile_emulator',
                     default=False,
                     help="""If this option is enabled, the mobile emulator
                          will be used while running tests.""")
    parser.addoption('--metrics', '--device-metrics', '--device_metrics',
                     action='store', dest='device_metrics', default=None,
                     help="""Designates the three device metrics of the mobile
                          emulator: CSS Width, CSS Height, and Pixel-Ratio.
                          Format: A comma-separated string with the 3 values.
                          Example: "375,734,3"
                          Default: None. (Will use default values if None)""")
    parser.addoption('--extension_zip', '--extension-zip', action='store',
                     dest='extension_zip', default=None,
                     help="""Designates the Chrome Extension ZIP file to load.
                          Format: A comma-separated list of .zip or .crx files
                          containing the Chrome extensions to load.
                          Default: None.""")
    parser.addoption('--extension_dir', '--extension-dir', action='store',
                     dest='extension_dir', default=None,
                     help="""Designates the Chrome Extension folder to load.
                          Format: A directory containing the Chrome extension.
                          (Can also be a comma-separated list of directories.)
                          Default: None.""")
    parser.addoption('--headless', action="store_true", dest='headless',
                     default=False,
                     help="""Using this makes Webdriver run web browsers
                          headlessly, which is required on headless machines.
                          Default: False on Mac/Windows. True on Linux.""")
    parser.addoption('--headed', '--gui', action="store_true", dest='headed',
                     default=False,
                     help="""Using this makes Webdriver run web browsers with
                          a GUI when running tests on Linux machines.
                          (The default setting on Linux is headless.)
                          (The default setting on Mac or Windows is headed.)""")
    parser.addoption('--start_page', '--start-page', '--url', action='store',
                     dest='start_page', default=None,
                     help="""Designates the starting URL for the web browser
                          when each test begins.
                          Default: None.""")
    parser.addoption('--is_pytest', '--is-pytest', action="store_true",
                     dest='is_pytest', default=True,
                     help="""This is used by the BaseCase class to tell apart
                          pytest runs from nosetest runs. (Automatic)""")
    parser.addoption('--time_limit', '--time-limit', '--timelimit',
                     action='store', dest='time_limit', default=None,
                     help="""Use this to set a time limit per test, in seconds.
                          If a test runs beyond the limit, it fails.""")
    parser.addoption('--slow_mode', '--slow-mode', '--slow',
                     action="store_true", dest='slow_mode', default=False,
                     help="""Using this slows down the automation.""")
    parser.addoption('--demo_mode', '--demo-mode', '--demo',
                     action="store_true", dest='demo_mode', default=False,
                     help="""Using this slows down the automation and lets you
                          visually see what the tests are actually doing.""")
    parser.addoption('--demo_sleep', '--demo-sleep', action='store',
                     dest='demo_sleep', default=None,
                     help="""Setting this overrides the Demo Mode sleep
                          time that happens after browser actions.""")
    parser.addoption('--highlights', action='store', dest='highlights',
                     default=None,
                     help="""Setting this overrides the default number of
                          highlight animation loops to have per call.""")
    parser.addoption('--message_duration', '--message-duration',
                     action="store", dest='message_duration', default=None,
                     help="""Setting this overrides the default time that
                          messenger notifications remain visible when reaching
                          assert statements during Demo Mode.""")
    parser.addoption('--check_js', '--check-js', action="store_true",
                     dest='js_checking_on', default=False,
                     help="""The option to check for JavaScript errors after
                          every page load.""")
    parser.addoption('--ad_block', '--ad-block', '--block_ads', '--block-ads',
                     action="store_true", dest='ad_block_on', default=False,
                     help="""Using this makes WebDriver block display ads
                          that are defined in ad_block_list.AD_BLOCK_LIST.""")
    parser.addoption('--verify_delay', '--verify-delay', action='store',
                     dest='verify_delay', default=None,
                     help="""Setting this overrides the default wait time
                          before each MasterQA verification pop-up.""")
    parser.addoption('--disable_csp', '--disable-csp', '--no_csp', '--no-csp',
                     action="store_true", dest='disable_csp', default=False,
                     help="""Using this disables the Content Security Policy of
                          websites, which may interfere with some features of
                          SeleniumBase, such as loading custom JavaScript
                          libraries for various testing actions.
                          Setting this to True (--disable_csp) overrides the
                          value set in seleniumbase/config/settings.py""")
    parser.addoption('--enable_sync', '--enable-sync', action="store_true",
                     dest='enable_sync', default=False,
                     help="""Using this enables the "Chrome Sync" feature.""")
    parser.addoption('--use_auto_ext', '--use-auto-ext', '--auto-ext',
                     action="store_true", dest='use_auto_ext', default=False,
                     help="""Using this enables Chrome's Automation Extension.
                          It's not required, but some commands & advanced
                          features may need it.""")
    parser.addoption('--no_sandbox', '--no-sandbox', action="store_true",
                     dest='no_sandbox', default=False,
                     help="""Using this enables the "No Sandbox" feature.
                          (This setting is now always enabled by default.)""")
    parser.addoption('--disable_gpu', '--disable-gpu', action="store_true",
                     dest='disable_gpu', default=False,
                     help="""Using this enables the "Disable GPU" feature.
                          (This setting is now always enabled by default.)""")
    parser.addoption('--incognito', '--incognito_mode', '--incognito-mode',
                     action="store_true", dest='incognito', default=False,
                     help="""Using this enables Chrome's Incognito mode.""")
    parser.addoption('--guest', '--guest_mode', '--guest-mode',
                     action="store_true", dest='guest_mode', default=False,
                     help="""Using this enables Chrome's Guest mode.""")
    parser.addoption('--devtools', '--open_devtools', '--open-devtools',
                     action="store_true", dest='devtools', default=False,
                     help="""Using this opens Chrome's DevTools.""")
    parser.addoption('--rs', '--reuse_session', '--reuse-session',
                     action="store_true", dest='reuse_session', default=False,
                     help="""The option to reuse the selenium browser window
                          session between tests.""")
    parser.addoption('--crumbs', action="store_true", dest='crumbs',
                     default=False,
                     help="""The option to delete all cookies between tests
                          that reuse the same browser session. This option
                          is only needed when using "--reuse-session".""")
    parser.addoption('--maximize_window', '--maximize-window', '--maximize',
                     '--fullscreen', action="store_true",
                     dest='maximize_option', default=False,
                     help="""The option to start with the browser window
                          maximized.""")
    parser.addoption('--save_screenshot', '--save-screenshot',
                     action='store_true', dest='save_screenshot',
                     default=False,
                     help="""Take a screenshot on last page after the last step
                          of the test. (Added to the "latest_logs" folder.)""")
    parser.addoption('--visual_baseline', '--visual-baseline',
                     action='store_true', dest='visual_baseline',
                     default=False,
                     help="""Setting this resets the visual baseline for
                          Automated Visual Testing with SeleniumBase.
                          When a test calls self.check_window(), it will
                          rebuild its files in the visual_baseline folder.""")
    parser.addoption('--timeout_multiplier', '--timeout-multiplier',
                     action='store', dest='timeout_multiplier', default=None,
                     help="""Setting this overrides the default timeout
                          by the multiplier when waiting for page elements.
                          Unused when tests override the default value.""")
    for arg in sys.argv:
        if "--timeout=" in arg:
            raise Exception(
                "\n\n  Don't use --timeout=s from pytest-timeout! "
                "\n  It's not thread-safe for WebDriver processes! "
                "\n  Use --time-limit=s from SeleniumBase instead!\n")
[ "def", "pytest_addoption", "(", "parser", ")", ":", "parser", "=", "parser", ".", "getgroup", "(", "'SeleniumBase'", ",", "'SeleniumBase specific configuration options'", ")", "parser", ".", "addoption", "(", "'--browser'", ",", "action", "=", "\"store\"", ",", "dest", "=", "'browser'", ",", "type", "=", "str", ".", "lower", ",", "choices", "=", "constants", ".", "ValidBrowsers", ".", "valid_browsers", ",", "default", "=", "constants", ".", "Browser", ".", "GOOGLE_CHROME", ",", "help", "=", "\"\"\"Specifies the web browser to use. Default: Chrome.\n If you want to use Firefox, explicitly indicate that.\n Example: (--browser=firefox)\"\"\"", ")", "parser", ".", "addoption", "(", "'--with-selenium'", ",", "action", "=", "\"store_true\"", ",", "dest", "=", "'with_selenium'", ",", "default", "=", "True", ",", "help", "=", "\"Use if tests need to be run with a web browser.\"", ")", "parser", ".", "addoption", "(", "'--env'", ",", "action", "=", "'store'", ",", "dest", "=", "'environment'", ",", "type", "=", "str", ".", "lower", ",", "choices", "=", "(", "constants", ".", "Environment", ".", "QA", ",", "constants", ".", "Environment", ".", "STAGING", ",", "constants", ".", "Environment", ".", "DEVELOP", ",", "constants", ".", "Environment", ".", "PRODUCTION", ",", "constants", ".", "Environment", ".", "MASTER", ",", "constants", ".", "Environment", ".", "LOCAL", ",", "constants", ".", "Environment", ".", "TEST", ")", ",", "default", "=", "constants", ".", "Environment", ".", "TEST", ",", "help", "=", "\"The environment to run the tests in.\"", ")", "parser", ".", "addoption", "(", "'--data'", ",", "dest", "=", "'data'", ",", "default", "=", "None", ",", "help", "=", "'Extra data to pass to tests from the command line.'", ")", "parser", ".", "addoption", "(", "'--var1'", ",", "dest", "=", "'var1'", ",", "default", "=", "None", ",", "help", "=", "'Extra data to pass to tests from the command line.'", ")", "parser", ".", "addoption", "(", "'--var2'", ",", "dest", "=", "'var2'", ",", "default", "=", "None", ",", "help", "=", "'Extra data to pass to tests from the command line.'", ")", "parser", ".", "addoption", "(", "'--var3'", ",", "dest", "=", "'var3'", ",", "default", "=", "None", ",", "help", "=", "'Extra data to pass to tests from the command line.'", ")", "parser", ".", "addoption", "(", "'--cap_file'", ",", "'--cap-file'", ",", "dest", "=", "'cap_file'", ",", "default", "=", "None", ",", "help", "=", "\"\"\"The file that stores browser desired capabilities\n for BrowserStack, Sauce Labs, and other\n remote web drivers to use.\"\"\"", ")", "parser", ".", "addoption", "(", "'--cap_string'", ",", "'--cap-string'", ",", "dest", "=", "'cap_string'", ",", "default", "=", "None", ",", "help", "=", "\"\"\"The string that stores browser desired\n capabilities for BrowserStack, Sauce Labs,\n and other remote web drivers to use.\n Enclose cap-string in single quotes.\n Enclose parameter keys in double quotes.\n Example: --cap-string='{\"name\":\"test1\",\"v\":\"42\"}'\"\"\"", ")", "parser", ".", "addoption", "(", "'--settings_file'", ",", "'--settings-file'", ",", "'--settings'", ",", "action", "=", "'store'", ",", "dest", "=", "'settings_file'", ",", "default", "=", "None", ",", "help", "=", "\"\"\"The file that stores key/value pairs for\n overriding values in the\n seleniumbase/config/settings.py file.\"\"\"", ")", "parser", ".", "addoption", "(", "'--user_data_dir'", ",", "'--user-data-dir'", ",", "dest", "=", "'user_data_dir'", ",", "default", "=", "None", ",", "help", "=", "\"\"\"The Chrome 
User Data Directory to use. (Profile)\n If the directory doesn't exist, it'll be created.\"\"\"", ")", "parser", ".", "addoption", "(", "'--with-testing_base'", ",", "'--with-testing-base'", ",", "action", "=", "\"store_true\"", ",", "dest", "=", "'with_testing_base'", ",", "default", "=", "True", ",", "help", "=", "\"\"\"Use to save logs and screenshots when tests fail.\n The following options are now active by default\n with --with-testing_base (which is on by default):\n --with-screen_shots ,\n --with-basic_test_info ,\n --with-page_source\n \"\"\"", ")", "parser", ".", "addoption", "(", "'--log_path'", ",", "'--log-path'", ",", "dest", "=", "'log_path'", ",", "default", "=", "'latest_logs/'", ",", "help", "=", "'Where log files are saved. (No longer editable!)'", ")", "parser", ".", "addoption", "(", "'--archive_logs'", ",", "'--archive-logs'", ",", "action", "=", "\"store_true\"", ",", "dest", "=", "'archive_logs'", ",", "default", "=", "False", ",", "help", "=", "\"Archive old log files instead of deleting them.\"", ")", "parser", ".", "addoption", "(", "'--with-db_reporting'", ",", "'--with-db-reporting'", ",", "action", "=", "\"store_true\"", ",", "dest", "=", "'with_db_reporting'", ",", "default", "=", "False", ",", "help", "=", "\"Use to record test data in the MySQL database.\"", ")", "parser", ".", "addoption", "(", "'--database_env'", ",", "'--database-env'", ",", "action", "=", "'store'", ",", "dest", "=", "'database_env'", ",", "choices", "=", "(", "constants", ".", "Environment", ".", "QA", ",", "constants", ".", "Environment", ".", "STAGING", ",", "constants", ".", "Environment", ".", "DEVELOP", ",", "constants", ".", "Environment", ".", "PRODUCTION", ",", "constants", ".", "Environment", ".", "MASTER", ",", "constants", ".", "Environment", ".", "LOCAL", ",", "constants", ".", "Environment", ".", "TEST", ")", ",", "default", "=", "constants", ".", "Environment", ".", "TEST", ",", "help", "=", "\"The database environment to run the tests in.\"", ")", "parser", ".", "addoption", "(", "'--with-s3_logging'", ",", "'--with-s3-logging'", ",", "action", "=", "\"store_true\"", ",", "dest", "=", "'with_s3_logging'", ",", "default", "=", "False", ",", "help", "=", "\"Use to save test log files in Amazon S3.\"", ")", "parser", ".", "addoption", "(", "'--with-screen_shots'", ",", "'--with-screen-shots'", ",", "action", "=", "\"store_true\"", ",", "dest", "=", "'with_screen_shots'", ",", "default", "=", "False", ",", "help", "=", "\"\"\"Use to save screenshots on test failure.\n (Automatically on when using --with-testing_base)\"\"\"", ")", "parser", ".", "addoption", "(", "'--with-basic_test_info'", ",", "'--with-basic-test-info'", ",", "action", "=", "\"store_true\"", ",", "dest", "=", "'with_basic_test_info'", ",", "default", "=", "False", ",", "help", "=", "\"\"\"Use to save basic test info on test failure.\n (Automatically on when using --with-testing_base)\"\"\"", ")", "parser", ".", "addoption", "(", "'--with-page_source'", ",", "'--with-page-source'", ",", "action", "=", "\"store_true\"", ",", "dest", "=", "'with_page_source'", ",", "default", "=", "False", ",", "help", "=", "\"\"\"Use to save page source on test failure.\n (Automatically on when using --with-testing_base)\"\"\"", ")", "parser", ".", "addoption", "(", "'--server'", ",", "action", "=", "'store'", ",", "dest", "=", "'servername'", ",", "default", "=", "'localhost'", ",", "help", "=", "\"\"\"Designates the Selenium Grid server to use.\n Use \"127.0.0.1\" to connect to a localhost Grid.\n If unset or set to 
\"localhost\", Grid isn't used.\n Default: \"localhost\".\"\"\"", ")", "parser", ".", "addoption", "(", "'--port'", ",", "action", "=", "'store'", ",", "dest", "=", "'port'", ",", "default", "=", "'4444'", ",", "help", "=", "\"\"\"Designates the Selenium Grid port to use.\n Default: 4444.\"\"\"", ")", "parser", ".", "addoption", "(", "'--proxy'", ",", "action", "=", "'store'", ",", "dest", "=", "'proxy_string'", ",", "default", "=", "None", ",", "help", "=", "\"\"\"Designates the proxy server:port to use.\n Format: servername:port. OR\n username:password@servername:port OR\n A dict key from proxy_list.PROXY_LIST\n Default: None.\"\"\"", ")", "parser", ".", "addoption", "(", "'--agent'", ",", "'--user-agent'", ",", "'--user_agent'", ",", "action", "=", "'store'", ",", "dest", "=", "'user_agent'", ",", "default", "=", "None", ",", "help", "=", "\"\"\"Designates the User-Agent for the browser to use.\n Format: A string.\n Default: None.\"\"\"", ")", "parser", ".", "addoption", "(", "'--mobile'", ",", "'--mobile-emulator'", ",", "'--mobile_emulator'", ",", "action", "=", "\"store_true\"", ",", "dest", "=", "'mobile_emulator'", ",", "default", "=", "False", ",", "help", "=", "\"\"\"If this option is enabled, the mobile emulator\n will be used while running tests.\"\"\"", ")", "parser", ".", "addoption", "(", "'--metrics'", ",", "'--device-metrics'", ",", "'--device_metrics'", ",", "action", "=", "'store'", ",", "dest", "=", "'device_metrics'", ",", "default", "=", "None", ",", "help", "=", "\"\"\"Designates the three device metrics of the mobile\n emulator: CSS Width, CSS Height, and Pixel-Ratio.\n Format: A comma-separated string with the 3 values.\n Example: \"375,734,3\"\n Default: None. (Will use default values if None)\"\"\"", ")", "parser", ".", "addoption", "(", "'--extension_zip'", ",", "'--extension-zip'", ",", "action", "=", "'store'", ",", "dest", "=", "'extension_zip'", ",", "default", "=", "None", ",", "help", "=", "\"\"\"Designates the Chrome Extension ZIP file to load.\n Format: A comma-separated list of .zip or .crx files\n containing the Chrome extensions to load.\n Default: None.\"\"\"", ")", "parser", ".", "addoption", "(", "'--extension_dir'", ",", "'--extension-dir'", ",", "action", "=", "'store'", ",", "dest", "=", "'extension_dir'", ",", "default", "=", "None", ",", "help", "=", "\"\"\"Designates the Chrome Extension folder to load.\n Format: A directory containing the Chrome extension.\n (Can also be a comma-separated list of directories.)\n Default: None.\"\"\"", ")", "parser", ".", "addoption", "(", "'--headless'", ",", "action", "=", "\"store_true\"", ",", "dest", "=", "'headless'", ",", "default", "=", "False", ",", "help", "=", "\"\"\"Using this makes Webdriver run web browsers\n headlessly, which is required on headless machines.\n Default: False on Mac/Windows. 
True on Linux.\"\"\"", ")", "parser", ".", "addoption", "(", "'--headed'", ",", "'--gui'", ",", "action", "=", "\"store_true\"", ",", "dest", "=", "'headed'", ",", "default", "=", "False", ",", "help", "=", "\"\"\"Using this makes Webdriver run web browsers with\n a GUI when running tests on Linux machines.\n (The default setting on Linux is headless.)\n (The default setting on Mac or Windows is headed.)\n \"\"\"", ")", "parser", ".", "addoption", "(", "'--start_page'", ",", "'--start-page'", ",", "'--url'", ",", "action", "=", "'store'", ",", "dest", "=", "'start_page'", ",", "default", "=", "None", ",", "help", "=", "\"\"\"Designates the starting URL for the web browser\n when each test begins.\n Default: None.\"\"\"", ")", "parser", ".", "addoption", "(", "'--is_pytest'", ",", "'--is-pytest'", ",", "action", "=", "\"store_true\"", ",", "dest", "=", "'is_pytest'", ",", "default", "=", "True", ",", "help", "=", "\"\"\"This is used by the BaseCase class to tell apart\n pytest runs from nosetest runs. (Automatic)\"\"\"", ")", "parser", ".", "addoption", "(", "'--time_limit'", ",", "'--time-limit'", ",", "'--timelimit'", ",", "action", "=", "'store'", ",", "dest", "=", "'time_limit'", ",", "default", "=", "None", ",", "help", "=", "\"\"\"Use this to set a time limit per test, in seconds.\n If a test runs beyond the limit, it fails.\"\"\"", ")", "parser", ".", "addoption", "(", "'--slow_mode'", ",", "'--slow-mode'", ",", "'--slow'", ",", "action", "=", "\"store_true\"", ",", "dest", "=", "'slow_mode'", ",", "default", "=", "False", ",", "help", "=", "\"\"\"Using this slows down the automation.\"\"\"", ")", "parser", ".", "addoption", "(", "'--demo_mode'", ",", "'--demo-mode'", ",", "'--demo'", ",", "action", "=", "\"store_true\"", ",", "dest", "=", "'demo_mode'", ",", "default", "=", "False", ",", "help", "=", "\"\"\"Using this slows down the automation and lets you\n visually see what the tests are actually doing.\"\"\"", ")", "parser", ".", "addoption", "(", "'--demo_sleep'", ",", "'--demo-sleep'", ",", "action", "=", "'store'", ",", "dest", "=", "'demo_sleep'", ",", "default", "=", "None", ",", "help", "=", "\"\"\"Setting this overrides the Demo Mode sleep\n time that happens after browser actions.\"\"\"", ")", "parser", ".", "addoption", "(", "'--highlights'", ",", "action", "=", "'store'", ",", "dest", "=", "'highlights'", ",", "default", "=", "None", ",", "help", "=", "\"\"\"Setting this overrides the default number of\n highlight animation loops to have per call.\"\"\"", ")", "parser", ".", "addoption", "(", "'--message_duration'", ",", "'--message-duration'", ",", "action", "=", "\"store\"", ",", "dest", "=", "'message_duration'", ",", "default", "=", "None", ",", "help", "=", "\"\"\"Setting this overrides the default time that\n messenger notifications remain visible when reaching\n assert statements during Demo Mode.\"\"\"", ")", "parser", ".", "addoption", "(", "'--check_js'", ",", "'--check-js'", ",", "action", "=", "\"store_true\"", ",", "dest", "=", "'js_checking_on'", ",", "default", "=", "False", ",", "help", "=", "\"\"\"The option to check for JavaScript errors after\n every page load.\"\"\"", ")", "parser", ".", "addoption", "(", "'--ad_block'", ",", "'--ad-block'", ",", "'--block_ads'", ",", "'--block-ads'", ",", "action", "=", "\"store_true\"", ",", "dest", "=", "'ad_block_on'", ",", "default", "=", "False", ",", "help", "=", "\"\"\"Using this makes WebDriver block display ads\n that are defined in ad_block_list.AD_BLOCK_LIST.\"\"\"", ")", "parser", ".", "addoption", "(", 
"'--verify_delay'", ",", "'--verify-delay'", ",", "action", "=", "'store'", ",", "dest", "=", "'verify_delay'", ",", "default", "=", "None", ",", "help", "=", "\"\"\"Setting this overrides the default wait time\n before each MasterQA verification pop-up.\"\"\"", ")", "parser", ".", "addoption", "(", "'--disable_csp'", ",", "'--disable-csp'", ",", "'--no_csp'", ",", "'--no-csp'", ",", "action", "=", "\"store_true\"", ",", "dest", "=", "'disable_csp'", ",", "default", "=", "False", ",", "help", "=", "\"\"\"Using this disables the Content Security Policy of\n websites, which may interfere with some features of\n SeleniumBase, such as loading custom JavaScript\n libraries for various testing actions.\n Setting this to True (--disable_csp) overrides the\n value set in seleniumbase/config/settings.py\"\"\"", ")", "parser", ".", "addoption", "(", "'--enable_sync'", ",", "'--enable-sync'", ",", "action", "=", "\"store_true\"", ",", "dest", "=", "'enable_sync'", ",", "default", "=", "False", ",", "help", "=", "\"\"\"Using this enables the \"Chrome Sync\" feature.\"\"\"", ")", "parser", ".", "addoption", "(", "'--use_auto_ext'", ",", "'--use-auto-ext'", ",", "'--auto-ext'", ",", "action", "=", "\"store_true\"", ",", "dest", "=", "'use_auto_ext'", ",", "default", "=", "False", ",", "help", "=", "\"\"\"Using this enables Chrome's Automation Extension.\n It's not required, but some commands & advanced\n features may need it.\"\"\"", ")", "parser", ".", "addoption", "(", "'--no_sandbox'", ",", "'--no-sandbox'", ",", "action", "=", "\"store_true\"", ",", "dest", "=", "'no_sandbox'", ",", "default", "=", "False", ",", "help", "=", "\"\"\"Using this enables the \"No Sandbox\" feature.\n (This setting is now always enabled by default.)\"\"\"", ")", "parser", ".", "addoption", "(", "'--disable_gpu'", ",", "'--disable-gpu'", ",", "action", "=", "\"store_true\"", ",", "dest", "=", "'disable_gpu'", ",", "default", "=", "False", ",", "help", "=", "\"\"\"Using this enables the \"Disable GPU\" feature.\n (This setting is now always enabled by default.)\"\"\"", ")", "parser", ".", "addoption", "(", "'--incognito'", ",", "'--incognito_mode'", ",", "'--incognito-mode'", ",", "action", "=", "\"store_true\"", ",", "dest", "=", "'incognito'", ",", "default", "=", "False", ",", "help", "=", "\"\"\"Using this enables Chrome's Incognito mode.\"\"\"", ")", "parser", ".", "addoption", "(", "'--guest'", ",", "'--guest_mode'", ",", "'--guest-mode'", ",", "action", "=", "\"store_true\"", ",", "dest", "=", "'guest_mode'", ",", "default", "=", "False", ",", "help", "=", "\"\"\"Using this enables Chrome's Guest mode.\"\"\"", ")", "parser", ".", "addoption", "(", "'--devtools'", ",", "'--open_devtools'", ",", "'--open-devtools'", ",", "action", "=", "\"store_true\"", ",", "dest", "=", "'devtools'", ",", "default", "=", "False", ",", "help", "=", "\"\"\"Using this opens Chrome's DevTools.\"\"\"", ")", "parser", ".", "addoption", "(", "'--rs'", ",", "'--reuse_session'", ",", "'--reuse-session'", ",", "action", "=", "\"store_true\"", ",", "dest", "=", "'reuse_session'", ",", "default", "=", "False", ",", "help", "=", "\"\"\"The option to reuse the selenium browser window\n session between tests.\"\"\"", ")", "parser", ".", "addoption", "(", "'--crumbs'", ",", "action", "=", "\"store_true\"", ",", "dest", "=", "'crumbs'", ",", "default", "=", "False", ",", "help", "=", "\"\"\"The option to delete all cookies between tests\n that reuse the same browser session. 
This option\n is only needed when using \"--reuse-session\".\"\"\"", ")", "parser", ".", "addoption", "(", "'--maximize_window'", ",", "'--maximize-window'", ",", "'--maximize'", ",", "'--fullscreen'", ",", "action", "=", "\"store_true\"", ",", "dest", "=", "'maximize_option'", ",", "default", "=", "False", ",", "help", "=", "\"\"\"The option to start with the browser window\n maximized.\"\"\"", ")", "parser", ".", "addoption", "(", "'--save_screenshot'", ",", "'--save-screenshot'", ",", "action", "=", "'store_true'", ",", "dest", "=", "'save_screenshot'", ",", "default", "=", "False", ",", "help", "=", "\"\"\"Take a screenshot on last page after the last step\n of the test. (Added to the \"latest_logs\" folder.)\"\"\"", ")", "parser", ".", "addoption", "(", "'--visual_baseline'", ",", "'--visual-baseline'", ",", "action", "=", "'store_true'", ",", "dest", "=", "'visual_baseline'", ",", "default", "=", "False", ",", "help", "=", "\"\"\"Setting this resets the visual baseline for\n Automated Visual Testing with SeleniumBase.\n When a test calls self.check_window(), it will\n rebuild its files in the visual_baseline folder.\"\"\"", ")", "parser", ".", "addoption", "(", "'--timeout_multiplier'", ",", "'--timeout-multiplier'", ",", "action", "=", "'store'", ",", "dest", "=", "'timeout_multiplier'", ",", "default", "=", "None", ",", "help", "=", "\"\"\"Setting this overrides the default timeout\n by the multiplier when waiting for page elements.\n Unused when tests override the default value.\"\"\"", ")", "for", "arg", "in", "sys", ".", "argv", ":", "if", "\"--timeout=\"", "in", "arg", ":", "raise", "Exception", "(", "\"\\n\\n Don't use --timeout=s from pytest-timeout! \"", "\"\\n It's not thread-safe for WebDriver processes! \"", "\"\\n Use --time-limit=s from SeleniumBase instead!\\n\"", ")" ]
[ 11, 0 ]
[ 433, 70 ]
python
en
['en', 'error', 'th']
False
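The options above are registered through pytest's standard plugin hooks. A minimal sketch of that addoption/getoption pattern follows; the option name --demo-flag and the conftest.py file are illustrative, not from the source:

# conftest.py -- minimal sketch of the pytest_addoption pattern used above;
# the "--demo-flag" option is hypothetical.
def pytest_addoption(parser):
    # Register a custom flag in the same style as the options above.
    parser.addoption('--demo-flag',
                     action="store_true",
                     dest='demo_flag',
                     default=False,
                     help="""Example flag mirroring the style above.""")

def pytest_configure(config):
    # Parsed values become available once the command line is processed.
    if config.getoption('demo_flag'):
        print("demo flag is on")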
pytest_configure
(config)
This runs after command line options have been parsed
This runs after command line options have been parsed
def pytest_configure(config): """ This runs after command line options have been parsed """ sb_config.is_pytest = True sb_config.browser = config.getoption('browser') sb_config.data = config.getoption('data') sb_config.var1 = config.getoption('var1') sb_config.var2 = config.getoption('var2') sb_config.var3 = config.getoption('var3') sb_config.environment = config.getoption('environment') sb_config.with_selenium = config.getoption('with_selenium') sb_config.user_agent = config.getoption('user_agent') sb_config.mobile_emulator = config.getoption('mobile_emulator') sb_config.device_metrics = config.getoption('device_metrics') sb_config.headless = config.getoption('headless') sb_config.headed = config.getoption('headed') sb_config.start_page = config.getoption('start_page') sb_config.extension_zip = config.getoption('extension_zip') sb_config.extension_dir = config.getoption('extension_dir') sb_config.with_testing_base = config.getoption('with_testing_base') sb_config.with_db_reporting = config.getoption('with_db_reporting') sb_config.with_s3_logging = config.getoption('with_s3_logging') sb_config.with_screen_shots = config.getoption('with_screen_shots') sb_config.with_basic_test_info = config.getoption('with_basic_test_info') sb_config.with_page_source = config.getoption('with_page_source') sb_config.servername = config.getoption('servername') sb_config.port = config.getoption('port') sb_config.proxy_string = config.getoption('proxy_string') sb_config.cap_file = config.getoption('cap_file') sb_config.cap_string = config.getoption('cap_string') sb_config.settings_file = config.getoption('settings_file') sb_config.user_data_dir = config.getoption('user_data_dir') sb_config.database_env = config.getoption('database_env') sb_config.log_path = 'latest_logs/' # (No longer editable!) 
sb_config.archive_logs = config.getoption('archive_logs') sb_config._time_limit = config.getoption('time_limit') sb_config.time_limit = config.getoption('time_limit') sb_config.slow_mode = config.getoption('slow_mode') sb_config.demo_mode = config.getoption('demo_mode') sb_config.demo_sleep = config.getoption('demo_sleep') sb_config.highlights = config.getoption('highlights') sb_config.message_duration = config.getoption('message_duration') sb_config.js_checking_on = config.getoption('js_checking_on') sb_config.ad_block_on = config.getoption('ad_block_on') sb_config.verify_delay = config.getoption('verify_delay') sb_config.disable_csp = config.getoption('disable_csp') sb_config.enable_sync = config.getoption('enable_sync') sb_config.use_auto_ext = config.getoption('use_auto_ext') sb_config.no_sandbox = config.getoption('no_sandbox') sb_config.disable_gpu = config.getoption('disable_gpu') sb_config.incognito = config.getoption('incognito') sb_config.guest_mode = config.getoption('guest_mode') sb_config.devtools = config.getoption('devtools') sb_config.reuse_session = config.getoption('reuse_session') sb_config.crumbs = config.getoption('crumbs') sb_config.shared_driver = None # The default driver for session reuse sb_config.maximize_option = config.getoption('maximize_option') sb_config.save_screenshot = config.getoption('save_screenshot') sb_config.visual_baseline = config.getoption('visual_baseline') sb_config.timeout_multiplier = config.getoption('timeout_multiplier') sb_config.pytest_html_report = config.getoption('htmlpath') # --html=FILE if sb_config.reuse_session: arg_join = " ".join(sys.argv) if ("-n" in sys.argv) or ("-n=" in arg_join) or (arg_join == "-c"): # sb_config.reuse_session = False pass # Allow multithreaded browser sessions to be reused now if "linux" in sys.platform and ( not sb_config.headed and not sb_config.headless): print( "(Running with --headless on Linux. " "Use --headed or --gui to override.)") sb_config.headless = True if not sb_config.headless: sb_config.headed = True if sb_config.with_testing_base: log_helper.log_folder_setup(sb_config.log_path, sb_config.archive_logs) proxy_helper.remove_proxy_zip_if_present()
[ "def", "pytest_configure", "(", "config", ")", ":", "sb_config", ".", "is_pytest", "=", "True", "sb_config", ".", "browser", "=", "config", ".", "getoption", "(", "'browser'", ")", "sb_config", ".", "data", "=", "config", ".", "getoption", "(", "'data'", ")", "sb_config", ".", "var1", "=", "config", ".", "getoption", "(", "'var1'", ")", "sb_config", ".", "var2", "=", "config", ".", "getoption", "(", "'var2'", ")", "sb_config", ".", "var3", "=", "config", ".", "getoption", "(", "'var3'", ")", "sb_config", ".", "environment", "=", "config", ".", "getoption", "(", "'environment'", ")", "sb_config", ".", "with_selenium", "=", "config", ".", "getoption", "(", "'with_selenium'", ")", "sb_config", ".", "user_agent", "=", "config", ".", "getoption", "(", "'user_agent'", ")", "sb_config", ".", "mobile_emulator", "=", "config", ".", "getoption", "(", "'mobile_emulator'", ")", "sb_config", ".", "device_metrics", "=", "config", ".", "getoption", "(", "'device_metrics'", ")", "sb_config", ".", "headless", "=", "config", ".", "getoption", "(", "'headless'", ")", "sb_config", ".", "headed", "=", "config", ".", "getoption", "(", "'headed'", ")", "sb_config", ".", "start_page", "=", "config", ".", "getoption", "(", "'start_page'", ")", "sb_config", ".", "extension_zip", "=", "config", ".", "getoption", "(", "'extension_zip'", ")", "sb_config", ".", "extension_dir", "=", "config", ".", "getoption", "(", "'extension_dir'", ")", "sb_config", ".", "with_testing_base", "=", "config", ".", "getoption", "(", "'with_testing_base'", ")", "sb_config", ".", "with_db_reporting", "=", "config", ".", "getoption", "(", "'with_db_reporting'", ")", "sb_config", ".", "with_s3_logging", "=", "config", ".", "getoption", "(", "'with_s3_logging'", ")", "sb_config", ".", "with_screen_shots", "=", "config", ".", "getoption", "(", "'with_screen_shots'", ")", "sb_config", ".", "with_basic_test_info", "=", "config", ".", "getoption", "(", "'with_basic_test_info'", ")", "sb_config", ".", "with_page_source", "=", "config", ".", "getoption", "(", "'with_page_source'", ")", "sb_config", ".", "servername", "=", "config", ".", "getoption", "(", "'servername'", ")", "sb_config", ".", "port", "=", "config", ".", "getoption", "(", "'port'", ")", "sb_config", ".", "proxy_string", "=", "config", ".", "getoption", "(", "'proxy_string'", ")", "sb_config", ".", "cap_file", "=", "config", ".", "getoption", "(", "'cap_file'", ")", "sb_config", ".", "cap_string", "=", "config", ".", "getoption", "(", "'cap_string'", ")", "sb_config", ".", "settings_file", "=", "config", ".", "getoption", "(", "'settings_file'", ")", "sb_config", ".", "user_data_dir", "=", "config", ".", "getoption", "(", "'user_data_dir'", ")", "sb_config", ".", "database_env", "=", "config", ".", "getoption", "(", "'database_env'", ")", "sb_config", ".", "log_path", "=", "'latest_logs/'", "# (No longer editable!)", "sb_config", ".", "archive_logs", "=", "config", ".", "getoption", "(", "'archive_logs'", ")", "sb_config", ".", "_time_limit", "=", "config", ".", "getoption", "(", "'time_limit'", ")", "sb_config", ".", "time_limit", "=", "config", ".", "getoption", "(", "'time_limit'", ")", "sb_config", ".", "slow_mode", "=", "config", ".", "getoption", "(", "'slow_mode'", ")", "sb_config", ".", "demo_mode", "=", "config", ".", "getoption", "(", "'demo_mode'", ")", "sb_config", ".", "demo_sleep", "=", "config", ".", "getoption", "(", "'demo_sleep'", ")", "sb_config", ".", "highlights", "=", "config", ".", "getoption", "(", "'highlights'", ")", "sb_config", ".", "message_duration", 
"=", "config", ".", "getoption", "(", "'message_duration'", ")", "sb_config", ".", "js_checking_on", "=", "config", ".", "getoption", "(", "'js_checking_on'", ")", "sb_config", ".", "ad_block_on", "=", "config", ".", "getoption", "(", "'ad_block_on'", ")", "sb_config", ".", "verify_delay", "=", "config", ".", "getoption", "(", "'verify_delay'", ")", "sb_config", ".", "disable_csp", "=", "config", ".", "getoption", "(", "'disable_csp'", ")", "sb_config", ".", "enable_sync", "=", "config", ".", "getoption", "(", "'enable_sync'", ")", "sb_config", ".", "use_auto_ext", "=", "config", ".", "getoption", "(", "'use_auto_ext'", ")", "sb_config", ".", "no_sandbox", "=", "config", ".", "getoption", "(", "'no_sandbox'", ")", "sb_config", ".", "disable_gpu", "=", "config", ".", "getoption", "(", "'disable_gpu'", ")", "sb_config", ".", "incognito", "=", "config", ".", "getoption", "(", "'incognito'", ")", "sb_config", ".", "guest_mode", "=", "config", ".", "getoption", "(", "'guest_mode'", ")", "sb_config", ".", "devtools", "=", "config", ".", "getoption", "(", "'devtools'", ")", "sb_config", ".", "reuse_session", "=", "config", ".", "getoption", "(", "'reuse_session'", ")", "sb_config", ".", "crumbs", "=", "config", ".", "getoption", "(", "'crumbs'", ")", "sb_config", ".", "shared_driver", "=", "None", "# The default driver for session reuse", "sb_config", ".", "maximize_option", "=", "config", ".", "getoption", "(", "'maximize_option'", ")", "sb_config", ".", "save_screenshot", "=", "config", ".", "getoption", "(", "'save_screenshot'", ")", "sb_config", ".", "visual_baseline", "=", "config", ".", "getoption", "(", "'visual_baseline'", ")", "sb_config", ".", "timeout_multiplier", "=", "config", ".", "getoption", "(", "'timeout_multiplier'", ")", "sb_config", ".", "pytest_html_report", "=", "config", ".", "getoption", "(", "'htmlpath'", ")", "# --html=FILE", "if", "sb_config", ".", "reuse_session", ":", "arg_join", "=", "\" \"", ".", "join", "(", "sys", ".", "argv", ")", "if", "(", "\"-n\"", "in", "sys", ".", "argv", ")", "or", "(", "\"-n=\"", "in", "arg_join", ")", "or", "(", "arg_join", "==", "\"-c\"", ")", ":", "# sb_config.reuse_session = False", "pass", "# Allow multithreaded browser sessions to be reused now", "if", "\"linux\"", "in", "sys", ".", "platform", "and", "(", "not", "sb_config", ".", "headed", "and", "not", "sb_config", ".", "headless", ")", ":", "print", "(", "\"(Running with --headless on Linux. \"", "\"Use --headed or --gui to override.)\"", ")", "sb_config", ".", "headless", "=", "True", "if", "not", "sb_config", ".", "headless", ":", "sb_config", ".", "headed", "=", "True", "if", "sb_config", ".", "with_testing_base", ":", "log_helper", ".", "log_folder_setup", "(", "sb_config", ".", "log_path", ",", "sb_config", ".", "archive_logs", ")", "proxy_helper", ".", "remove_proxy_zip_if_present", "(", ")" ]
[ 436, 0 ]
[ 514, 46 ]
python
en
['en', 'en', 'en']
True
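pytest_configure copies each parsed option onto the shared sb_config module so later code can read it. A sketch of consuming those values, assuming the same shared config module import that this plugin uses:

# Sketch: reading plugin-parsed options elsewhere, assuming the shared
# config module import used by this plugin.
from seleniumbase import config as sb_config

def describe_run():
    # These attributes exist only after pytest_configure has run.
    print("browser:", sb_config.browser)
    print("headless:", sb_config.headless)
    print("time limit:", sb_config.time_limit)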
pytest_unconfigure
()
This runs after all tests have completed with pytest.
This runs after all tests have completed with pytest.
def pytest_unconfigure(): """ This runs after all tests have completed with pytest. """ proxy_helper.remove_proxy_zip_if_present() if sb_config.reuse_session: # Close the shared browser session if sb_config.shared_driver: try: sb_config.shared_driver.quit() except AttributeError: pass except Exception: pass sb_config.shared_driver = None log_helper.archive_logs_if_set(sb_config.log_path, sb_config.archive_logs)
[ "def", "pytest_unconfigure", "(", ")", ":", "proxy_helper", ".", "remove_proxy_zip_if_present", "(", ")", "if", "sb_config", ".", "reuse_session", ":", "# Close the shared browser session", "if", "sb_config", ".", "shared_driver", ":", "try", ":", "sb_config", ".", "shared_driver", ".", "quit", "(", ")", "except", "AttributeError", ":", "pass", "except", "Exception", ":", "pass", "sb_config", ".", "shared_driver", "=", "None", "log_helper", ".", "archive_logs_if_set", "(", "sb_config", ".", "log_path", ",", "sb_config", ".", "archive_logs", ")" ]
[ 517, 0 ]
[ 530, 78 ]
python
en
['en', 'en', 'en']
True
pytest_runtest_setup
()
This runs before every test with pytest
This runs before every test with pytest
def pytest_runtest_setup(): """ This runs before every test with pytest """ pass
[ "def", "pytest_runtest_setup", "(", ")", ":", "pass" ]
[ 533, 0 ]
[ 535, 8 ]
python
en
['en', 'en', 'en']
True
pytest_runtest_teardown
(item)
This runs after every test with pytest
This runs after every test with pytest
def pytest_runtest_teardown(item): """ This runs after every test with pytest """ # Make sure webdriver has exited properly and any headless display has stopped try: self = item._testcase try: if hasattr(self, 'driver') and self.driver: self.driver.quit() except Exception: pass try: if hasattr(self, 'headless') and self.headless: if self.headless_active: if hasattr(self, 'display') and self.display: self.display.stop() except Exception: pass except Exception: pass
[ "def", "pytest_runtest_teardown", "(", "item", ")", ":", "# Make sure webdriver has exited properly and any headless display has stopped", "try", ":", "self", "=", "item", ".", "_testcase", "try", ":", "if", "hasattr", "(", "self", ",", "'driver'", ")", "and", "self", ".", "driver", ":", "self", ".", "driver", ".", "quit", "(", ")", "except", "Exception", ":", "pass", "try", ":", "if", "hasattr", "(", "self", ",", "'headless'", ")", "and", "self", ".", "headless", ":", "if", "self", ".", "headless_active", ":", "if", "hasattr", "(", "self", ",", "'display'", ")", "and", "self", ".", "display", ":", "self", ".", "display", ".", "stop", "(", ")", "except", "Exception", ":", "pass", "except", "Exception", ":", "pass" ]
[ 538, 0 ]
[ 557, 12 ]
python
en
['en', 'en', 'en']
True
sb
(request)
SeleniumBase as a pytest fixture. Usage example: "def test_one(sb):" You'll need to use this for tests that use other pytest fixtures.
SeleniumBase as a pytest fixture. Usage example: "def test_one(sb):" You'll need to use this for tests that use other pytest fixtures.
def sb(request): """ SeleniumBase as a pytest fixture. Usage example: "def test_one(sb):" You'll need to use this for tests that use other pytest fixtures. """ from seleniumbase import BaseCase class BaseClass(BaseCase): def base_method(): pass if request.cls: request.cls.sb = BaseClass("base_method") request.cls.sb.setUp() yield request.cls.sb request.cls.sb.tearDown() else: sb = BaseClass("base_method") sb.setUp() yield sb sb.tearDown()
[ "def", "sb", "(", "request", ")", ":", "from", "seleniumbase", "import", "BaseCase", "class", "BaseClass", "(", "BaseCase", ")", ":", "def", "base_method", "(", ")", ":", "pass", "if", "request", ".", "cls", ":", "request", ".", "cls", ".", "sb", "=", "BaseClass", "(", "\"base_method\"", ")", "request", ".", "cls", ".", "sb", ".", "setUp", "(", ")", "yield", "request", ".", "cls", ".", "sb", "request", ".", "cls", ".", "sb", ".", "tearDown", "(", ")", "else", ":", "sb", "=", "BaseClass", "(", "\"base_method\"", ")", "sb", ".", "setUp", "(", ")", "yield", "sb", "sb", ".", "tearDown", "(", ")" ]
[ 561, 0 ]
[ 580, 21 ]
python
en
['en', 'hu', 'en']
True
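The fixture's docstring shows the intended signature; a short sketch of a test that combines sb with another pytest fixture (the URL and expected title below are illustrative):

import pytest

@pytest.fixture
def start_url():
    # A plain pytest fixture, to show the two composing.
    return "https://example.com"

def test_page_title(sb, start_url):
    sb.open(start_url)                  # BaseCase method
    sb.assert_title("Example Domain")   # illustrative expected title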
get_all_styles
()
Return a generator for all styles by name, both builtin and plugin.
Return a generator for all styles by name, both builtin and plugin.
def get_all_styles(): """Return a generator for all styles by name, both builtin and plugin.""" for name in STYLE_MAP: yield name for name, _ in find_plugin_styles(): yield name
[ "def", "get_all_styles", "(", ")", ":", "for", "name", "in", "STYLE_MAP", ":", "yield", "name", "for", "name", ",", "_", "in", "find_plugin_styles", "(", ")", ":", "yield", "name" ]
[ 70, 0 ]
[ 76, 18 ]
python
en
['en', 'en', 'en']
True
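A quick usage sketch; this generator comes from a context (like Pygments' styles module) where STYLE_MAP and find_plugin_styles are already in scope:

# Print every available style name: builtins first, then plugin styles.
for name in get_all_styles():
    print(name)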
is_function
(node)
Check if the AST node is either a function or an async function
Check if the AST node is either a function or an async function
def is_function(node): """Check if the AST node is either a function or an async function """ if isinstance(node, ast.FunctionDef) or isinstance(node, ast.AsyncFunctionDef): return True return False
[ "def", "is_function", "(", "node", ")", ":", "if", "isinstance", "(", "node", ",", "ast", ".", "FunctionDef", ")", "or", "isinstance", "(", "node", ",", "ast", ".", "AsyncFunctionDef", ")", ":", "return", "True", "return", "False" ]
[ 38, 0 ]
[ 44, 16 ]
python
en
['en', 'en', 'en']
True
is_public
(node)
Check if the AST node has a _public decorator
Check if the AST node has a _public decorator
def is_public(node): """Check if the AST node has a _public decorator""" if not is_function(node): return False for decorator in node.decorator_list: if isinstance(decorator, ast.Name) and decorator.id == "_public": return True return False
[ "def", "is_public", "(", "node", ")", ":", "if", "not", "is_function", "(", "node", ")", ":", "return", "False", "for", "decorator", "in", "node", ".", "decorator_list", ":", "if", "isinstance", "(", "decorator", ",", "ast", ".", "Name", ")", "and", "decorator", ".", "id", "==", "\"_public\"", ":", "return", "True", "return", "False" ]
[ 47, 0 ]
[ 54, 16 ]
python
en
['en', 'en', 'en']
True
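is_function and is_public operate on parsed AST nodes, so they are easy to exercise directly; a small sketch (the sample module text is made up for illustration):

import ast

sample = '''
def plain(self):
    pass

@_public
def exported(self):
    pass
'''

# Walk the parsed tree and report which functions carry the decorator.
for node in ast.walk(ast.parse(sample)):
    if isinstance(node, (ast.FunctionDef, ast.AsyncFunctionDef)):
        print(node.name, is_public(node))
# Expected: plain False / exported True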
get_public_methods
(tree)
Return a generator of methods marked as public. The function walks the given tree and extracts all function nodes that are marked public.
Return a generator of methods marked as public. The function walks the given tree and extracts all function nodes that are marked public.
def get_public_methods(tree): """Return a generator of methods marked as public. The function walks the given tree and extracts all function nodes that are marked public. """ for node in ast.walk(tree): if is_public(node): yield node
[ "def", "get_public_methods", "(", "tree", ")", ":", "for", "node", "in", "ast", ".", "walk", "(", "tree", ")", ":", "if", "is_public", "(", "node", ")", ":", "yield", "node" ]
[ 57, 0 ]
[ 65, 22 ]
python
en
['en', 'en', 'en']
True
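Combining the helpers, a sketch that lists the names of every @_public method in a module; the file path is illustrative:

import ast

with open("some_module.py") as f:      # path is illustrative
    tree = ast.parse(f.read())
public_names = [m.name for m in get_public_methods(tree)]
print(public_names)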
create_passthrough_args
(funcdef)
Given a function definition, create a string that represents taking all the arguments from the function, and passing them through to another invocation of the same function. Example input: ast.parse("def f(a, *, b): ...") Example output: "(a, b=b)"
Given a function definition, create a string that represents taking all the arguments from the function, and passing them through to another invocation of the same function.
def create_passthrough_args(funcdef): """Given a function definition, create a string that represents taking all the arguments from the function, and passing them through to another invocation of the same function. Example input: ast.parse("def f(a, *, b): ...") Example output: "(a, b=b)" """ call_args = [] for arg in funcdef.args.args: call_args.append(arg.arg) if funcdef.args.vararg: call_args.append("*" + funcdef.args.vararg.arg) for arg in funcdef.args.kwonlyargs: call_args.append(arg.arg + "=" + arg.arg) if funcdef.args.kwarg: call_args.append("**" + funcdef.args.kwarg.arg) return "({})".format(", ".join(call_args))
[ "def", "create_passthrough_args", "(", "funcdef", ")", ":", "call_args", "=", "[", "]", "for", "arg", "in", "funcdef", ".", "args", ".", "args", ":", "call_args", ".", "append", "(", "arg", ".", "arg", ")", "if", "funcdef", ".", "args", ".", "vararg", ":", "call_args", ".", "append", "(", "\"*\"", "+", "funcdef", ".", "args", ".", "vararg", ".", "arg", ")", "for", "arg", "in", "funcdef", ".", "args", ".", "kwonlyargs", ":", "call_args", ".", "append", "(", "arg", ".", "arg", "+", "\"=\"", "+", "arg", ".", "arg", ")", "if", "funcdef", ".", "args", ".", "kwarg", ":", "call_args", ".", "append", "(", "\"**\"", "+", "funcdef", ".", "args", ".", "kwarg", ".", "arg", ")", "return", "\"({})\"", ".", "format", "(", "\", \"", ".", "join", "(", "call_args", ")", ")" ]
[ 68, 0 ]
[ 85, 46 ]
python
en
['en', 'en', 'en']
True
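Reproducing the docstring's example, plus a variant with *args/**kwargs so that all four argument branches are covered between them:

import ast

funcdef = ast.parse("def f(a, *, b): ...").body[0]
print(create_passthrough_args(funcdef))    # -> (a, b=b)

funcdef = ast.parse("def g(x, *args, **kwargs): ...").body[0]
print(create_passthrough_args(funcdef))    # -> (x, *args, **kwargs)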
gen_public_wrappers_source
(source_path: Path, lookup_path: str)
Scan the given .py file for @_public decorators, and generate wrapper functions.
Scan the given .py file for @_public decorators, and generate wrapper functions.
def gen_public_wrappers_source(source_path: Path, lookup_path: str) -> str: """Scan the given .py file for @_public decorators, and generate wrapper functions. """ generated = [HEADER] source = astor.code_to_ast.parse_file(source_path) for method in get_public_methods(source): # Remove self from arguments assert method.args.args[0].arg == "self" del method.args.args[0] # Remove decorators method.decorator_list = [] # Create pass through arguments new_args = create_passthrough_args(method) # Remove method body without the docstring if ast.get_docstring(method) is None: del method.body[:] else: # The first entry is always the docstring del method.body[1:] # Create the function definition including the body func = astor.to_source(method, indent_with=" " * 4) # Create export function body template = TEMPLATE.format( " await " if isinstance(method, ast.AsyncFunctionDef) else " ", lookup_path, method.name + new_args, ) # Assemble function definition arguments and body snippet = func + indent(template, " " * 4) # Append the snippet to the corresponding module generated.append(snippet) generated.append(FOOTER) return "\n\n".join(generated)
[ "def", "gen_public_wrappers_source", "(", "source_path", ":", "Path", ",", "lookup_path", ":", "str", ")", "->", "str", ":", "generated", "=", "[", "HEADER", "]", "source", "=", "astor", ".", "code_to_ast", ".", "parse_file", "(", "source_path", ")", "for", "method", "in", "get_public_methods", "(", "source", ")", ":", "# Remove self from arguments", "assert", "method", ".", "args", ".", "args", "[", "0", "]", ".", "arg", "==", "\"self\"", "del", "method", ".", "args", ".", "args", "[", "0", "]", "# Remove decorators", "method", ".", "decorator_list", "=", "[", "]", "# Create pass through arguments", "new_args", "=", "create_passthrough_args", "(", "method", ")", "# Remove method body without the docstring", "if", "ast", ".", "get_docstring", "(", "method", ")", "is", "None", ":", "del", "method", ".", "body", "[", ":", "]", "else", ":", "# The first entry is always the docstring", "del", "method", ".", "body", "[", "1", ":", "]", "# Create the function definition including the body", "func", "=", "astor", ".", "to_source", "(", "method", ",", "indent_with", "=", "\" \"", "*", "4", ")", "# Create export function body", "template", "=", "TEMPLATE", ".", "format", "(", "\" await \"", "if", "isinstance", "(", "method", ",", "ast", ".", "AsyncFunctionDef", ")", "else", "\" \"", ",", "lookup_path", ",", "method", ".", "name", "+", "new_args", ",", ")", "# Assemble function definition arguments and body", "snippet", "=", "func", "+", "indent", "(", "template", ",", "\" \"", "*", "4", ")", "# Append the snippet to the corresponding module", "generated", ".", "append", "(", "snippet", ")", "generated", ".", "append", "(", "FOOTER", ")", "return", "\"\\n\\n\"", ".", "join", "(", "generated", ")" ]
[ 88, 0 ]
[ 129, 33 ]
python
en
['en', 'en', 'en']
True
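A usage sketch, assuming the module-level HEADER/TEMPLATE/FOOTER constants this generator references are defined; the source path and lookup path are illustrative:

from pathlib import Path

wrapper_source = gen_public_wrappers_source(
    Path("_mymodule/_impl.py"),   # file to scan for @_public methods
    "runner",                     # object the generated wrappers delegate to
)
Path("_generated.py").write_text(wrapper_source)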
is_installed
()
Return whether or not xauth is installed.
Return whether or not xauth is installed.
def is_installed(): ''' Return whether or not xauth is installed. ''' try: p = EasyProcess(['xauth', '-V']) p.enable_stdout_log = False p.enable_stderr_log = False p.call() except Exception: return False else: return True
[ "def", "is_installed", "(", ")", ":", "try", ":", "p", "=", "EasyProcess", "(", "[", "'xauth'", ",", "'-V'", "]", ")", "p", ".", "enable_stdout_log", "=", "False", "p", ".", "enable_stderr_log", "=", "False", "p", ".", "call", "(", ")", "except", "Exception", ":", "return", "False", "else", ":", "return", "True" ]
[ 11, 0 ]
[ 23, 19 ]
python
en
['en', 'error', 'th']
False
generate_mcookie
()
Generate a cookie string suitable for xauth.
Generate a cookie string suitable for xauth.
def generate_mcookie(): ''' Generate a cookie string suitable for xauth. ''' data = os.urandom(16) # 16 bytes = 128 bit return hashlib.md5(data).hexdigest()
[ "def", "generate_mcookie", "(", ")", ":", "data", "=", "os", ".", "urandom", "(", "16", ")", "# 16 bytes = 128 bit", "return", "hashlib", ".", "md5", "(", "data", ")", ".", "hexdigest", "(", ")" ]
[ 26, 0 ]
[ 31, 40 ]
python
en
['en', 'error', 'th']
False
call
(*args)
Call xauth with the given args.
Call xauth with the given args.
def call(*args): ''' Call xauth with the given args. ''' EasyProcess(['xauth'] + list(args)).call()
[ "def", "call", "(", "*", "args", ")", ":", "EasyProcess", "(", "[", "'xauth'", "]", "+", "list", "(", "args", ")", ")", ".", "call", "(", ")" ]
[ 34, 0 ]
[ 38, 46 ]
python
en
['en', 'error', 'th']
False
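The three xauth helpers compose naturally: check for the binary, mint a cookie, then register it for a display. A sketch (the display number is illustrative):

if is_installed():
    cookie = generate_mcookie()
    # "xauth add <display> <protocol> <cookie>"; "." selects
    # the MIT-MAGIC-COOKIE-1 protocol.
    call('add', ':42', '.', cookie)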
CheckpointStore.ge_cloud_response_json_to_object_dict
(self, response_json: Dict)
This method takes a full JSON response from GE Cloud and outputs a dict appropriate for deserialization into a GE object
This method takes a full JSON response from GE Cloud and outputs a dict appropriate for deserialization into a GE object
def ge_cloud_response_json_to_object_dict(self, response_json: Dict) -> Dict: """ This method takes a full JSON response from GE Cloud and outputs a dict appropriate for deserialization into a GE object """ ge_cloud_checkpoint_id = response_json["data"]["id"] checkpoint_config_dict = response_json["data"]["attributes"][ "checkpoint_config" ] checkpoint_config_dict["ge_cloud_id"] = ge_cloud_checkpoint_id return checkpoint_config_dict
[ "def", "ge_cloud_response_json_to_object_dict", "(", "self", ",", "response_json", ":", "Dict", ")", "->", "Dict", ":", "ge_cloud_checkpoint_id", "=", "response_json", "[", "\"data\"", "]", "[", "\"id\"", "]", "checkpoint_config_dict", "=", "response_json", "[", "\"data\"", "]", "[", "\"attributes\"", "]", "[", "\"checkpoint_config\"", "]", "checkpoint_config_dict", "[", "\"ge_cloud_id\"", "]", "=", "ge_cloud_checkpoint_id", "return", "checkpoint_config_dict" ]
[ 21, 4 ]
[ 32, 37 ]
python
en
['en', 'error', 'th']
False
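A sketch of the transformation with a made-up payload shaped like the fields the method reads; `store` stands in for a CheckpointStore instance:

response_json = {
    "data": {
        "id": "abc-123",
        "attributes": {
            "checkpoint_config": {"name": "my_checkpoint"},
        },
    }
}
config = store.ge_cloud_response_json_to_object_dict(response_json)
print(config)   # {'name': 'my_checkpoint', 'ge_cloud_id': 'abc-123'}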
ExpectColumnUniqueValueCountToBeBetween.validate_configuration
(self, configuration: Optional[ExpectationConfiguration])
Validates that a configuration has been set, and sets a configuration if it has yet to be set. Ensures that necessary configuration arguments have been provided for the validation of the expectation. Args: configuration (OPTIONAL[ExpectationConfiguration]): \ An optional Expectation Configuration entry that will be used to configure the expectation Returns: True if the configuration has been validated successfully. Otherwise, raises an exception
Validates that a configuration has been set, and sets a configuration if it has yet to be set. Ensures that necessary configuration arguments have been provided for the validation of the expectation.
def validate_configuration(self, configuration: Optional[ExpectationConfiguration]): """ Validates that a configuration has been set, and sets a configuration if it has yet to be set. Ensures that necessary configuration arguments have been provided for the validation of the expectation. Args: configuration (OPTIONAL[ExpectationConfiguration]): \ An optional Expectation Configuration entry that will be used to configure the expectation Returns: True if the configuration has been validated successfully. Otherwise, raises an exception """ super().validate_configuration(configuration) self.validate_metric_value_between_configuration(configuration=configuration)
[ "def", "validate_configuration", "(", "self", ",", "configuration", ":", "Optional", "[", "ExpectationConfiguration", "]", ")", ":", "super", "(", ")", ".", "validate_configuration", "(", "configuration", ")", "self", ".", "validate_metric_value_between_configuration", "(", "configuration", "=", "configuration", ")" ]
[ 104, 4 ]
[ 116, 85 ]
python
en
['en', 'error', 'th']
False
step_impl
(context)
:type context: behave.runner.Context
:type context: behave.runner.Context
def step_impl(context): """ :type context: behave.runner.Context """ print("Hello")
[ "def", "step_impl", "(", "context", ")", ":", "print", "(", "\"Hello\"", ")" ]
[ 6, 0 ]
[ 10, 18 ]
python
en
['en', 'error', 'th']
False
step_impl
(context, country)
:type context: behave.runner.Context :type country: str
:type context: behave.runner.Context :type country: str
def step_impl(context, country): """ :type context: behave.runner.Context :type country: str """ context.country = country
[ "def", "step_impl", "(", "context", ",", "country", ")", ":", "context", ".", "country", "=", "country" ]
[ 14, 0 ]
[ 19, 29 ]
python
en
['en', 'error', 'th']
False
step_impl
(context, city)
:type context: behave.runner.Context :type city: str
:type context: behave.runner.Context :type city: str
def step_impl(context, city): """ :type context: behave.runner.Context :type city: str """ capitals = {"USA": "Washington", "Japan": "Tokio"} assert capitals[context.country] == city
[ "def", "step_impl", "(", "context", ",", "city", ")", ":", "capitals", "=", "{", "\"USA\"", ":", "\"Washington\"", ",", "\"Japan\"", ":", "\"Tokio\"", "}", "assert", "capitals", "[", "context", ".", "country", "]", "==", "city" ]
[ 23, 0 ]
[ 29, 44 ]
python
en
['en', 'error', 'th']
False
step_impl
(context)
:type context: behave.runner.Context
:type context: behave.runner.Context
def step_impl(context): """ :type context: behave.runner.Context """ raise Exception("fail")
[ "def", "step_impl", "(", "context", ")", ":", "raise", "Exception", "(", "\"fail\"", ")" ]
[ 33, 0 ]
[ 37, 27 ]
python
en
['en', 'error', 'th']
False
step_impl
(context)
:type context: behave.runner.Context
:type context: behave.runner.Context
def step_impl(context): """ :type context: behave.runner.Context """ print("Background")
[ "def", "step_impl", "(", "context", ")", ":", "print", "(", "\"Background\"", ")" ]
[ 41, 0 ]
[ 45, 23 ]
python
en
['en', 'error', 'th']
False
step_impl
(context)
:type context: behave.runner.Context
:type context: behave.runner.Context
def step_impl(context): """ :type context: behave.runner.Context """ pass
[ "def", "step_impl", "(", "context", ")", ":", "pass" ]
[ 49, 0 ]
[ 53, 8 ]
python
en
['en', 'error', 'th']
False
step_impl
(context)
:type context: behave.runner.Context
:type context: behave.runner.Context
def step_impl(context): """ :type context: behave.runner.Context """ pass
[ "def", "step_impl", "(", "context", ")", ":", "pass" ]
[ 57, 0 ]
[ 61, 8 ]
python
en
['en', 'error', 'th']
False
step_impl
(context)
:type context: behave.runner.Context
:type context: behave.runner.Context
def step_impl(context): """ :type context: behave.runner.Context """ pass
[ "def", "step_impl", "(", "context", ")", ":", "pass" ]
[ 65, 0 ]
[ 69, 8 ]
python
en
['en', 'error', 'th']
False
step_impl
(context)
:type context: behave.runner.Context
:type context: behave.runner.Context
def step_impl(context): """ :type context: behave.runner.Context """ pass
[ "def", "step_impl", "(", "context", ")", ":", "pass" ]
[ 73, 0 ]
[ 77, 8 ]
python
en
['en', 'error', 'th']
False
series_to_supervised
(data, n_in=1, n_out=1, dropnan=True)
Frame a time series as a supervised learning dataset. Arguments: :param data: Sequence of observations as a list or NumPy array. :param n_in: Number of lag observations as input (X). :param n_out: Number of observations as output (y). :param dropnan: Boolean whether or not to drop rows with NaN values. :return: Pandas DataFrame of series framed for supervised learning.
Frame a time series as a supervised learning dataset. Arguments: :param data: Sequence of observations as a list or NumPy array. :param n_in: Number of lag observations as input (X). :param n_out: Number of observations as output (y). :param dropnan: Boolean whether or not to drop rows with NaN values. :return: Pandas DataFrame of series framed for supervised learning.
def series_to_supervised(data, n_in=1, n_out=1, dropnan=True): """ Frame a time series as a supervised learning dataset. Arguments: :param data: Sequence of observations as a list or NumPy array. :param n_in: Number of lag observations as input (X). :param n_out: Number of observations as output (y). :param dropnan: Boolean whether or not to drop rows with NaN values. :return: Pandas DataFrame of series framed for supervised learning. """ n_vars = 1 if type(data) is list else data.shape[1] dataframe = pd.DataFrame(data) cols, names = list(), list() # input sequence (t-n, ... t-1) for i in range(n_in, 0, -1): cols.append(dataframe.shift(i)) names += [('var%d(t-%d)' % (j+1, i)) for j in range(n_vars)] # forecast sequence (t, t+1, ... t+n) for i in range(0, n_out): cols.append(dataframe.shift(-i)) if i == 0: names += [('var%d(t)' % (j+1)) for j in range(n_vars)] else: names += [('var%d(t+%d)' % (j+1, i)) for j in range(n_vars)] # put it all together agg = pd.concat(cols, axis=1) agg.columns = names # drop rows with NaN values if dropnan: agg.dropna(inplace=True) return agg
[ "def", "series_to_supervised", "(", "data", ",", "n_in", "=", "1", ",", "n_out", "=", "1", ",", "dropnan", "=", "True", ")", ":", "n_vars", "=", "1", "if", "type", "(", "data", ")", "is", "list", "else", "data", ".", "shape", "[", "1", "]", "dataframe", "=", "pd", ".", "DataFrame", "(", "data", ")", "cols", ",", "names", "=", "list", "(", ")", ",", "list", "(", ")", "# input sequence (t-n, ... t-1)", "for", "i", "in", "range", "(", "n_in", ",", "0", ",", "-", "1", ")", ":", "cols", ".", "append", "(", "dataframe", ".", "shift", "(", "i", ")", ")", "names", "+=", "[", "(", "'var%d(t-%d)'", "%", "(", "j", "+", "1", ",", "i", ")", ")", "for", "j", "in", "range", "(", "n_vars", ")", "]", "# forecast sequence (t, t+1, ... t+n)", "for", "i", "in", "range", "(", "0", ",", "n_out", ")", ":", "cols", ".", "append", "(", "dataframe", ".", "shift", "(", "-", "i", ")", ")", "if", "i", "==", "0", ":", "names", "+=", "[", "(", "'var%d(t)'", "%", "(", "j", "+", "1", ")", ")", "for", "j", "in", "range", "(", "n_vars", ")", "]", "else", ":", "names", "+=", "[", "(", "'var%d(t+%d)'", "%", "(", "j", "+", "1", ",", "i", ")", ")", "for", "j", "in", "range", "(", "n_vars", ")", "]", "# put it all together", "agg", "=", "pd", ".", "concat", "(", "cols", ",", "axis", "=", "1", ")", "agg", ".", "columns", "=", "names", "# drop rows with NaN values", "if", "dropnan", ":", "agg", ".", "dropna", "(", "inplace", "=", "True", ")", "return", "agg" ]
[ 28, 0 ]
[ 59, 14 ]
python
en
['en', 'error', 'th']
False
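A worked example on a tiny univariate series makes the framing concrete (the values are arbitrary):

import pandas as pd   # the function above also expects pandas as pd

values = [10, 20, 30, 40, 50]
framed = series_to_supervised(values, n_in=1, n_out=1)
print(framed)
# The first row is dropped because the lag shift introduces a NaN:
#    var1(t-1)  var1(t)
# 1       10.0       20
# 2       20.0       30
# 3       30.0       40
# 4       40.0       50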
_RegistryQueryBase
(sysdir, key, value)
Use reg.exe to read a particular key. While ideally we might use the win32 module, we would like gyp to remain Python-neutral; for instance, Cygwin Python lacks this module. Arguments: sysdir: The system subdirectory to attempt to launch reg.exe from. key: The registry key to read from. value: The particular value to read. Return: stdout from reg.exe, or None for failure.
Use reg.exe to read a particular key.
def _RegistryQueryBase(sysdir, key, value): """Use reg.exe to read a particular key. While ideally we might use the win32 module, we would like gyp to remain Python-neutral; for instance, Cygwin Python lacks this module. Arguments: sysdir: The system subdirectory to attempt to launch reg.exe from. key: The registry key to read from. value: The particular value to read. Return: stdout from reg.exe, or None for failure. """ # Skip if not on Windows or Python Win32 setup issue if sys.platform not in ('win32', 'cygwin'): return None # Setup params to pass to and attempt to launch reg.exe cmd = [os.path.join(os.environ.get('WINDIR', ''), sysdir, 'reg.exe'), 'query', key] if value: cmd.extend(['/v', value]) p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) # Obtain the stdout from reg.exe, reading to the end so p.returncode is valid # Note that the error text may be in [1] in some cases text = p.communicate()[0] # Check return code from reg.exe; officially 0==success and 1==error if p.returncode: return None return text
[ "def", "_RegistryQueryBase", "(", "sysdir", ",", "key", ",", "value", ")", ":", "# Skip if not on Windows or Python Win32 setup issue", "if", "sys", ".", "platform", "not", "in", "(", "'win32'", ",", "'cygwin'", ")", ":", "return", "None", "# Setup params to pass to and attempt to launch reg.exe", "cmd", "=", "[", "os", ".", "path", ".", "join", "(", "os", ".", "environ", ".", "get", "(", "'WINDIR'", ",", "''", ")", ",", "sysdir", ",", "'reg.exe'", ")", ",", "'query'", ",", "key", "]", "if", "value", ":", "cmd", ".", "extend", "(", "[", "'/v'", ",", "value", "]", ")", "p", "=", "subprocess", ".", "Popen", "(", "cmd", ",", "stdout", "=", "subprocess", ".", "PIPE", ",", "stderr", "=", "subprocess", ".", "PIPE", ")", "# Obtain the stdout from reg.exe, reading to the end so p.returncode is valid", "# Note that the error text may be in [1] in some cases", "text", "=", "p", ".", "communicate", "(", ")", "[", "0", "]", "# Check return code from reg.exe; officially 0==success and 1==error", "if", "p", ".", "returncode", ":", "return", "None", "return", "text" ]
[ 109, 0 ]
[ 137, 13 ]
python
en
['en', 'en', 'en']
True
_RegistryQuery
(key, value=None)
r"""Use reg.exe to read a particular key through _RegistryQueryBase. First tries to launch from %WinDir%\Sysnative to avoid WoW64 redirection. If that fails, it falls back to System32. Sysnative is available on Vista and up and available on Windows Server 2003 and XP through KB patch 942589. Note that Sysnative will always fail if using 64-bit python due to it being a virtual directory and System32 will work correctly in the first place. KB 942589 - http://support.microsoft.com/kb/942589/en-us. Arguments: key: The registry key. value: The particular registry value to read (optional). Return: stdout from reg.exe, or None for failure.
r"""Use reg.exe to read a particular key through _RegistryQueryBase.
def _RegistryQuery(key, value=None): r"""Use reg.exe to read a particular key through _RegistryQueryBase. First tries to launch from %WinDir%\Sysnative to avoid WoW64 redirection. If that fails, it falls back to System32. Sysnative is available on Vista and up and available on Windows Server 2003 and XP through KB patch 942589. Note that Sysnative will always fail if using 64-bit python due to it being a virtual directory and System32 will work correctly in the first place. KB 942589 - http://support.microsoft.com/kb/942589/en-us. Arguments: key: The registry key. value: The particular registry value to read (optional). Return: stdout from reg.exe, or None for failure. """ text = None try: text = _RegistryQueryBase('Sysnative', key, value) except OSError as e: if e.errno == errno.ENOENT: text = _RegistryQueryBase('System32', key, value) else: raise return text
[ "def", "_RegistryQuery", "(", "key", ",", "value", "=", "None", ")", ":", "text", "=", "None", "try", ":", "text", "=", "_RegistryQueryBase", "(", "'Sysnative'", ",", "key", ",", "value", ")", "except", "OSError", "as", "e", ":", "if", "e", ".", "errno", "==", "errno", ".", "ENOENT", ":", "text", "=", "_RegistryQueryBase", "(", "'System32'", ",", "key", ",", "value", ")", "else", ":", "raise", "return", "text" ]
[ 140, 0 ]
[ 165, 13 ]
python
en
['en', 'en', 'en']
True
_RegistryGetValueUsingWinReg
(key, value)
Use the _winreg module to obtain the value of a registry key. Args: key: The registry key. value: The particular registry value to read. Return: contents of the registry key's value, or None on failure. Throws ImportError if _winreg is unavailable.
Use the _winreg module to obtain the value of a registry key.
def _RegistryGetValueUsingWinReg(key, value): """Use the _winreg module to obtain the value of a registry key. Args: key: The registry key. value: The particular registry value to read. Return: contents of the registry key's value, or None on failure. Throws ImportError if _winreg is unavailable. """ import _winreg try: root, subkey = key.split('\\', 1) assert root == 'HKLM' # Only need HKLM for now. with _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE, subkey) as hkey: return _winreg.QueryValueEx(hkey, value)[0] except WindowsError: return None
[ "def", "_RegistryGetValueUsingWinReg", "(", "key", ",", "value", ")", ":", "import", "_winreg", "try", ":", "root", ",", "subkey", "=", "key", ".", "split", "(", "'\\\\'", ",", "1", ")", "assert", "root", "==", "'HKLM'", "# Only need HKLM for now.", "with", "_winreg", ".", "OpenKey", "(", "_winreg", ".", "HKEY_LOCAL_MACHINE", ",", "subkey", ")", "as", "hkey", ":", "return", "_winreg", ".", "QueryValueEx", "(", "hkey", ",", "value", ")", "[", "0", "]", "except", "WindowsError", ":", "return", "None" ]
[ 168, 0 ]
[ 185, 15 ]
python
en
['en', 'en', 'en']
True
_RegistryGetValue
(key, value)
Use _winreg or reg.exe to obtain the value of a registry key. Using _winreg is preferable because it solves an issue in some corporate environments where access to reg.exe is locked down. However, we still need to fall back to reg.exe for the case where the _winreg module is not available (for example in cygwin python). Args: key: The registry key. value: The particular registry value to read. Return: contents of the registry key's value, or None on failure.
Use _winreg or reg.exe to obtain the value of a registry key.
def _RegistryGetValue(key, value): """Use _winreg or reg.exe to obtain the value of a registry key. Using _winreg is preferable because it solves an issue in some corporate environments where access to reg.exe is locked down. However, we still need to fall back to reg.exe for the case where the _winreg module is not available (for example in cygwin python). Args: key: The registry key. value: The particular registry value to read. Return: contents of the registry key's value, or None on failure. """ try: return _RegistryGetValueUsingWinReg(key, value) except ImportError: pass # Fallback to reg.exe if we fail to import _winreg. text = _RegistryQuery(key, value) if not text: return None # Extract value. match = re.search(r'REG_\w+\s+([^\r]+)\r\n', text) if not match: return None return match.group(1)
[ "def", "_RegistryGetValue", "(", "key", ",", "value", ")", ":", "try", ":", "return", "_RegistryGetValueUsingWinReg", "(", "key", ",", "value", ")", "except", "ImportError", ":", "pass", "# Fallback to reg.exe if we fail to import _winreg.", "text", "=", "_RegistryQuery", "(", "key", ",", "value", ")", "if", "not", "text", ":", "return", "None", "# Extract value.", "match", "=", "re", ".", "search", "(", "r'REG_\\w+\\s+([^\\r]+)\\r\\n'", ",", "text", ")", "if", "not", "match", ":", "return", "None", "return", "match", ".", "group", "(", "1", ")" ]
[ 188, 0 ]
[ 215, 23 ]
python
en
['en', 'en', 'en']
True
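A sketch of the fallback chain in use, querying the same SxS\VC7 key that the detection code below reads (results depend on the local machine):

path = _RegistryGetValue(
    r'HKLM\Software\Microsoft\VisualStudio\SxS\VC7', '14.0')
if path:
    print('VC++ 14.0 directory:', path)
else:
    print('Not found (or not running on Windows).')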
_CreateVersion
(name, path, sdk_based=False)
Sets up MSVS project generation. Setup is based on the GYP_MSVS_VERSION environment variable or whatever is autodetected if GYP_MSVS_VERSION is not explicitly specified. If a version is passed in that doesn't match a value in versions, Python will throw an error.
Sets up MSVS project generation.
def _CreateVersion(name, path, sdk_based=False): """Sets up MSVS project generation. Setup is based on the GYP_MSVS_VERSION environment variable or whatever is autodetected if GYP_MSVS_VERSION is not explicitly specified. If a version is passed in that doesn't match a value in versions, Python will throw an error. """ if path: path = os.path.normpath(path) versions = { '2015': VisualStudioVersion('2015', 'Visual Studio 2015', solution_version='12.00', project_version='14.0', flat_sln=False, uses_vcxproj=True, path=path, sdk_based=sdk_based, default_toolset='v140'), '2013': VisualStudioVersion('2013', 'Visual Studio 2013', solution_version='13.00', project_version='12.0', flat_sln=False, uses_vcxproj=True, path=path, sdk_based=sdk_based, default_toolset='v120'), '2013e': VisualStudioVersion('2013e', 'Visual Studio 2013', solution_version='13.00', project_version='12.0', flat_sln=True, uses_vcxproj=True, path=path, sdk_based=sdk_based, default_toolset='v120'), '2012': VisualStudioVersion('2012', 'Visual Studio 2012', solution_version='12.00', project_version='4.0', flat_sln=False, uses_vcxproj=True, path=path, sdk_based=sdk_based, default_toolset='v110'), '2012e': VisualStudioVersion('2012e', 'Visual Studio 2012', solution_version='12.00', project_version='4.0', flat_sln=True, uses_vcxproj=True, path=path, sdk_based=sdk_based, default_toolset='v110'), '2010': VisualStudioVersion('2010', 'Visual Studio 2010', solution_version='11.00', project_version='4.0', flat_sln=False, uses_vcxproj=True, path=path, sdk_based=sdk_based), '2010e': VisualStudioVersion('2010e', 'Visual C++ Express 2010', solution_version='11.00', project_version='4.0', flat_sln=True, uses_vcxproj=True, path=path, sdk_based=sdk_based), '2008': VisualStudioVersion('2008', 'Visual Studio 2008', solution_version='10.00', project_version='9.00', flat_sln=False, uses_vcxproj=False, path=path, sdk_based=sdk_based), '2008e': VisualStudioVersion('2008e', 'Visual Studio 2008', solution_version='10.00', project_version='9.00', flat_sln=True, uses_vcxproj=False, path=path, sdk_based=sdk_based), '2005': VisualStudioVersion('2005', 'Visual Studio 2005', solution_version='9.00', project_version='8.00', flat_sln=False, uses_vcxproj=False, path=path, sdk_based=sdk_based), '2005e': VisualStudioVersion('2005e', 'Visual Studio 2005', solution_version='9.00', project_version='8.00', flat_sln=True, uses_vcxproj=False, path=path, sdk_based=sdk_based), } return versions[str(name)]
[ "def", "_CreateVersion", "(", "name", ",", "path", ",", "sdk_based", "=", "False", ")", ":", "if", "path", ":", "path", "=", "os", ".", "path", ".", "normpath", "(", "path", ")", "versions", "=", "{", "'2015'", ":", "VisualStudioVersion", "(", "'2015'", ",", "'Visual Studio 2015'", ",", "solution_version", "=", "'12.00'", ",", "project_version", "=", "'14.0'", ",", "flat_sln", "=", "False", ",", "uses_vcxproj", "=", "True", ",", "path", "=", "path", ",", "sdk_based", "=", "sdk_based", ",", "default_toolset", "=", "'v140'", ")", ",", "'2013'", ":", "VisualStudioVersion", "(", "'2013'", ",", "'Visual Studio 2013'", ",", "solution_version", "=", "'13.00'", ",", "project_version", "=", "'12.0'", ",", "flat_sln", "=", "False", ",", "uses_vcxproj", "=", "True", ",", "path", "=", "path", ",", "sdk_based", "=", "sdk_based", ",", "default_toolset", "=", "'v120'", ")", ",", "'2013e'", ":", "VisualStudioVersion", "(", "'2013e'", ",", "'Visual Studio 2013'", ",", "solution_version", "=", "'13.00'", ",", "project_version", "=", "'12.0'", ",", "flat_sln", "=", "True", ",", "uses_vcxproj", "=", "True", ",", "path", "=", "path", ",", "sdk_based", "=", "sdk_based", ",", "default_toolset", "=", "'v120'", ")", ",", "'2012'", ":", "VisualStudioVersion", "(", "'2012'", ",", "'Visual Studio 2012'", ",", "solution_version", "=", "'12.00'", ",", "project_version", "=", "'4.0'", ",", "flat_sln", "=", "False", ",", "uses_vcxproj", "=", "True", ",", "path", "=", "path", ",", "sdk_based", "=", "sdk_based", ",", "default_toolset", "=", "'v110'", ")", ",", "'2012e'", ":", "VisualStudioVersion", "(", "'2012e'", ",", "'Visual Studio 2012'", ",", "solution_version", "=", "'12.00'", ",", "project_version", "=", "'4.0'", ",", "flat_sln", "=", "True", ",", "uses_vcxproj", "=", "True", ",", "path", "=", "path", ",", "sdk_based", "=", "sdk_based", ",", "default_toolset", "=", "'v110'", ")", ",", "'2010'", ":", "VisualStudioVersion", "(", "'2010'", ",", "'Visual Studio 2010'", ",", "solution_version", "=", "'11.00'", ",", "project_version", "=", "'4.0'", ",", "flat_sln", "=", "False", ",", "uses_vcxproj", "=", "True", ",", "path", "=", "path", ",", "sdk_based", "=", "sdk_based", ")", ",", "'2010e'", ":", "VisualStudioVersion", "(", "'2010e'", ",", "'Visual C++ Express 2010'", ",", "solution_version", "=", "'11.00'", ",", "project_version", "=", "'4.0'", ",", "flat_sln", "=", "True", ",", "uses_vcxproj", "=", "True", ",", "path", "=", "path", ",", "sdk_based", "=", "sdk_based", ")", ",", "'2008'", ":", "VisualStudioVersion", "(", "'2008'", ",", "'Visual Studio 2008'", ",", "solution_version", "=", "'10.00'", ",", "project_version", "=", "'9.00'", ",", "flat_sln", "=", "False", ",", "uses_vcxproj", "=", "False", ",", "path", "=", "path", ",", "sdk_based", "=", "sdk_based", ")", ",", "'2008e'", ":", "VisualStudioVersion", "(", "'2008e'", ",", "'Visual Studio 2008'", ",", "solution_version", "=", "'10.00'", ",", "project_version", "=", "'9.00'", ",", "flat_sln", "=", "True", ",", "uses_vcxproj", "=", "False", ",", "path", "=", "path", ",", "sdk_based", "=", "sdk_based", ")", ",", "'2005'", ":", "VisualStudioVersion", "(", "'2005'", ",", "'Visual Studio 2005'", ",", "solution_version", "=", "'9.00'", ",", "project_version", "=", "'8.00'", ",", "flat_sln", "=", "False", ",", "uses_vcxproj", "=", "False", ",", "path", "=", "path", ",", "sdk_based", "=", "sdk_based", ")", ",", "'2005e'", ":", "VisualStudioVersion", "(", "'2005e'", ",", "'Visual Studio 2005'", ",", "solution_version", "=", "'9.00'", ",", "project_version", "=", 
"'8.00'", ",", "flat_sln", "=", "True", ",", "uses_vcxproj", "=", "False", ",", "path", "=", "path", ",", "sdk_based", "=", "sdk_based", ")", ",", "}", "return", "versions", "[", "str", "(", "name", ")", "]" ]
[ 218, 0 ]
[ 322, 28 ]
python
en
['en', 'en', 'en']
True
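A usage sketch; the install path below is illustrative, and the printed attributes mirror the constructor arguments, assuming VisualStudioVersion stores them:

version = _CreateVersion(
    '2013e', r'C:\Program Files (x86)\Microsoft Visual Studio 12.0')
# Express ('e') editions are created with flat_sln=True above.
print(version.short_name, version.flat_sln)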
_ConvertToCygpath
(path)
Convert to cygwin path if we are using cygwin.
Convert to cygwin path if we are using cygwin.
def _ConvertToCygpath(path): """Convert to cygwin path if we are using cygwin.""" if sys.platform == 'cygwin': p = subprocess.Popen(['cygpath', path], stdout=subprocess.PIPE) path = p.communicate()[0].strip() return path
[ "def", "_ConvertToCygpath", "(", "path", ")", ":", "if", "sys", ".", "platform", "==", "'cygwin'", ":", "p", "=", "subprocess", ".", "Popen", "(", "[", "'cygpath'", ",", "path", "]", ",", "stdout", "=", "subprocess", ".", "PIPE", ")", "path", "=", "p", ".", "communicate", "(", ")", "[", "0", "]", ".", "strip", "(", ")", "return", "path" ]
[ 325, 0 ]
[ 330, 13 ]
python
en
['en', 'en', 'en']
True
_DetectVisualStudioVersions
(versions_to_check, force_express)
Collect the list of installed visual studio versions. Returns: A list of visual studio versions installed in descending order of usage preference. Base this on the registry and a quick check if devenv.exe exists. Only versions 8-14 are considered. Possibilities are: 2005(e) - Visual Studio 2005 (8) 2008(e) - Visual Studio 2008 (9) 2010(e) - Visual Studio 2010 (10) 2012(e) - Visual Studio 2012 (11) 2013(e) - Visual Studio 2013 (12) 2015 - Visual Studio 2015 (14) Where (e) is e for express editions of MSVS and blank otherwise.
Collect the list of installed visual studio versions.
def _DetectVisualStudioVersions(versions_to_check, force_express): """Collect the list of installed visual studio versions. Returns: A list of visual studio versions installed in descending order of usage preference. Base this on the registry and a quick check if devenv.exe exists. Only versions 8-14 are considered. Possibilities are: 2005(e) - Visual Studio 2005 (8) 2008(e) - Visual Studio 2008 (9) 2010(e) - Visual Studio 2010 (10) 2012(e) - Visual Studio 2012 (11) 2013(e) - Visual Studio 2013 (12) 2015 - Visual Studio 2015 (14) Where (e) is e for express editions of MSVS and blank otherwise. """ version_to_year = { '8.0': '2005', '9.0': '2008', '10.0': '2010', '11.0': '2012', '12.0': '2013', '14.0': '2015', } versions = [] for version in versions_to_check: # Old method of searching for which VS version is installed # We don't use the 2010-encouraged-way because we also want to get the # path to the binaries, which it doesn't offer. keys = [r'HKLM\Software\Microsoft\VisualStudio\%s' % version, r'HKLM\Software\Wow6432Node\Microsoft\VisualStudio\%s' % version, r'HKLM\Software\Microsoft\VCExpress\%s' % version, r'HKLM\Software\Wow6432Node\Microsoft\VCExpress\%s' % version] for index in range(len(keys)): path = _RegistryGetValue(keys[index], 'InstallDir') if not path: continue path = _ConvertToCygpath(path) # Check for full. full_path = os.path.join(path, 'devenv.exe') express_path = os.path.join(path, '*express.exe') if not force_express and os.path.exists(full_path): # Add this one. versions.append(_CreateVersion(version_to_year[version], os.path.join(path, '..', '..'))) # Check for express. elif glob.glob(express_path): # Add this one. versions.append(_CreateVersion(version_to_year[version] + 'e', os.path.join(path, '..', '..'))) # The old method above does not work when only SDK is installed. keys = [r'HKLM\Software\Microsoft\VisualStudio\SxS\VC7', r'HKLM\Software\Wow6432Node\Microsoft\VisualStudio\SxS\VC7'] for index in range(len(keys)): path = _RegistryGetValue(keys[index], version) if not path: continue path = _ConvertToCygpath(path) if version != '14.0': # There is no Express edition for 2015. versions.append(_CreateVersion(version_to_year[version] + 'e', os.path.join(path, '..'), sdk_based=True)) return versions
[ "def", "_DetectVisualStudioVersions", "(", "versions_to_check", ",", "force_express", ")", ":", "version_to_year", "=", "{", "'8.0'", ":", "'2005'", ",", "'9.0'", ":", "'2008'", ",", "'10.0'", ":", "'2010'", ",", "'11.0'", ":", "'2012'", ",", "'12.0'", ":", "'2013'", ",", "'14.0'", ":", "'2015'", ",", "}", "versions", "=", "[", "]", "for", "version", "in", "versions_to_check", ":", "# Old method of searching for which VS version is installed", "# We don't use the 2010-encouraged-way because we also want to get the", "# path to the binaries, which it doesn't offer.", "keys", "=", "[", "r'HKLM\\Software\\Microsoft\\VisualStudio\\%s'", "%", "version", ",", "r'HKLM\\Software\\Wow6432Node\\Microsoft\\VisualStudio\\%s'", "%", "version", ",", "r'HKLM\\Software\\Microsoft\\VCExpress\\%s'", "%", "version", ",", "r'HKLM\\Software\\Wow6432Node\\Microsoft\\VCExpress\\%s'", "%", "version", "]", "for", "index", "in", "range", "(", "len", "(", "keys", ")", ")", ":", "path", "=", "_RegistryGetValue", "(", "keys", "[", "index", "]", ",", "'InstallDir'", ")", "if", "not", "path", ":", "continue", "path", "=", "_ConvertToCygpath", "(", "path", ")", "# Check for full.", "full_path", "=", "os", ".", "path", ".", "join", "(", "path", ",", "'devenv.exe'", ")", "express_path", "=", "os", ".", "path", ".", "join", "(", "path", ",", "'*express.exe'", ")", "if", "not", "force_express", "and", "os", ".", "path", ".", "exists", "(", "full_path", ")", ":", "# Add this one.", "versions", ".", "append", "(", "_CreateVersion", "(", "version_to_year", "[", "version", "]", ",", "os", ".", "path", ".", "join", "(", "path", ",", "'..'", ",", "'..'", ")", ")", ")", "# Check for express.", "elif", "glob", ".", "glob", "(", "express_path", ")", ":", "# Add this one.", "versions", ".", "append", "(", "_CreateVersion", "(", "version_to_year", "[", "version", "]", "+", "'e'", ",", "os", ".", "path", ".", "join", "(", "path", ",", "'..'", ",", "'..'", ")", ")", ")", "# The old method above does not work when only SDK is installed.", "keys", "=", "[", "r'HKLM\\Software\\Microsoft\\VisualStudio\\SxS\\VC7'", ",", "r'HKLM\\Software\\Wow6432Node\\Microsoft\\VisualStudio\\SxS\\VC7'", "]", "for", "index", "in", "range", "(", "len", "(", "keys", ")", ")", ":", "path", "=", "_RegistryGetValue", "(", "keys", "[", "index", "]", ",", "version", ")", "if", "not", "path", ":", "continue", "path", "=", "_ConvertToCygpath", "(", "path", ")", "if", "version", "!=", "'14.0'", ":", "# There is no Express edition for 2015.", "versions", ".", "append", "(", "_CreateVersion", "(", "version_to_year", "[", "version", "]", "+", "'e'", ",", "os", ".", "path", ".", "join", "(", "path", ",", "'..'", ")", ",", "sdk_based", "=", "True", ")", ")", "return", "versions" ]
[ 333, 0 ]
[ 397, 17 ]
python
en
['en', 'en', 'en']
True
SelectVisualStudioVersion
(version='auto', allow_fallback=True)
Select which version of Visual Studio projects to generate.

Arguments:
  version: Hook to allow caller to force a particular version (vs auto).
  allow_fallback: Whether to fall back to a default version instead of
      raising when no installed version can be found.
Returns:
  An object representing a visual studio project format version.
Select which version of Visual Studio projects to generate.
def SelectVisualStudioVersion(version='auto', allow_fallback=True):
  """Select which version of Visual Studio projects to generate.

  Arguments:
    version: Hook to allow caller to force a particular version (vs auto).
    allow_fallback: Whether to fall back to a default version instead of
        raising when no installed version can be found.
  Returns:
    An object representing a visual studio project format version.
  """
  # In auto mode, check environment variable for override.
  if version == 'auto':
    version = os.environ.get('GYP_MSVS_VERSION', 'auto')
  version_map = {
      'auto': ('14.0', '12.0', '10.0', '9.0', '8.0', '11.0'),
      '2005': ('8.0',),
      '2005e': ('8.0',),
      '2008': ('9.0',),
      '2008e': ('9.0',),
      '2010': ('10.0',),
      '2010e': ('10.0',),
      '2012': ('11.0',),
      '2012e': ('11.0',),
      '2013': ('12.0',),
      '2013e': ('12.0',),
      '2015': ('14.0',),
  }
  override_path = os.environ.get('GYP_MSVS_OVERRIDE_PATH')
  if override_path:
    msvs_version = os.environ.get('GYP_MSVS_VERSION')
    if not msvs_version:
      raise ValueError('GYP_MSVS_OVERRIDE_PATH requires GYP_MSVS_VERSION to be '
                       'set to a particular version (e.g. 2010e).')
    return _CreateVersion(msvs_version, override_path, sdk_based=True)
  version = str(version)
  versions = _DetectVisualStudioVersions(version_map[version],
                                         'e' in version)
  if not versions:
    if not allow_fallback:
      raise ValueError('Could not locate Visual Studio installation.')
    if version == 'auto':
      # Default to 2005 if we couldn't find anything
      return _CreateVersion('2005', None)
    else:
      return _CreateVersion(version, None)
  return versions[0]
[ "def", "SelectVisualStudioVersion", "(", "version", "=", "'auto'", ",", "allow_fallback", "=", "True", ")", ":", "# In auto mode, check environment variable for override.", "if", "version", "==", "'auto'", ":", "version", "=", "os", ".", "environ", ".", "get", "(", "'GYP_MSVS_VERSION'", ",", "'auto'", ")", "version_map", "=", "{", "'auto'", ":", "(", "'14.0'", ",", "'12.0'", ",", "'10.0'", ",", "'9.0'", ",", "'8.0'", ",", "'11.0'", ")", ",", "'2005'", ":", "(", "'8.0'", ",", ")", ",", "'2005e'", ":", "(", "'8.0'", ",", ")", ",", "'2008'", ":", "(", "'9.0'", ",", ")", ",", "'2008e'", ":", "(", "'9.0'", ",", ")", ",", "'2010'", ":", "(", "'10.0'", ",", ")", ",", "'2010e'", ":", "(", "'10.0'", ",", ")", ",", "'2012'", ":", "(", "'11.0'", ",", ")", ",", "'2012e'", ":", "(", "'11.0'", ",", ")", ",", "'2013'", ":", "(", "'12.0'", ",", ")", ",", "'2013e'", ":", "(", "'12.0'", ",", ")", ",", "'2015'", ":", "(", "'14.0'", ",", ")", ",", "}", "override_path", "=", "os", ".", "environ", ".", "get", "(", "'GYP_MSVS_OVERRIDE_PATH'", ")", "if", "override_path", ":", "msvs_version", "=", "os", ".", "environ", ".", "get", "(", "'GYP_MSVS_VERSION'", ")", "if", "not", "msvs_version", ":", "raise", "ValueError", "(", "'GYP_MSVS_OVERRIDE_PATH requires GYP_MSVS_VERSION to be '", "'set to a particular version (e.g. 2010e).'", ")", "return", "_CreateVersion", "(", "msvs_version", ",", "override_path", ",", "sdk_based", "=", "True", ")", "version", "=", "str", "(", "version", ")", "versions", "=", "_DetectVisualStudioVersions", "(", "version_map", "[", "version", "]", ",", "'e'", "in", "version", ")", "if", "not", "versions", ":", "if", "not", "allow_fallback", ":", "raise", "ValueError", "(", "'Could not locate Visual Studio installation.'", ")", "if", "version", "==", "'auto'", ":", "# Default to 2005 if we couldn't find anything", "return", "_CreateVersion", "(", "'2005'", ",", "None", ")", "else", ":", "return", "_CreateVersion", "(", "version", ",", "None", ")", "return", "versions", "[", "0", "]" ]
[ 400, 0 ]
[ 442, 20 ]
python
en
['en', 'en', 'en']
True
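A minimal usage sketch for the selector above, which also exercises the VisualStudioVersion accessors documented in the following records. The import path (gyp.MSVSVersion) is an assumption about where this module lives, and the printed values are only meaningful on a Windows machine with Visual Studio installed; on other machines the call returns the 2005 fallback object when allow_fallback is True.

# Assumed import path; everything else uses only methods shown in this document.
from gyp.MSVSVersion import SelectVisualStudioVersion

version = SelectVisualStudioVersion(version='auto', allow_fallback=True)
print(version.Description())       # e.g. 'Visual Studio 2013'
print(version.ProjectExtension())  # '.vcxproj' for 2010 and later
print(version.Path())              # root of the Visual Studio installation
print(version.ToolPath('cl.exe'))  # normalized path to a VC compiler tool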
VisualStudioVersion.Description
(self)
Get the full description of the version.
Get the full description of the version.
def Description(self): """Get the full description of the version.""" return self.description
[ "def", "Description", "(", "self", ")", ":", "return", "self", ".", "description" ]
[ 34, 2 ]
[ 36, 27 ]
python
en
['en', 'en', 'en']
True
VisualStudioVersion.SolutionVersion
(self)
Get the version number of the sln files.
Get the version number of the sln files.
def SolutionVersion(self): """Get the version number of the sln files.""" return self.solution_version
[ "def", "SolutionVersion", "(", "self", ")", ":", "return", "self", ".", "solution_version" ]
[ 38, 2 ]
[ 40, 32 ]
python
en
['en', 'en', 'en']
True
VisualStudioVersion.ProjectVersion
(self)
Get the version number of the vcproj or vcxproj files.
Get the version number of the vcproj or vcxproj files.
def ProjectVersion(self): """Get the version number of the vcproj or vcxproj files.""" return self.project_version
[ "def", "ProjectVersion", "(", "self", ")", ":", "return", "self", ".", "project_version" ]
[ 42, 2 ]
[ 44, 31 ]
python
en
['en', 'en', 'pt']
True
VisualStudioVersion.UsesVcxproj
(self)
Returns true if this version uses a vcxproj file.
Returns true if this version uses a vcxproj file.
def UsesVcxproj(self): """Returns true if this version uses a vcxproj file.""" return self.uses_vcxproj
[ "def", "UsesVcxproj", "(", "self", ")", ":", "return", "self", ".", "uses_vcxproj" ]
[ 49, 2 ]
[ 51, 28 ]
python
en
['en', 'en', 'en']
True
VisualStudioVersion.ProjectExtension
(self)
Returns the file extension for the project.
Returns the file extension for the project.
def ProjectExtension(self): """Returns the file extension for the project.""" return self.uses_vcxproj and '.vcxproj' or '.vcproj'
[ "def", "ProjectExtension", "(", "self", ")", ":", "return", "self", ".", "uses_vcxproj", "and", "'.vcxproj'", "or", "'.vcproj'" ]
[ 53, 2 ]
[ 55, 56 ]
python
en
['en', 'en', 'en']
True
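The `and/or` chain in ProjectExtension above is the pre-Python-2.5 spelling of a conditional expression; it is safe only because '.vcxproj' is truthy. A sketch of the modern equivalent, using a stand-in boolean rather than a real instance:

uses_vcxproj = True  # stand-in for self.uses_vcxproj
extension = '.vcxproj' if uses_vcxproj else '.vcproj'
assert extension == '.vcxproj'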
VisualStudioVersion.Path
(self)
Returns the path to Visual Studio installation.
Returns the path to Visual Studio installation.
def Path(self): """Returns the path to Visual Studio installation.""" return self.path
[ "def", "Path", "(", "self", ")", ":", "return", "self", ".", "path" ]
[ 57, 2 ]
[ 59, 20 ]
python
en
['en', 'en', 'en']
True
VisualStudioVersion.ToolPath
(self, tool)
Returns the path to a given compiler tool.
Returns the path to a given compiler tool.
def ToolPath(self, tool): """Returns the path to a given compiler tool. """ return os.path.normpath(os.path.join(self.path, "VC/bin", tool))
[ "def", "ToolPath", "(", "self", ",", "tool", ")", ":", "return", "os", ".", "path", ".", "normpath", "(", "os", ".", "path", ".", "join", "(", "self", ".", "path", ",", "\"VC/bin\"", ",", "tool", ")", ")" ]
[ 61, 2 ]
[ 63, 68 ]
python
en
['en', 'en', 'en']
True
VisualStudioVersion.DefaultToolset
(self)
Returns the msbuild toolset version that will be used in the absence of a user override.
Returns the msbuild toolset version that will be used in the absence of a user override.
def DefaultToolset(self): """Returns the msbuild toolset version that will be used in the absence of a user override.""" return self.default_toolset
[ "def", "DefaultToolset", "(", "self", ")", ":", "return", "self", ".", "default_toolset" ]
[ 65, 2 ]
[ 68, 31 ]
python
en
['en', 'en', 'en']
True
VisualStudioVersion.SetupScript
(self, target_arch)
Returns a command (with arguments) to be used to set up the environment.
Returns a command (with arguments) to be used to set up the environment.
def SetupScript(self, target_arch): """Returns a command (with arguments) to be used to set up the environment.""" # Check if we are running in the SDK command line environment and use # the setup script from the SDK if so. |target_arch| should be either # 'x86' or 'x64'. assert target_arch in ('x86', 'x64') sdk_dir = os.environ.get('WindowsSDKDir') if self.sdk_based and sdk_dir: return [os.path.normpath(os.path.join(sdk_dir, 'Bin/SetEnv.Cmd')), '/' + target_arch] else: # We don't use VC/vcvarsall.bat for x86 because vcvarsall calls # vcvars32, which it can only find if VS??COMNTOOLS is set, which it # isn't always. if target_arch == 'x86': if self.short_name >= '2013' and self.short_name[-1] != 'e' and ( os.environ.get('PROCESSOR_ARCHITECTURE') == 'AMD64' or os.environ.get('PROCESSOR_ARCHITEW6432') == 'AMD64'): # VS2013 and later, non-Express have a x64-x86 cross that we want # to prefer. return [os.path.normpath( os.path.join(self.path, 'VC/vcvarsall.bat')), 'amd64_x86'] # Otherwise, the standard x86 compiler. return [os.path.normpath( os.path.join(self.path, 'Common7/Tools/vsvars32.bat'))] else: assert target_arch == 'x64' arg = 'x86_amd64' # Use the 64-on-64 compiler if we're not using an express # edition and we're running on a 64bit OS. if self.short_name[-1] != 'e' and ( os.environ.get('PROCESSOR_ARCHITECTURE') == 'AMD64' or os.environ.get('PROCESSOR_ARCHITEW6432') == 'AMD64'): arg = 'amd64' return [os.path.normpath( os.path.join(self.path, 'VC/vcvarsall.bat')), arg]
[ "def", "SetupScript", "(", "self", ",", "target_arch", ")", ":", "# Check if we are running in the SDK command line environment and use", "# the setup script from the SDK if so. |target_arch| should be either", "# 'x86' or 'x64'.", "assert", "target_arch", "in", "(", "'x86'", ",", "'x64'", ")", "sdk_dir", "=", "os", ".", "environ", ".", "get", "(", "'WindowsSDKDir'", ")", "if", "self", ".", "sdk_based", "and", "sdk_dir", ":", "return", "[", "os", ".", "path", ".", "normpath", "(", "os", ".", "path", ".", "join", "(", "sdk_dir", ",", "'Bin/SetEnv.Cmd'", ")", ")", ",", "'/'", "+", "target_arch", "]", "else", ":", "# We don't use VC/vcvarsall.bat for x86 because vcvarsall calls", "# vcvars32, which it can only find if VS??COMNTOOLS is set, which it", "# isn't always.", "if", "target_arch", "==", "'x86'", ":", "if", "self", ".", "short_name", ">=", "'2013'", "and", "self", ".", "short_name", "[", "-", "1", "]", "!=", "'e'", "and", "(", "os", ".", "environ", ".", "get", "(", "'PROCESSOR_ARCHITECTURE'", ")", "==", "'AMD64'", "or", "os", ".", "environ", ".", "get", "(", "'PROCESSOR_ARCHITEW6432'", ")", "==", "'AMD64'", ")", ":", "# VS2013 and later, non-Express have a x64-x86 cross that we want", "# to prefer.", "return", "[", "os", ".", "path", ".", "normpath", "(", "os", ".", "path", ".", "join", "(", "self", ".", "path", ",", "'VC/vcvarsall.bat'", ")", ")", ",", "'amd64_x86'", "]", "# Otherwise, the standard x86 compiler.", "return", "[", "os", ".", "path", ".", "normpath", "(", "os", ".", "path", ".", "join", "(", "self", ".", "path", ",", "'Common7/Tools/vsvars32.bat'", ")", ")", "]", "else", ":", "assert", "target_arch", "==", "'x64'", "arg", "=", "'x86_amd64'", "# Use the 64-on-64 compiler if we're not using an express", "# edition and we're running on a 64bit OS.", "if", "self", ".", "short_name", "[", "-", "1", "]", "!=", "'e'", "and", "(", "os", ".", "environ", ".", "get", "(", "'PROCESSOR_ARCHITECTURE'", ")", "==", "'AMD64'", "or", "os", ".", "environ", ".", "get", "(", "'PROCESSOR_ARCHITEW6432'", ")", "==", "'AMD64'", ")", ":", "arg", "=", "'amd64'", "return", "[", "os", ".", "path", ".", "normpath", "(", "os", ".", "path", ".", "join", "(", "self", ".", "path", ",", "'VC/vcvarsall.bat'", ")", ")", ",", "arg", "]" ]
[ 70, 2 ]
[ 106, 62 ]
python
en
['en', 'en', 'en']
True
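The command list returned by SetupScript is typically run through a shell so the batch file can mutate the environment, and the resulting environment is then read back. The sketch below captures it by chaining a `set` after the script; capture_msvc_env is a hypothetical helper (not part of this module), `version` is assumed to be a VisualStudioVersion instance, and this only works on Windows.

import subprocess

def capture_msvc_env(version, target_arch='x64'):
    # Hypothetical helper: run the vcvars batch file, then `set` to dump
    # the environment it produced.
    cmd = subprocess.list2cmdline(version.SetupScript(target_arch)) + ' && set'
    output = subprocess.check_output(cmd, shell=True)
    env = {}
    for line in output.decode(errors='replace').splitlines():
        key, sep, value = line.partition('=')
        if sep:
            env[key.upper()] = value
    return env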
SeleniumBrowser.finalize
(self, result)
This runs after all tests have completed with nosetests.
This runs after all tests have completed with nosetests.
def finalize(self, result): """ This runs after all tests have completed with nosetests. """ proxy_helper.remove_proxy_zip_if_present()
[ "def", "finalize", "(", "self", ",", "result", ")", ":", "proxy_helper", ".", "remove_proxy_zip_if_present", "(", ")" ]
[ 412, 4 ]
[ 414, 50 ]
python
en
['en', 'en', 'en']
True
open_memory_channel
(max_buffer_size)
Open a channel for passing objects between tasks within a process. Memory channels are lightweight, cheap to allocate, and entirely in-memory. They don't involve any operating-system resources, or any kind of serialization. They just pass Python objects directly between tasks (with a possible stop in an internal buffer along the way). Channel objects can be closed by calling `~trio.abc.AsyncResource.aclose` or using ``async with``. They are *not* automatically closed when garbage collected. Closing memory channels isn't mandatory, but it is generally a good idea, because it helps avoid situations where tasks get stuck waiting on a channel when there's no-one on the other side. See :ref:`channel-shutdown` for details. Memory channel operations are all atomic with respect to cancellation, either `~trio.abc.ReceiveChannel.receive` will successfully return an object, or it will raise :exc:`Cancelled` while leaving the channel unchanged. Args: max_buffer_size (int or math.inf): The maximum number of items that can be buffered in the channel before :meth:`~trio.abc.SendChannel.send` blocks. Choosing a sensible value here is important to ensure that backpressure is communicated promptly and avoid unnecessary latency; see :ref:`channel-buffering` for more details. If in doubt, use 0. Returns: A pair ``(send_channel, receive_channel)``. If you have trouble remembering which order these go in, remember: data flows from left → right. In addition to the standard channel methods, all memory channel objects provide a ``statistics()`` method, which returns an object with the following fields: * ``current_buffer_used``: The number of items currently stored in the channel buffer. * ``max_buffer_size``: The maximum number of items allowed in the buffer, as passed to :func:`open_memory_channel`. * ``open_send_channels``: The number of open :class:`MemorySendChannel` endpoints pointing to this channel. Initially 1, but can be increased by :meth:`MemorySendChannel.clone`. * ``open_receive_channels``: Likewise, but for open :class:`MemoryReceiveChannel` endpoints. * ``tasks_waiting_send``: The number of tasks blocked in ``send`` on this channel (summing over all clones). * ``tasks_waiting_receive``: The number of tasks blocked in ``receive`` on this channel (summing over all clones).
Open a channel for passing objects between tasks within a process.
def open_memory_channel(max_buffer_size): """Open a channel for passing objects between tasks within a process. Memory channels are lightweight, cheap to allocate, and entirely in-memory. They don't involve any operating-system resources, or any kind of serialization. They just pass Python objects directly between tasks (with a possible stop in an internal buffer along the way). Channel objects can be closed by calling `~trio.abc.AsyncResource.aclose` or using ``async with``. They are *not* automatically closed when garbage collected. Closing memory channels isn't mandatory, but it is generally a good idea, because it helps avoid situations where tasks get stuck waiting on a channel when there's no-one on the other side. See :ref:`channel-shutdown` for details. Memory channel operations are all atomic with respect to cancellation, either `~trio.abc.ReceiveChannel.receive` will successfully return an object, or it will raise :exc:`Cancelled` while leaving the channel unchanged. Args: max_buffer_size (int or math.inf): The maximum number of items that can be buffered in the channel before :meth:`~trio.abc.SendChannel.send` blocks. Choosing a sensible value here is important to ensure that backpressure is communicated promptly and avoid unnecessary latency; see :ref:`channel-buffering` for more details. If in doubt, use 0. Returns: A pair ``(send_channel, receive_channel)``. If you have trouble remembering which order these go in, remember: data flows from left → right. In addition to the standard channel methods, all memory channel objects provide a ``statistics()`` method, which returns an object with the following fields: * ``current_buffer_used``: The number of items currently stored in the channel buffer. * ``max_buffer_size``: The maximum number of items allowed in the buffer, as passed to :func:`open_memory_channel`. * ``open_send_channels``: The number of open :class:`MemorySendChannel` endpoints pointing to this channel. Initially 1, but can be increased by :meth:`MemorySendChannel.clone`. * ``open_receive_channels``: Likewise, but for open :class:`MemoryReceiveChannel` endpoints. * ``tasks_waiting_send``: The number of tasks blocked in ``send`` on this channel (summing over all clones). * ``tasks_waiting_receive``: The number of tasks blocked in ``receive`` on this channel (summing over all clones). """ if max_buffer_size != inf and not isinstance(max_buffer_size, int): raise TypeError("max_buffer_size must be an integer or math.inf") if max_buffer_size < 0: raise ValueError("max_buffer_size must be >= 0") state = MemoryChannelState(max_buffer_size) return ( MemorySendChannel._create(state), MemoryReceiveChannel._create(state), )
[ "def", "open_memory_channel", "(", "max_buffer_size", ")", ":", "if", "max_buffer_size", "!=", "inf", "and", "not", "isinstance", "(", "max_buffer_size", ",", "int", ")", ":", "raise", "TypeError", "(", "\"max_buffer_size must be an integer or math.inf\"", ")", "if", "max_buffer_size", "<", "0", ":", "raise", "ValueError", "(", "\"max_buffer_size must be >= 0\"", ")", "state", "=", "MemoryChannelState", "(", "max_buffer_size", ")", "return", "(", "MemorySendChannel", ".", "_create", "(", "state", ")", ",", "MemoryReceiveChannel", ".", "_create", "(", "state", ")", ",", ")" ]
[ 14, 0 ]
[ 74, 5 ]
python
en
['en', 'en', 'en']
True
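A short end-to-end sketch of the channel above, assuming it is exposed as trio.open_memory_channel (this function appears to come from trio). A zero-size buffer makes every send wait for a matching receive, which is the backpressure behavior the docstring recommends:

import trio

async def main():
    send_channel, receive_channel = trio.open_memory_channel(0)

    async def producer():
        # Closing the send side (via `async with`) ends the receive loop.
        async with send_channel:
            for i in range(3):
                await send_channel.send(i)

    async def consumer():
        async with receive_channel:
            async for value in receive_channel:
                print('got', value)

    async with trio.open_nursery() as nursery:
        nursery.start_soon(producer)
        nursery.start_soon(consumer)

trio.run(main)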
Tool.__init__
(self, name, attrs=None)
Initializes the tool. Args: name: Tool name. attrs: Dict of tool attributes; may be None.
Initializes the tool.
def __init__(self, name, attrs=None): """Initializes the tool. Args: name: Tool name. attrs: Dict of tool attributes; may be None. """ self._attrs = attrs or {} self._attrs['Name'] = name
[ "def", "__init__", "(", "self", ",", "name", ",", "attrs", "=", "None", ")", ":", "self", ".", "_attrs", "=", "attrs", "or", "{", "}", "self", ".", "_attrs", "[", "'Name'", "]", "=", "name" ]
[ 15, 2 ]
[ 23, 30 ]
python
en
['en', 'en', 'en']
True
Tool._GetSpecification
(self)
Creates an element for the tool. Returns: A new xml.dom.Element for the tool.
Creates an element for the tool.
def _GetSpecification(self): """Creates an element for the tool. Returns: A new xml.dom.Element for the tool. """ return ['Tool', self._attrs]
[ "def", "_GetSpecification", "(", "self", ")", ":", "return", "[", "'Tool'", ",", "self", ".", "_attrs", "]" ]
[ 25, 2 ]
[ 31, 32 ]
python
en
['en', 'en', 'en']
True
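A tiny illustration of the specification format these two Tool methods build; the import path (gyp's MSVSProject module) is an assumption:

from gyp.MSVSProject import Tool

tool = Tool('VCCLCompilerTool', {'Optimization': '2'})
# The spec is a [name, attrs] list; 'Name' is folded into the attrs dict.
assert tool._GetSpecification() == \
    ['Tool', {'Optimization': '2', 'Name': 'VCCLCompilerTool'}]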
Filter.__init__
(self, name, contents=None)
Initializes the folder. Args: name: Filter (folder) name. contents: List of filenames and/or Filter objects contained.
Initializes the folder.
def __init__(self, name, contents=None): """Initializes the folder. Args: name: Filter (folder) name. contents: List of filenames and/or Filter objects contained. """ self.name = name self.contents = list(contents or [])
[ "def", "__init__", "(", "self", ",", "name", ",", "contents", "=", "None", ")", ":", "self", ".", "name", "=", "name", "self", ".", "contents", "=", "list", "(", "contents", "or", "[", "]", ")" ]
[ 36, 2 ]
[ 44, 40 ]
python
en
['en', 'zu', 'en']
True
Writer.__init__
(self, project_path, version, name, guid=None, platforms=None)
Initializes the project. Args: project_path: Path to the project file. version: Format version to emit. name: Name of the project. guid: GUID to use for project, if not None. platforms: Array of string, the supported platforms. If null, ['Win32']
Initializes the project.
def __init__(self, project_path, version, name, guid=None, platforms=None): """Initializes the project. Args: project_path: Path to the project file. version: Format version to emit. name: Name of the project. guid: GUID to use for project, if not None. platforms: Array of string, the supported platforms. If null, ['Win32'] """ self.project_path = project_path self.version = version self.name = name self.guid = guid # Default to Win32 for platforms. if not platforms: platforms = ['Win32'] # Initialize the specifications of the various sections. self.platform_section = ['Platforms'] for platform in platforms: self.platform_section.append(['Platform', {'Name': platform}]) self.tool_files_section = ['ToolFiles'] self.configurations_section = ['Configurations'] self.files_section = ['Files'] # Keep a dict keyed on filename to speed up access. self.files_dict = dict()
[ "def", "__init__", "(", "self", ",", "project_path", ",", "version", ",", "name", ",", "guid", "=", "None", ",", "platforms", "=", "None", ")", ":", "self", ".", "project_path", "=", "project_path", "self", ".", "version", "=", "version", "self", ".", "name", "=", "name", "self", ".", "guid", "=", "guid", "# Default to Win32 for platforms.", "if", "not", "platforms", ":", "platforms", "=", "[", "'Win32'", "]", "# Initialize the specifications of the various sections.", "self", ".", "platform_section", "=", "[", "'Platforms'", "]", "for", "platform", "in", "platforms", ":", "self", ".", "platform_section", ".", "append", "(", "[", "'Platform'", ",", "{", "'Name'", ":", "platform", "}", "]", ")", "self", ".", "tool_files_section", "=", "[", "'ToolFiles'", "]", "self", ".", "configurations_section", "=", "[", "'Configurations'", "]", "self", ".", "files_section", "=", "[", "'Files'", "]", "# Keep a dict keyed on filename to speed up access.", "self", ".", "files_dict", "=", "dict", "(", ")" ]
[ 53, 2 ]
[ 81, 28 ]
python
en
['en', 'en', 'en']
True
Writer.AddToolFile
(self, path)
Adds a tool file to the project. Args: path: Relative path from project to tool file.
Adds a tool file to the project.
def AddToolFile(self, path): """Adds a tool file to the project. Args: path: Relative path from project to tool file. """ self.tool_files_section.append(['ToolFile', {'RelativePath': path}])
[ "def", "AddToolFile", "(", "self", ",", "path", ")", ":", "self", ".", "tool_files_section", ".", "append", "(", "[", "'ToolFile'", ",", "{", "'RelativePath'", ":", "path", "}", "]", ")" ]
[ 83, 2 ]
[ 89, 72 ]
python
en
['en', 'en', 'en']
True
Writer._GetSpecForConfiguration
(self, config_type, config_name, attrs, tools)
Returns the specification for a configuration.

Args:
  config_type: Type of configuration node.
  config_name: Configuration name.
  attrs: Dict of configuration attributes; may be None.
  tools: List of tools (strings or Tool objects); may be None.
Returns:
  A list of the form [config_type, attrs, tool1, tool2, ...], ready for
  serialization by easy_xml.
Returns the specification for a configuration.
def _GetSpecForConfiguration(self, config_type, config_name, attrs, tools):
  """Returns the specification for a configuration.

  Args:
    config_type: Type of configuration node.
    config_name: Configuration name.
    attrs: Dict of configuration attributes; may be None.
    tools: List of tools (strings or Tool objects); may be None.
  Returns:
    A list of the form [config_type, attrs, tool1, tool2, ...], ready for
    serialization by easy_xml.
  """
  # Handle defaults
  if not attrs:
    attrs = {}
  if not tools:
    tools = []

  # Add configuration node and its attributes
  node_attrs = attrs.copy()
  node_attrs['Name'] = config_name
  specification = [config_type, node_attrs]

  # Add tool nodes and their attributes
  if tools:
    for t in tools:
      if isinstance(t, Tool):
        specification.append(t._GetSpecification())
      else:
        specification.append(Tool(t)._GetSpecification())
  return specification
[ "def", "_GetSpecForConfiguration", "(", "self", ",", "config_type", ",", "config_name", ",", "attrs", ",", "tools", ")", ":", "# Handle defaults", "if", "not", "attrs", ":", "attrs", "=", "{", "}", "if", "not", "tools", ":", "tools", "=", "[", "]", "# Add configuration node and its attributes", "node_attrs", "=", "attrs", ".", "copy", "(", ")", "node_attrs", "[", "'Name'", "]", "=", "config_name", "specification", "=", "[", "config_type", ",", "node_attrs", "]", "# Add tool nodes and their attributes", "if", "tools", ":", "for", "t", "in", "tools", ":", "if", "isinstance", "(", "t", ",", "Tool", ")", ":", "specification", ".", "append", "(", "t", ".", "_GetSpecification", "(", ")", ")", "else", ":", "specification", ".", "append", "(", "Tool", "(", "t", ")", ".", "_GetSpecification", "(", ")", ")", "return", "specification" ]
[ 91, 2 ]
[ 119, 24 ]
python
en
['en', 'en', 'en']
True
Writer.AddConfig
(self, name, attrs=None, tools=None)
Adds a configuration to the project. Args: name: Configuration name. attrs: Dict of configuration attributes; may be None. tools: List of tools (strings or Tool objects); may be None.
Adds a configuration to the project.
def AddConfig(self, name, attrs=None, tools=None): """Adds a configuration to the project. Args: name: Configuration name. attrs: Dict of configuration attributes; may be None. tools: List of tools (strings or Tool objects); may be None. """ spec = self._GetSpecForConfiguration('Configuration', name, attrs, tools) self.configurations_section.append(spec)
[ "def", "AddConfig", "(", "self", ",", "name", ",", "attrs", "=", "None", ",", "tools", "=", "None", ")", ":", "spec", "=", "self", ".", "_GetSpecForConfiguration", "(", "'Configuration'", ",", "name", ",", "attrs", ",", "tools", ")", "self", ".", "configurations_section", ".", "append", "(", "spec", ")" ]
[ 122, 2 ]
[ 131, 44 ]
python
en
['en', 'en', 'en']
True
Writer._AddFilesToNode
(self, parent, files)
Adds files and/or filters to the parent node.

Args:
  parent: Destination node
  files: A list of Filter objects and/or relative paths to files.

Will call itself recursively if the files list contains Filter objects.
Adds files and/or filters to the parent node.
def _AddFilesToNode(self, parent, files):
  """Adds files and/or filters to the parent node.

  Args:
    parent: Destination node
    files: A list of Filter objects and/or relative paths to files.

  Will call itself recursively if the files list contains Filter objects.
  """
  for f in files:
    if isinstance(f, Filter):
      node = ['Filter', {'Name': f.name}]
      self._AddFilesToNode(node, f.contents)
    else:
      node = ['File', {'RelativePath': f}]
      self.files_dict[f] = node
    parent.append(node)
[ "def", "_AddFilesToNode", "(", "self", ",", "parent", ",", "files", ")", ":", "for", "f", "in", "files", ":", "if", "isinstance", "(", "f", ",", "Filter", ")", ":", "node", "=", "[", "'Filter'", ",", "{", "'Name'", ":", "f", ".", "name", "}", "]", "self", ".", "_AddFilesToNode", "(", "node", ",", "f", ".", "contents", ")", "else", ":", "node", "=", "[", "'File'", ",", "{", "'RelativePath'", ":", "f", "}", "]", "self", ".", "files_dict", "[", "f", "]", "=", "node", "parent", ".", "append", "(", "node", ")" ]
[ 133, 2 ]
[ 149, 25 ]
python
en
['en', 'en', 'en']
True
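To make the recursion concrete, here is the nested-list structure that AddFiles and _AddFilesToNode produce. The import path is an assumption; passing None for version is safe here because version is only consulted at write time.

from gyp.MSVSProject import Writer, Filter

w = Writer('p.vcproj', None, 'p')  # version only matters in WriteIfChanged()
w.AddFiles([Filter('src', ['a.cc']), 'b.cc'])
assert w.files_section == [
    'Files',
    ['Filter', {'Name': 'src'}, ['File', {'RelativePath': 'a.cc'}]],
    ['File', {'RelativePath': 'b.cc'}],
]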
Writer.AddFiles
(self, files)
Adds files to the project. Args: files: A list of Filter objects and/or relative paths to files. This makes a copy of the file/filter tree at the time of this call. If you later add files to a Filter object which was passed into a previous call to AddFiles(), it will not be reflected in this project.
Adds files to the project.
def AddFiles(self, files): """Adds files to the project. Args: files: A list of Filter objects and/or relative paths to files. This makes a copy of the file/filter tree at the time of this call. If you later add files to a Filter object which was passed into a previous call to AddFiles(), it will not be reflected in this project. """ self._AddFilesToNode(self.files_section, files)
[ "def", "AddFiles", "(", "self", ",", "files", ")", ":", "self", ".", "_AddFilesToNode", "(", "self", ".", "files_section", ",", "files", ")" ]
[ 151, 2 ]
[ 161, 51 ]
python
en
['en', 'en', 'en']
True
Writer.AddFileConfig
(self, path, config, attrs=None, tools=None)
Adds a configuration to a file. Args: path: Relative path to the file. config: Name of configuration to add. attrs: Dict of configuration attributes; may be None. tools: List of tools (strings or Tool objects); may be None. Raises: ValueError: Relative path does not match any file added via AddFiles().
Adds a configuration to a file.
def AddFileConfig(self, path, config, attrs=None, tools=None): """Adds a configuration to a file. Args: path: Relative path to the file. config: Name of configuration to add. attrs: Dict of configuration attributes; may be None. tools: List of tools (strings or Tool objects); may be None. Raises: ValueError: Relative path does not match any file added via AddFiles(). """ # Find the file node with the right relative path parent = self.files_dict.get(path) if not parent: raise ValueError('AddFileConfig: file "%s" not in project.' % path) # Add the config to the file node spec = self._GetSpecForConfiguration('FileConfiguration', config, attrs, tools) parent.append(spec)
[ "def", "AddFileConfig", "(", "self", ",", "path", ",", "config", ",", "attrs", "=", "None", ",", "tools", "=", "None", ")", ":", "# Find the file node with the right relative path", "parent", "=", "self", ".", "files_dict", ".", "get", "(", "path", ")", "if", "not", "parent", ":", "raise", "ValueError", "(", "'AddFileConfig: file \"%s\" not in project.'", "%", "path", ")", "# Add the config to the file node", "spec", "=", "self", ".", "_GetSpecForConfiguration", "(", "'FileConfiguration'", ",", "config", ",", "attrs", ",", "tools", ")", "parent", ".", "append", "(", "spec", ")" ]
[ 165, 2 ]
[ 185, 23 ]
python
en
['en', 'en', 'en']
True
Writer.WriteIfChanged
(self)
Writes the project file.
Writes the project file.
def WriteIfChanged(self): """Writes the project file.""" # First create XML content definition content = [ 'VisualStudioProject', {'ProjectType': 'Visual C++', 'Version': self.version.ProjectVersion(), 'Name': self.name, 'ProjectGUID': self.guid, 'RootNamespace': self.name, 'Keyword': 'Win32Proj' }, self.platform_section, self.tool_files_section, self.configurations_section, ['References'], # empty section self.files_section, ['Globals'] # empty section ] easy_xml.WriteXmlIfChanged(content, self.project_path, encoding="Windows-1252")
[ "def", "WriteIfChanged", "(", "self", ")", ":", "# First create XML content definition", "content", "=", "[", "'VisualStudioProject'", ",", "{", "'ProjectType'", ":", "'Visual C++'", ",", "'Version'", ":", "self", ".", "version", ".", "ProjectVersion", "(", ")", ",", "'Name'", ":", "self", ".", "name", ",", "'ProjectGUID'", ":", "self", ".", "guid", ",", "'RootNamespace'", ":", "self", ".", "name", ",", "'Keyword'", ":", "'Win32Proj'", "}", ",", "self", ".", "platform_section", ",", "self", ".", "tool_files_section", ",", "self", ".", "configurations_section", ",", "[", "'References'", "]", ",", "# empty section", "self", ".", "files_section", ",", "[", "'Globals'", "]", "# empty section", "]", "easy_xml", ".", "WriteXmlIfChanged", "(", "content", ",", "self", ".", "project_path", ",", "encoding", "=", "\"Windows-1252\"", ")" ]
[ 187, 2 ]
[ 207, 55 ]
python
en
['en', 'en', 'en']
True
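Pulling the Writer, Tool, and Filter pieces above together, a minimal end-to-end sketch that emits a .vcproj file. The import paths and the example GUID are assumptions, and a real Visual Studio installation is only needed if the version lookup is not allowed to fall back.

from gyp.MSVSProject import Writer, Tool, Filter
from gyp.MSVSVersion import SelectVisualStudioVersion

version = SelectVisualStudioVersion('2008')
project = Writer('hello.vcproj', version, 'hello',
                 guid='{01234567-89AB-CDEF-0123-456789ABCDEF}')
project.AddToolFile('rules/custom.rules')
project.AddConfig('Debug|Win32',
                  attrs={'ConfigurationType': '1'},
                  tools=[Tool('VCCLCompilerTool', {'Optimization': '0'})])
project.AddFiles([Filter('src', ['src/main.cc'])])
project.AddFileConfig('src/main.cc', 'Debug|Win32',
                      tools=['VCCLCompilerTool'])
project.WriteIfChanged()  # writes hello.vcproj only if the content changed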
make_uplift_classification
(n_samples=1000, treatment_name=['control', 'treatment1', 'treatment2', 'treatment3'], y_name='conversion', n_classification_features=10, n_classification_informative=5, n_classification_redundant=0, n_classification_repeated=0, n_uplift_increase_dict={'treatment1': 2, 'treatment2': 2, 'treatment3': 2}, n_uplift_decrease_dict={'treatment1': 0, 'treatment2': 0, 'treatment3': 0}, delta_uplift_increase_dict={'treatment1': 0.02, 'treatment2': 0.05, 'treatment3': 0.1}, delta_uplift_decrease_dict={'treatment1': 0., 'treatment2': 0., 'treatment3': 0.}, n_uplift_increase_mix_informative_dict={'treatment1': 1, 'treatment2': 1, 'treatment3': 1}, n_uplift_decrease_mix_informative_dict={'treatment1': 0, 'treatment2': 0, 'treatment3': 0}, positive_class_proportion=0.5, random_seed=20190101)
Generate a synthetic dataset for classification uplift modeling problem.

Parameters
----------
n_samples : int, optional (default=1000)
    The number of samples to be generated for each treatment group.
treatment_name: list, optional (default = ['control','treatment1','treatment2','treatment3'])
    The list of treatment names.
y_name: string, optional (default = 'conversion')
    The name of the outcome variable to be used as a column in the output dataframe.
n_classification_features: int, optional (default = 10)
    Total number of features for base classification
n_classification_informative: int, optional (default = 5)
    Total number of informative features for base classification
n_classification_redundant: int, optional (default = 0)
    Total number of redundant features for base classification
n_classification_repeated: int, optional (default = 0)
    Total number of repeated features for base classification
n_uplift_increase_dict: dictionary, optional (default: {'treatment1': 2, 'treatment2': 2, 'treatment3': 2})
    Number of features for generating positive treatment effects for corresponding treatment group.
    Dictionary of {treatment_key: number_of_features_for_increase_uplift}.
n_uplift_decrease_dict: dictionary, optional (default: {'treatment1': 0, 'treatment2': 0, 'treatment3': 0})
    Number of features for generating negative treatment effects for corresponding treatment group.
    Dictionary of {treatment_key: number_of_features_for_increase_uplift}.
delta_uplift_increase_dict: dictionary, optional (default: {'treatment1': .02, 'treatment2': .05, 'treatment3': .1})
    Positive treatment effect created by the positive uplift features on the base classification label.
    Dictionary of {treatment_key: increase_delta}.
delta_uplift_decrease_dict: dictionary, optional (default: {'treatment1': 0., 'treatment2': 0., 'treatment3': 0.})
    Negative treatment effect created by the negative uplift features on the base classification label.
    Dictionary of {treatment_key: increase_delta}.
n_uplift_increase_mix_informative_dict: dictionary, optional (default: {'treatment1': 1, 'treatment2': 1, 'treatment3': 1})
    Number of positive mix features for each treatment. The positive mix feature is defined as a linear combination
    of a randomly selected informative classification feature and a randomly selected positive uplift feature.
    The linear combination is made by two coefficients sampled from a uniform distribution between -1 and 1.
n_uplift_decrease_mix_informative_dict: dictionary, optional (default: {'treatment1': 0, 'treatment2': 0, 'treatment3': 0})
    Number of negative mix features for each treatment. The negative mix feature is defined as a linear combination
    of a randomly selected informative classification feature and a randomly selected negative uplift feature.
    The linear combination is made by two coefficients sampled from a uniform distribution between -1 and 1.
positive_class_proportion: float, optional (default = 0.5)
    The proportion of positive label (1) in the control group.
random_seed : int, optional (default = 20190101)
    The random seed to be used in the data generation process.

Returns
-------
df_res : DataFrame
    A data frame containing the treatment label, features, and outcome variable.
x_name : list
    The list of feature names generated.

Notes
-----
The algorithm for generating the base classification dataset is adapted from the make_classification method in the
sklearn package, that uses the algorithm in Guyon [1] designed to generate the "Madelon" dataset.

References
----------
.. [1] I. Guyon, "Design of experiments for the NIPS 2003 variable
       selection benchmark", 2003.
Generate a synthetic dataset for classification uplift modeling problem.
def make_uplift_classification(n_samples=1000,
                               treatment_name=['control', 'treatment1', 'treatment2', 'treatment3'],
                               y_name='conversion',
                               n_classification_features=10,
                               n_classification_informative=5,
                               n_classification_redundant=0,
                               n_classification_repeated=0,
                               n_uplift_increase_dict={'treatment1': 2, 'treatment2': 2, 'treatment3': 2},
                               n_uplift_decrease_dict={'treatment1': 0, 'treatment2': 0, 'treatment3': 0},
                               delta_uplift_increase_dict={'treatment1': 0.02, 'treatment2': 0.05, 'treatment3': 0.1},
                               delta_uplift_decrease_dict={'treatment1': 0., 'treatment2': 0., 'treatment3': 0.},
                               n_uplift_increase_mix_informative_dict={'treatment1': 1, 'treatment2': 1, 'treatment3': 1},
                               n_uplift_decrease_mix_informative_dict={'treatment1': 0, 'treatment2': 0, 'treatment3': 0},
                               positive_class_proportion=0.5,
                               random_seed=20190101):
    """Generate a synthetic dataset for classification uplift modeling problem.

    Parameters
    ----------
    n_samples : int, optional (default=1000)
        The number of samples to be generated for each treatment group.
    treatment_name: list, optional (default = ['control','treatment1','treatment2','treatment3'])
        The list of treatment names.
    y_name: string, optional (default = 'conversion')
        The name of the outcome variable to be used as a column in the output dataframe.
    n_classification_features: int, optional (default = 10)
        Total number of features for base classification
    n_classification_informative: int, optional (default = 5)
        Total number of informative features for base classification
    n_classification_redundant: int, optional (default = 0)
        Total number of redundant features for base classification
    n_classification_repeated: int, optional (default = 0)
        Total number of repeated features for base classification
    n_uplift_increase_dict: dictionary, optional (default: {'treatment1': 2, 'treatment2': 2, 'treatment3': 2})
        Number of features for generating positive treatment effects for corresponding treatment group.
        Dictionary of {treatment_key: number_of_features_for_increase_uplift}.
    n_uplift_decrease_dict: dictionary, optional (default: {'treatment1': 0, 'treatment2': 0, 'treatment3': 0})
        Number of features for generating negative treatment effects for corresponding treatment group.
        Dictionary of {treatment_key: number_of_features_for_increase_uplift}.
    delta_uplift_increase_dict: dictionary, optional (default: {'treatment1': .02, 'treatment2': .05, 'treatment3': .1})
        Positive treatment effect created by the positive uplift features on the base classification label.
        Dictionary of {treatment_key: increase_delta}.
    delta_uplift_decrease_dict: dictionary, optional (default: {'treatment1': 0., 'treatment2': 0., 'treatment3': 0.})
        Negative treatment effect created by the negative uplift features on the base classification label.
        Dictionary of {treatment_key: increase_delta}.
    n_uplift_increase_mix_informative_dict: dictionary, optional (default: {'treatment1': 1, 'treatment2': 1, 'treatment3': 1})
        Number of positive mix features for each treatment. The positive mix feature is defined as a linear combination
        of a randomly selected informative classification feature and a randomly selected positive uplift feature.
        The linear combination is made by two coefficients sampled from a uniform distribution between -1 and 1.
    n_uplift_decrease_mix_informative_dict: dictionary, optional (default: {'treatment1': 0, 'treatment2': 0, 'treatment3': 0})
        Number of negative mix features for each treatment. The negative mix feature is defined as a linear combination
        of a randomly selected informative classification feature and a randomly selected negative uplift feature.
        The linear combination is made by two coefficients sampled from a uniform distribution between -1 and 1.
    positive_class_proportion: float, optional (default = 0.5)
        The proportion of positive label (1) in the control group.
    random_seed : int, optional (default = 20190101)
        The random seed to be used in the data generation process.

    Returns
    -------
    df_res : DataFrame
        A data frame containing the treatment label, features, and outcome variable.
    x_name : list
        The list of feature names generated.

    Notes
    -----
    The algorithm for generating the base classification dataset is adapted from the make_classification method in the
    sklearn package, that uses the algorithm in Guyon [1] designed to generate the "Madelon" dataset.

    References
    ----------
    .. [1] I. Guyon, "Design of experiments for the NIPS 2003 variable
           selection benchmark", 2003.
    """
    # set seed
    np.random.seed(seed=random_seed)

    # create data frame
    df_res = pd.DataFrame()

    # generate treatment key
    n_all = n_samples * len(treatment_name)
    treatment_list = []
    for ti in treatment_name:
        treatment_list += [ti] * n_samples
    treatment_list = np.random.permutation(treatment_list)
    df_res['treatment_group_key'] = treatment_list

    # generate features and labels
    X1, Y1 = make_classification(n_samples=n_all, n_features=n_classification_features,
                                 n_informative=n_classification_informative,
                                 n_redundant=n_classification_redundant,
                                 n_repeated=n_classification_repeated, n_clusters_per_class=1,
                                 weights=[1-positive_class_proportion, positive_class_proportion])

    x_name = []
    x_informative_name = []
    for xi in range(n_classification_informative):
        x_name_i = 'x' + str(len(x_name)+1) + '_informative'
        x_name.append(x_name_i)
        x_informative_name.append(x_name_i)
        df_res[x_name_i] = X1[:, xi]
    for xi in range(n_classification_redundant):
        x_name_i = 'x' + str(len(x_name)+1) + '_redundant'
        x_name.append(x_name_i)
        df_res[x_name_i] = X1[:, n_classification_informative+xi]
    for xi in range(n_classification_repeated):
        x_name_i = 'x' + str(len(x_name)+1) + '_repeated'
        x_name.append(x_name_i)
        df_res[x_name_i] = X1[:, n_classification_informative+n_classification_redundant+xi]

    for xi in range(n_classification_features - n_classification_informative
                    - n_classification_redundant - n_classification_repeated):
        x_name_i = 'x' + str(len(x_name)+1) + '_irrelevant'
        x_name.append(x_name_i)
        df_res[x_name_i] = np.random.normal(0, 1, n_all)

    # default treatment effects
    Y = Y1.copy()
    Y_increase = np.zeros_like(Y1)
    Y_decrease = np.zeros_like(Y1)

    # generate uplift (positive)
    for treatment_key_i in treatment_name:
        treatment_index = df_res.index[df_res['treatment_group_key'] == treatment_key_i].tolist()
        if treatment_key_i in n_uplift_increase_dict and n_uplift_increase_dict[treatment_key_i] > 0:
            x_uplift_increase_name = []
            adjust_class_proportion = (delta_uplift_increase_dict[treatment_key_i]) / (1-positive_class_proportion)
            X_increase, Y_increase = make_classification(n_samples=n_all,
                                                         n_features=n_uplift_increase_dict[treatment_key_i],
                                                         n_informative=n_uplift_increase_dict[treatment_key_i],
                                                         n_redundant=0, n_clusters_per_class=1,
                                                         weights=[1-adjust_class_proportion, adjust_class_proportion])
            for xi in range(n_uplift_increase_dict[treatment_key_i]):
                x_name_i = 'x' + str(len(x_name)+1) + '_uplift_increase'
                x_name.append(x_name_i)
                x_uplift_increase_name.append(x_name_i)
                df_res[x_name_i] = X_increase[:, xi]
            Y[treatment_index] = Y[treatment_index] + Y_increase[treatment_index]
            if n_uplift_increase_mix_informative_dict[treatment_key_i] > 0:
                for xi in range(n_uplift_increase_mix_informative_dict[treatment_key_i]):
                    x_name_i = 'x' + str(len(x_name)+1) + '_increase_mix'
                    x_name.append(x_name_i)
                    df_res[x_name_i] = (np.random.uniform(-1, 1) * df_res[np.random.choice(x_informative_name)]
                                        + np.random.uniform(-1, 1) * df_res[np.random.choice(x_uplift_increase_name)])

    # generate uplift (negative)
    for treatment_key_i in treatment_name:
        treatment_index = df_res.index[df_res['treatment_group_key'] == treatment_key_i].tolist()
        if treatment_key_i in n_uplift_decrease_dict and n_uplift_decrease_dict[treatment_key_i] > 0:
            x_uplift_decrease_name = []
            adjust_class_proportion = (delta_uplift_decrease_dict[treatment_key_i]) / (1-positive_class_proportion)
            X_decrease, Y_decrease = make_classification(n_samples=n_all,
                                                         n_features=n_uplift_decrease_dict[treatment_key_i],
                                                         n_informative=n_uplift_decrease_dict[treatment_key_i],
                                                         n_redundant=0, n_clusters_per_class=1,
                                                         weights=[1-adjust_class_proportion, adjust_class_proportion])
            for xi in range(n_uplift_decrease_dict[treatment_key_i]):
                x_name_i = 'x' + str(len(x_name)+1) + '_uplift_decrease'
                x_name.append(x_name_i)
                x_uplift_decrease_name.append(x_name_i)
                df_res[x_name_i] = X_decrease[:, xi]
            Y[treatment_index] = Y[treatment_index] - Y_decrease[treatment_index]
            if n_uplift_decrease_mix_informative_dict[treatment_key_i] > 0:
                for xi in range(n_uplift_decrease_mix_informative_dict[treatment_key_i]):
                    x_name_i = 'x' + str(len(x_name)+1) + '_decrease_mix'
                    x_name.append(x_name_i)
                    df_res[x_name_i] = (np.random.uniform(-1, 1) * df_res[np.random.choice(x_informative_name)]
                                        + np.random.uniform(-1, 1) * df_res[np.random.choice(x_uplift_decrease_name)])

    # truncate Y
    Y = np.clip(Y, 0, 1)

    df_res[y_name] = Y
    df_res['treatment_effect'] = Y - Y1

    return df_res, x_name
[ "def", "make_uplift_classification", "(", "n_samples", "=", "1000", ",", "treatment_name", "=", "[", "'control'", ",", "'treatment1'", ",", "'treatment2'", ",", "'treatment3'", "]", ",", "y_name", "=", "'conversion'", ",", "n_classification_features", "=", "10", ",", "n_classification_informative", "=", "5", ",", "n_classification_redundant", "=", "0", ",", "n_classification_repeated", "=", "0", ",", "n_uplift_increase_dict", "=", "{", "'treatment1'", ":", "2", ",", "'treatment2'", ":", "2", ",", "'treatment3'", ":", "2", "}", ",", "n_uplift_decrease_dict", "=", "{", "'treatment1'", ":", "0", ",", "'treatment2'", ":", "0", ",", "'treatment3'", ":", "0", "}", ",", "delta_uplift_increase_dict", "=", "{", "'treatment1'", ":", "0.02", ",", "'treatment2'", ":", "0.05", ",", "'treatment3'", ":", "0.1", "}", ",", "delta_uplift_decrease_dict", "=", "{", "'treatment1'", ":", "0.", ",", "'treatment2'", ":", "0.", ",", "'treatment3'", ":", "0.", "}", ",", "n_uplift_increase_mix_informative_dict", "=", "{", "'treatment1'", ":", "1", ",", "'treatment2'", ":", "1", ",", "'treatment3'", ":", "1", "}", ",", "n_uplift_decrease_mix_informative_dict", "=", "{", "'treatment1'", ":", "0", ",", "'treatment2'", ":", "0", ",", "'treatment3'", ":", "0", "}", ",", "positive_class_proportion", "=", "0.5", ",", "random_seed", "=", "20190101", ")", ":", "# set seed", "np", ".", "random", ".", "seed", "(", "seed", "=", "random_seed", ")", "# create data frame", "df_res", "=", "pd", ".", "DataFrame", "(", ")", "# generate treatment key", "n_all", "=", "n_samples", "*", "len", "(", "treatment_name", ")", "treatment_list", "=", "[", "]", "for", "ti", "in", "treatment_name", ":", "treatment_list", "+=", "[", "ti", "]", "*", "n_samples", "treatment_list", "=", "np", ".", "random", ".", "permutation", "(", "treatment_list", ")", "df_res", "[", "'treatment_group_key'", "]", "=", "treatment_list", "# generate features and labels", "X1", ",", "Y1", "=", "make_classification", "(", "n_samples", "=", "n_all", ",", "n_features", "=", "n_classification_features", ",", "n_informative", "=", "n_classification_informative", ",", "n_redundant", "=", "n_classification_redundant", ",", "n_repeated", "=", "n_classification_repeated", ",", "n_clusters_per_class", "=", "1", ",", "weights", "=", "[", "1", "-", "positive_class_proportion", ",", "positive_class_proportion", "]", ")", "x_name", "=", "[", "]", "x_informative_name", "=", "[", "]", "for", "xi", "in", "range", "(", "n_classification_informative", ")", ":", "x_name_i", "=", "'x'", "+", "str", "(", "len", "(", "x_name", ")", "+", "1", ")", "+", "'_informative'", "x_name", ".", "append", "(", "x_name_i", ")", "x_informative_name", ".", "append", "(", "x_name_i", ")", "df_res", "[", "x_name_i", "]", "=", "X1", "[", ":", ",", "xi", "]", "for", "xi", "in", "range", "(", "n_classification_redundant", ")", ":", "x_name_i", "=", "'x'", "+", "str", "(", "len", "(", "x_name", ")", "+", "1", ")", "+", "'_redundant'", "x_name", ".", "append", "(", "x_name_i", ")", "df_res", "[", "x_name_i", "]", "=", "X1", "[", ":", ",", "n_classification_informative", "+", "xi", "]", "for", "xi", "in", "range", "(", "n_classification_repeated", ")", ":", "x_name_i", "=", "'x'", "+", "str", "(", "len", "(", "x_name", ")", "+", "1", ")", "+", "'_repeated'", "x_name", ".", "append", "(", "x_name_i", ")", "df_res", "[", "x_name_i", "]", "=", "X1", "[", ":", ",", "n_classification_informative", "+", "n_classification_redundant", "+", "xi", "]", "for", "xi", "in", "range", "(", "n_classification_features", "-", 
"n_classification_informative", "-", "n_classification_redundant", "-", "n_classification_repeated", ")", ":", "x_name_i", "=", "'x'", "+", "str", "(", "len", "(", "x_name", ")", "+", "1", ")", "+", "'_irrelevant'", "x_name", ".", "append", "(", "x_name_i", ")", "df_res", "[", "x_name_i", "]", "=", "np", ".", "random", ".", "normal", "(", "0", ",", "1", ",", "n_all", ")", "# default treatment effects", "Y", "=", "Y1", ".", "copy", "(", ")", "Y_increase", "=", "np", ".", "zeros_like", "(", "Y1", ")", "Y_decrease", "=", "np", ".", "zeros_like", "(", "Y1", ")", "# generate uplift (positive)", "for", "treatment_key_i", "in", "treatment_name", ":", "treatment_index", "=", "df_res", ".", "index", "[", "df_res", "[", "'treatment_group_key'", "]", "==", "treatment_key_i", "]", ".", "tolist", "(", ")", "if", "treatment_key_i", "in", "n_uplift_increase_dict", "and", "n_uplift_increase_dict", "[", "treatment_key_i", "]", ">", "0", ":", "x_uplift_increase_name", "=", "[", "]", "adjust_class_proportion", "=", "(", "delta_uplift_increase_dict", "[", "treatment_key_i", "]", ")", "/", "(", "1", "-", "positive_class_proportion", ")", "X_increase", ",", "Y_increase", "=", "make_classification", "(", "n_samples", "=", "n_all", ",", "n_features", "=", "n_uplift_increase_dict", "[", "treatment_key_i", "]", ",", "n_informative", "=", "n_uplift_increase_dict", "[", "treatment_key_i", "]", ",", "n_redundant", "=", "0", ",", "n_clusters_per_class", "=", "1", ",", "weights", "=", "[", "1", "-", "adjust_class_proportion", ",", "adjust_class_proportion", "]", ")", "for", "xi", "in", "range", "(", "n_uplift_increase_dict", "[", "treatment_key_i", "]", ")", ":", "x_name_i", "=", "'x'", "+", "str", "(", "len", "(", "x_name", ")", "+", "1", ")", "+", "'_uplift_increase'", "x_name", ".", "append", "(", "x_name_i", ")", "x_uplift_increase_name", ".", "append", "(", "x_name_i", ")", "df_res", "[", "x_name_i", "]", "=", "X_increase", "[", ":", ",", "xi", "]", "Y", "[", "treatment_index", "]", "=", "Y", "[", "treatment_index", "]", "+", "Y_increase", "[", "treatment_index", "]", "if", "n_uplift_increase_mix_informative_dict", "[", "treatment_key_i", "]", ">", "0", ":", "for", "xi", "in", "range", "(", "n_uplift_increase_mix_informative_dict", "[", "treatment_key_i", "]", ")", ":", "x_name_i", "=", "'x'", "+", "str", "(", "len", "(", "x_name", ")", "+", "1", ")", "+", "'_increase_mix'", "x_name", ".", "append", "(", "x_name_i", ")", "df_res", "[", "x_name_i", "]", "=", "(", "np", ".", "random", ".", "uniform", "(", "-", "1", ",", "1", ")", "*", "df_res", "[", "np", ".", "random", ".", "choice", "(", "x_informative_name", ")", "]", "+", "np", ".", "random", ".", "uniform", "(", "-", "1", ",", "1", ")", "*", "df_res", "[", "np", ".", "random", ".", "choice", "(", "x_uplift_increase_name", ")", "]", ")", "# generate uplift (negative)", "for", "treatment_key_i", "in", "treatment_name", ":", "treatment_index", "=", "df_res", ".", "index", "[", "df_res", "[", "'treatment_group_key'", "]", "==", "treatment_key_i", "]", ".", "tolist", "(", ")", "if", "treatment_key_i", "in", "n_uplift_decrease_dict", "and", "n_uplift_decrease_dict", "[", "treatment_key_i", "]", ">", "0", ":", "x_uplift_decrease_name", "=", "[", "]", "adjust_class_proportion", "=", "(", "delta_uplift_decrease_dict", "[", "treatment_key_i", "]", ")", "/", "(", "1", "-", "positive_class_proportion", ")", "X_decrease", ",", "Y_decrease", "=", "make_classification", "(", "n_samples", "=", "n_all", ",", "n_features", "=", "n_uplift_decrease_dict", "[", "treatment_key_i", 
"]", ",", "n_informative", "=", "n_uplift_decrease_dict", "[", "treatment_key_i", "]", ",", "n_redundant", "=", "0", ",", "n_clusters_per_class", "=", "1", ",", "weights", "=", "[", "1", "-", "adjust_class_proportion", ",", "adjust_class_proportion", "]", ")", "for", "xi", "in", "range", "(", "n_uplift_decrease_dict", "[", "treatment_key_i", "]", ")", ":", "x_name_i", "=", "'x'", "+", "str", "(", "len", "(", "x_name", ")", "+", "1", ")", "+", "'_uplift_decrease'", "x_name", ".", "append", "(", "x_name_i", ")", "x_uplift_decrease_name", ".", "append", "(", "x_name_i", ")", "df_res", "[", "x_name_i", "]", "=", "X_decrease", "[", ":", ",", "xi", "]", "Y", "[", "treatment_index", "]", "=", "Y", "[", "treatment_index", "]", "-", "Y_decrease", "[", "treatment_index", "]", "if", "n_uplift_decrease_mix_informative_dict", "[", "treatment_key_i", "]", ">", "0", ":", "for", "xi", "in", "range", "(", "n_uplift_decrease_mix_informative_dict", "[", "treatment_key_i", "]", ")", ":", "x_name_i", "=", "'x'", "+", "str", "(", "len", "(", "x_name", ")", "+", "1", ")", "+", "'_decrease_mix'", "x_name", ".", "append", "(", "x_name_i", ")", "df_res", "[", "x_name_i", "]", "=", "(", "np", ".", "random", ".", "uniform", "(", "-", "1", ",", "1", ")", "*", "df_res", "[", "np", ".", "random", ".", "choice", "(", "x_informative_name", ")", "]", "+", "np", ".", "random", ".", "uniform", "(", "-", "1", ",", "1", ")", "*", "df_res", "[", "np", ".", "random", ".", "choice", "(", "x_uplift_decrease_name", ")", "]", ")", "# truncate Y", "Y", "=", "np", ".", "clip", "(", "Y", ",", "0", ",", "1", ")", "df_res", "[", "y_name", "]", "=", "Y", "df_res", "[", "'treatment_effect'", "]", "=", "Y", "-", "Y1", "return", "df_res", ",", "x_name" ]
[ 5, 0 ]
[ 182, 25 ]
python
en
['en', 'en', 'en']
True
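A short usage sketch for the generator above; the import path (causalml.dataset) is an assumption about the package this function belongs to:

from causalml.dataset import make_uplift_classification

df, feature_names = make_uplift_classification(n_samples=500, random_seed=42)
# One row per sample, with four treatment groups of 500 rows each.
print(df['treatment_group_key'].value_counts())
# Mean conversion by group reflects the configured uplift deltas.
print(df.groupby('treatment_group_key')['conversion'].mean())
print(feature_names[:3])  # e.g. ['x1_informative', 'x2_informative', 'x3_informative']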
DiagResult.include_element
(self, key: str)
Note that a particular key is something relevant to this result -- e.g. 'oh, the key foo-mapping.1 is active here'. One problem here is that we don't currently cycle over to make sure that all the requisite higher-level objects are brought in when we mark an element active. This needs fixing. :param key: the key we want to remember as being active.
Note that a particular key is something relevant to this result -- e.g. 'oh, the key foo-mapping.1 is active here'.
def include_element(self, key: str) -> None: """ Note that a particular key is something relevant to this result -- e.g. 'oh, the key foo-mapping.1 is active here'. One problem here is that we don't currently cycle over to make sure that all the requisite higher-level objects are brought in when we mark an element active. This needs fixing. :param key: the key we want to remember as being active. """ self.element_keys[key] = True
[ "def", "include_element", "(", "self", ",", "key", ":", "str", ")", "->", "None", ":", "self", ".", "element_keys", "[", "key", "]", "=", "True" ]
[ 144, 4 ]
[ 155, 37 ]
python
en
['en', 'error', 'th']
False
DiagResult.include_referenced_elements
(self, obj: dict)
Include all of the elements in the given object's _referenced_by array. :param obj: object for which to include referencing keys
Include all of the elements in the given object's _referenced_by array.
def include_referenced_elements(self, obj: dict) -> None: """ Include all of the elements in the given object's _referenced_by array. :param obj: object for which to include referencing keys """ for element_key in obj['_referenced_by']: self.include_element(element_key)
[ "def", "include_referenced_elements", "(", "self", ",", "obj", ":", "dict", ")", "->", "None", ":", "for", "element_key", "in", "obj", "[", "'_referenced_by'", "]", ":", "self", ".", "include_element", "(", "element_key", ")" ]
[ 157, 4 ]
[ 166, 45 ]
python
en
['en', 'error', 'th']
False
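Because these two DiagResult methods only touch self.element_keys, they can be exercised in isolation with a duck-typed stand-in. DiagResult is assumed to be in scope (it is the class these methods belong to); the SimpleNamespace fixture below is illustrative, not real API.

from types import SimpleNamespace

# Stand-in object with just the attribute the methods write to.
result = SimpleNamespace(element_keys={})

# include_element is a plain dict write:
DiagResult.include_element(result, 'quote-mapping.1')

# include_referenced_elements loops include_element over _referenced_by;
# give the stand-in a bound version so self.include_element resolves.
result.include_element = lambda key: DiagResult.include_element(result, key)
DiagResult.include_referenced_elements(result, {'_referenced_by': ['foo.1', 'bar.2']})

assert result.element_keys == {'quote-mapping.1': True, 'foo.1': True, 'bar.2': True}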
DiagResult.include_cluster
(self, cluster: dict)
Note that a particular cluster and everything that references it are relevant to this result. If the cluster has related health information in our cstats, fold that in too. Don't pass an IRCluster here -- turn it into a dict with as_dict() first. Returns the DiagCluster that we actually use to hold everything. :param cluster: dictionary version of a cluster to mark as active. :return: the DiagCluster for this cluster
Note that a particular cluster and everything that references it are relevant to this result. If the cluster has related health information in our cstats, fold that in too.
def include_cluster(self, cluster: dict) -> DiagCluster: """ Note that a particular cluster and everything that references it are relevant to this result. If the cluster has related health information in our cstats, fold that in too. Don't pass an IRCluster here -- turn it into a dict with as_dict() first. Returns the DiagCluster that we actually use to hold everything. :param cluster: dictionary version of a cluster to mark as active. :return: the DiagCluster for this cluster """ c_name = cluster['name'] if c_name not in self.clusters: self.clusters[c_name] = DiagCluster(cluster) if c_name in self.cstats: self.clusters[c_name].update_health(self.cstats[c_name]) self.include_referenced_elements(cluster) return self.clusters[c_name]
[ "def", "include_cluster", "(", "self", ",", "cluster", ":", "dict", ")", "->", "DiagCluster", ":", "c_name", "=", "cluster", "[", "'name'", "]", "if", "c_name", "not", "in", "self", ".", "clusters", ":", "self", ".", "clusters", "[", "c_name", "]", "=", "DiagCluster", "(", "cluster", ")", "if", "c_name", "in", "self", ".", "cstats", ":", "self", ".", "clusters", "[", "c_name", "]", ".", "update_health", "(", "self", ".", "cstats", "[", "c_name", "]", ")", "self", ".", "include_referenced_elements", "(", "cluster", ")", "return", "self", ".", "clusters", "[", "c_name", "]" ]
[ 168, 4 ]
[ 193, 36 ]
python
en
['en', 'error', 'th']
False
DiagResult.include_httpgroup
(self, group: IRHTTPMappingGroup)
Note that a particular IRHTTPMappingGroup, all of the clusters it uses for upstream traffic, and everything that references it are relevant to this result. This method actually does a fair amount of work around handling clusters, shadow clusters, and host_redirects. It would be a horrible mistake to duplicate this elsewhere. :param group: IRHTTPMappingGroup to include
Note that a particular IRHTTPMappingGroup, all of the clusters it uses for upstream traffic, and everything that references it are relevant to this result.
def include_httpgroup(self, group: IRHTTPMappingGroup) -> None: """ Note that a particular IRHTTPMappingGroup, all of the clusters it uses for upstream traffic, and everything that references it are relevant to this result. This method actually does a fair amount of work around handling clusters, shadow clusters, and host_redirects. It would be a horrible mistake to duplicate this elsewhere. :param group: IRHTTPMappingGroup to include """ # self.logger.debug("GROUP %s" % group.as_json()) prefix = group['prefix'] if 'prefix' in group else group['regex'] rewrite = group.get('rewrite', "/") method = '*' host = None route_clusters: List[DiagCluster] = [] for mapping in group.get('mappings', []): cluster = mapping['cluster'] mapping_cluster = self.include_cluster(cluster.as_dict()) mapping_cluster.update({'weight': mapping.get('weight', 100)}) # self.logger.debug("GROUP %s CLUSTER %s %d%% (%s)" % # (group['group_id'], c_name, mapping['weight'], mapping_cluster)) route_clusters.append(mapping_cluster) host_redir = group.get('host_redirect', None) if host_redir: # XXX Stupid hackery here. redirect_cluster should be a real # IRCluster object. redirect_cluster = self.include_cluster({ 'name': host_redir['name'], 'service': host_redir['service'], 'weight': 100, 'type_label': 'redirect', '_referenced_by': [ host_redir['rkey'] ] }) route_clusters.append(redirect_cluster) self.logger.debug("host_redirect route: %s" % group) self.logger.debug("host_redirect cluster: %s" % redirect_cluster) shadows = group.get('shadows', []) for shadow in shadows: # Shadows have a real cluster object. shadow_dict = shadow['cluster'].as_dict() shadow_dict['type_label'] = 'shadow' shadow_cluster = self.include_cluster(shadow_dict) route_clusters.append(shadow_cluster) self.logger.debug("shadow route: %s" % group) self.logger.debug("shadow cluster: %s" % shadow_cluster) headers = [] for header in group.get('headers', []): hdr_name = header.get('name', None) hdr_value = header.get('value', None) if hdr_name == ':authority': host = hdr_value elif hdr_name == ':method': method = hdr_value else: headers.append(header) sep = "" if prefix.startswith("/") else "/" route_key = "%s://%s%s%s" % (self.request_scheme, host if host else self.request_host, sep, prefix) route_info = { '_route': group.as_dict(), '_source': group['location'], '_group_id': group['group_id'], 'key': route_key, 'prefix': prefix, 'rewrite': rewrite, 'method': method, 'headers': headers, 'clusters': [ x.default_missing() for x in route_clusters ], 'host': host if host else '*', } if 'precedence' in group: route_info['precedence'] = group['precedence'] metadata_labels = group.get('metadata_labels') or {} diag_class = metadata_labels.get('ambassador_diag_class') or None if diag_class: route_info['diag_class'] = diag_class self.routes.append(route_info) self.include_referenced_elements(group)
[ "def", "include_httpgroup", "(", "self", ",", "group", ":", "IRHTTPMappingGroup", ")", "->", "None", ":", "# self.logger.debug(\"GROUP %s\" % group.as_json())", "prefix", "=", "group", "[", "'prefix'", "]", "if", "'prefix'", "in", "group", "else", "group", "[", "'regex'", "]", "rewrite", "=", "group", ".", "get", "(", "'rewrite'", ",", "\"/\"", ")", "method", "=", "'*'", "host", "=", "None", "route_clusters", ":", "List", "[", "DiagCluster", "]", "=", "[", "]", "for", "mapping", "in", "group", ".", "get", "(", "'mappings'", ",", "[", "]", ")", ":", "cluster", "=", "mapping", "[", "'cluster'", "]", "mapping_cluster", "=", "self", ".", "include_cluster", "(", "cluster", ".", "as_dict", "(", ")", ")", "mapping_cluster", ".", "update", "(", "{", "'weight'", ":", "mapping", ".", "get", "(", "'weight'", ",", "100", ")", "}", ")", "# self.logger.debug(\"GROUP %s CLUSTER %s %d%% (%s)\" %", "# (group['group_id'], c_name, mapping['weight'], mapping_cluster))", "route_clusters", ".", "append", "(", "mapping_cluster", ")", "host_redir", "=", "group", ".", "get", "(", "'host_redirect'", ",", "None", ")", "if", "host_redir", ":", "# XXX Stupid hackery here. redirect_cluster should be a real", "# IRCluster object.", "redirect_cluster", "=", "self", ".", "include_cluster", "(", "{", "'name'", ":", "host_redir", "[", "'name'", "]", ",", "'service'", ":", "host_redir", "[", "'service'", "]", ",", "'weight'", ":", "100", ",", "'type_label'", ":", "'redirect'", ",", "'_referenced_by'", ":", "[", "host_redir", "[", "'rkey'", "]", "]", "}", ")", "route_clusters", ".", "append", "(", "redirect_cluster", ")", "self", ".", "logger", ".", "debug", "(", "\"host_redirect route: %s\"", "%", "group", ")", "self", ".", "logger", ".", "debug", "(", "\"host_redirect cluster: %s\"", "%", "redirect_cluster", ")", "shadows", "=", "group", ".", "get", "(", "'shadows'", ",", "[", "]", ")", "for", "shadow", "in", "shadows", ":", "# Shadows have a real cluster object.", "shadow_dict", "=", "shadow", "[", "'cluster'", "]", ".", "as_dict", "(", ")", "shadow_dict", "[", "'type_label'", "]", "=", "'shadow'", "shadow_cluster", "=", "self", ".", "include_cluster", "(", "shadow_dict", ")", "route_clusters", ".", "append", "(", "shadow_cluster", ")", "self", ".", "logger", ".", "debug", "(", "\"shadow route: %s\"", "%", "group", ")", "self", ".", "logger", ".", "debug", "(", "\"shadow cluster: %s\"", "%", "shadow_cluster", ")", "headers", "=", "[", "]", "for", "header", "in", "group", ".", "get", "(", "'headers'", ",", "[", "]", ")", ":", "hdr_name", "=", "header", ".", "get", "(", "'name'", ",", "None", ")", "hdr_value", "=", "header", ".", "get", "(", "'value'", ",", "None", ")", "if", "hdr_name", "==", "':authority'", ":", "host", "=", "hdr_value", "elif", "hdr_name", "==", "':method'", ":", "method", "=", "hdr_value", "else", ":", "headers", ".", "append", "(", "header", ")", "sep", "=", "\"\"", "if", "prefix", ".", "startswith", "(", "\"/\"", ")", "else", "\"/\"", "route_key", "=", "\"%s://%s%s%s\"", "%", "(", "self", ".", "request_scheme", ",", "host", "if", "host", "else", "self", ".", "request_host", ",", "sep", ",", "prefix", ")", "route_info", "=", "{", "'_route'", ":", "group", ".", "as_dict", "(", ")", ",", "'_source'", ":", "group", "[", "'location'", "]", ",", "'_group_id'", ":", "group", "[", "'group_id'", "]", ",", "'key'", ":", "route_key", ",", "'prefix'", ":", "prefix", ",", "'rewrite'", ":", "rewrite", ",", "'method'", ":", "method", ",", "'headers'", ":", "headers", ",", "'clusters'", ":", "[", "x", ".", 
"default_missing", "(", ")", "for", "x", "in", "route_clusters", "]", ",", "'host'", ":", "host", "if", "host", "else", "'*'", ",", "}", "if", "'precedence'", "in", "group", ":", "route_info", "[", "'precedence'", "]", "=", "group", "[", "'precedence'", "]", "metadata_labels", "=", "group", ".", "get", "(", "'metadata_labels'", ")", "or", "{", "}", "diag_class", "=", "metadata_labels", ".", "get", "(", "'ambassador_diag_class'", ")", "or", "None", "if", "diag_class", ":", "route_info", "[", "'diag_class'", "]", "=", "diag_class", "self", ".", "routes", ".", "append", "(", "route_info", ")", "self", ".", "include_referenced_elements", "(", "group", ")" ]
[ 195, 4 ]
[ 297, 47 ]
python
en
['en', 'error', 'th']
False
DiagResult.finalize
(self)
Make sure that all the elements we've marked as included actually appear in the ambassador_resources and envoy_resources dictionaries, so that the UI can properly connect all the dots.
Make sure that all the elements we've marked as included actually appear in the ambassador_resources and envoy_resources dictionaries, so that the UI can properly connect all the dots.
def finalize(self) -> None: """ Make sure that all the elements we've marked as included actually appear in the ambassador_resources and envoy_resources dictionaries, so that the UI can properly connect all the dots. """ for key in self.element_keys.keys(): amb_el_info = self.diag.ambassador_elements.get(key, None) if amb_el_info: serialization = amb_el_info.get('serialization', None) if serialization: self.ambassador_resources[key] = serialization # What about errors? # Also make sure we have Envoy outputs for these things. envoy_el_info = self.diag.envoy_elements.get(key, None) if envoy_el_info: self.envoy_resources[key] = envoy_el_info
[ "def", "finalize", "(", "self", ")", "->", "None", ":", "for", "key", "in", "self", ".", "element_keys", ".", "keys", "(", ")", ":", "amb_el_info", "=", "self", ".", "diag", ".", "ambassador_elements", ".", "get", "(", "key", ",", "None", ")", "if", "amb_el_info", ":", "serialization", "=", "amb_el_info", ".", "get", "(", "'serialization'", ",", "None", ")", "if", "serialization", ":", "self", ".", "ambassador_resources", "[", "key", "]", "=", "serialization", "# What about errors?", "# Also make sure we have Envoy outputs for these things.", "envoy_el_info", "=", "self", ".", "diag", ".", "envoy_elements", ".", "get", "(", "key", ",", "None", ")", "if", "envoy_el_info", ":", "self", ".", "envoy_resources", "[", "key", "]", "=", "envoy_el_info" ]
[ 299, 4 ]
[ 321, 57 ]
python
en
['en', 'error', 'th']
False
Diagnostics.add_ambassador_service
(self, svc, type_name)
Remember information about a given Ambassador-wide service (Auth, RateLimit, Tracing). :param svc: service record :param type_name: what kind of thing is this?
Remember information about a given Ambassador-wide service (Auth, RateLimit, Tracing).
def add_ambassador_service(self, svc, type_name) -> None: """ Remember information about a given Ambassador-wide service (Auth, RateLimit, Tracing). :param svc: service record :param type_name: what kind of thing is this? """ cluster = svc.cluster urls = cluster.urls svc_weight = 100.0 / len(urls) for url in urls: self.ambassador_services.append({ 'type': type_name, '_source': svc.location, 'name': url, 'cluster': cluster.envoy_name, '_service_weight': svc_weight })
[ "def", "add_ambassador_service", "(", "self", ",", "svc", ",", "type_name", ")", "->", "None", ":", "cluster", "=", "svc", ".", "cluster", "urls", "=", "cluster", ".", "urls", "svc_weight", "=", "100.0", "/", "len", "(", "urls", ")", "for", "url", "in", "urls", ":", "self", ".", "ambassador_services", ".", "append", "(", "{", "'type'", ":", "type_name", ",", "'_source'", ":", "svc", ".", "location", ",", "'name'", ":", "url", ",", "'cluster'", ":", "cluster", ".", "envoy_name", ",", "'_service_weight'", ":", "svc_weight", "}", ")" ]
[ 524, 4 ]
[ 544, 14 ]
python
en
['en', 'error', 'th']
False
Diagnostics.add_ambassador_resolver
(self, resolver, group_list)
Remember information about a given Ambassador-wide resolver. :param resolver: resolver record :param group_list: list of groups that use this resolver
Remember information about a given Ambassador-wide resolver.
def add_ambassador_resolver(self, resolver, group_list) -> None: """ Remember information about a given Ambassador-wide resolver. :param resolver: resolver record :param group_list: list of groups that use this resolver """ self.ambassador_resolvers.append({ 'kind': resolver.kind, '_source': resolver.location, 'name': resolver.name, 'groups': group_list })
[ "def", "add_ambassador_resolver", "(", "self", ",", "resolver", ",", "group_list", ")", "->", "None", ":", "self", ".", "ambassador_resolvers", ".", "append", "(", "{", "'kind'", ":", "resolver", ".", "kind", ",", "'_source'", ":", "resolver", ".", "location", ",", "'name'", ":", "resolver", ".", "name", ",", "'groups'", ":", "group_list", "}", ")" ]
[ 546, 4 ]
[ 559, 10 ]
python
en
['en', 'error', 'th']
False
Diagnostics.split_key
(key)
Split a key into its components (the base name and the object index). :param key: possibly-qualified key :return: tuple of the base and a possible index
Split a key into its components (the base name and the object index).
def split_key(key) -> Tuple[str, Optional[str]]: """ Split a key into its components (the base name and the object index). :param key: possibly-qualified key :return: tuple of the base and a possible index """ key_base = key key_index = None m = Diagnostics.reKeyIndex.search(key) if m: key_base = key[:m.start()] key_index = m.group(1) return key_base, key_index
[ "def", "split_key", "(", "key", ")", "->", "Tuple", "[", "str", ",", "Optional", "[", "str", "]", "]", ":", "key_base", "=", "key", "key_index", "=", "None", "m", "=", "Diagnostics", ".", "reKeyIndex", ".", "search", "(", "key", ")", "if", "m", ":", "key_base", "=", "key", "[", ":", "m", ".", "start", "(", ")", "]", "key_index", "=", "m", ".", "group", "(", "1", ")", "return", "key_base", ",", "key_index" ]
[ 562, 4 ]
[ 579, 34 ]
python
en
['en', 'error', 'th']
False
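A stand-alone sketch of the base/index split; Diagnostics.reKeyIndex is not shown in this excerpt, so the regex below is an assumption consistent with the documented behavior:
    import re

    reKeyIndex = re.compile(r'\.(\d+)$')   # assumed pattern for a trailing numeric index

    def split_key(key):
        key_base, key_index = key, None
        m = reKeyIndex.search(key)
        if m:
            key_base, key_index = key[:m.start()], m.group(1)
        return key_base, key_index

    assert split_key('mapping-qotm.yaml.3') == ('mapping-qotm.yaml', '3')
    assert split_key('mapping-qotm.yaml') == ('mapping-qotm.yaml', None)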
Diagnostics._remember_source
(self, src_key: str, dest_key: str)
Link keys of active sources together. The source map lets us answer questions like 'which objects does ambassador.yaml define?' and this is the primitive that actually populates the map. The src_key is where you start the lookup; the dest_key is something defined by the src_key. They can be the same. :param src_key: the starting key (ambassador.yaml) :param dest_key: the destination key (ambassador.yaml.1)
Link keys of active sources together. The source map lets us answer questions like 'which objects does ambassador.yaml define?' and this is the primitive that actually populates the map.
def _remember_source(self, src_key: str, dest_key: str) -> None: """ Link keys of active sources together. The source map lets us answer questions like 'which objects does ambassador.yaml define?' and this is the primitive that actually populates the map. The src_key is where you start the lookup; the dest_key is something defined by the src_key. They can be the same. :param src_key: the starting key (ambassador.yaml) :param dest_key: the destination key (ambassador.yaml.1) """ src_map = self.source_map.setdefault(src_key, {}) src_map[dest_key] = True
[ "def", "_remember_source", "(", "self", ",", "src_key", ":", "str", ",", "dest_key", ":", "str", ")", "->", "None", ":", "src_map", "=", "self", ".", "source_map", ".", "setdefault", "(", "src_key", ",", "{", "}", ")", "src_map", "[", "dest_key", "]", "=", "True" ]
[ 630, 4 ]
[ 644, 32 ]
python
en
['en', 'error', 'th']
False
Diagnostics.remember_source
(self, uqkey: str, fqkey: Optional[str], location: Optional[str], dest_key: str)
Populate the source map in various ways. A mapping from uqkey to dest_key is always added; mappings for fqkey and location are added if they are unique keys. :param uqkey: unqualified source key :param fqkey: qualified source key :param location: source location :param dest_key: key of object being defined
Populate the source map in various ways. A mapping from uqkey to dest_key is always added; mappings for fqkey and location are added if they are unique keys.
def remember_source(self, uqkey: str, fqkey: Optional[str], location: Optional[str], dest_key: str) -> None: """ Populate the source map in various ways. A mapping from uqkey to dest_key is always added; mappings for fqkey and location are added if they are unique keys. :param uqkey: unqualified source key :param fqkey: qualified source key :param location: source location :param dest_key: key of object being defined """ self._remember_source(uqkey, dest_key) if fqkey and (fqkey != uqkey): self._remember_source(fqkey, dest_key) if location and (location != uqkey) and (location != fqkey): self._remember_source(location, dest_key)
[ "def", "remember_source", "(", "self", ",", "uqkey", ":", "str", ",", "fqkey", ":", "Optional", "[", "str", "]", ",", "location", ":", "Optional", "[", "str", "]", ",", "dest_key", ":", "str", ")", "->", "None", ":", "self", ".", "_remember_source", "(", "uqkey", ",", "dest_key", ")", "if", "fqkey", "and", "(", "fqkey", "!=", "uqkey", ")", ":", "self", ".", "_remember_source", "(", "fqkey", ",", "dest_key", ")", "if", "location", "and", "(", "location", "!=", "uqkey", ")", "and", "(", "location", "!=", "fqkey", ")", ":", "self", ".", "_remember_source", "(", "location", ",", "dest_key", ")" ]
[ 646, 4 ]
[ 664, 53 ]
python
en
['en', 'error', 'th']
False
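A small runnable sketch of the map these two methods build, reusing the example keys from the lookup() docstring further down:
    source_map = {}

    def _remember_source(src_key, dest_key):
        source_map.setdefault(src_key, {})[dest_key] = True

    for i in (1, 2, 3):
        _remember_source('mapping-qotm.yaml', 'mapping-qotm.yaml.%d' % i)

    # source_map == {'mapping-qotm.yaml': {'mapping-qotm.yaml.1': True,
    #                                      'mapping-qotm.yaml.2': True,
    #                                      'mapping-qotm.yaml.3': True}}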
Diagnostics.overview
(self, request, estat: EnvoyStats)
Generate overview data describing the whole Ambassador setup, most notably the routing table. Returns the dictionary form of a DiagResult. :param request: the Flask request being handled :param estat: current EnvoyStats :return: the dictionary form of a DiagResult
Generate overview data describing the whole Ambassador setup, most notably the routing table. Returns the dictionary form of a DiagResult.
def overview(self, request, estat: EnvoyStats) -> Dict[str, Any]: """ Generate overview data describing the whole Ambassador setup, most notably the routing table. Returns the dictionary form of a DiagResult. :param request: the Flask request being handled :param estat: current EnvoyStats :return: the dictionary form of a DiagResult """ result = DiagResult(self, estat, request) for group in self.ir.ordered_groups(): # TCPMappings are currently handled elsewhere. if isinstance(group, IRHTTPMappingGroup): result.include_httpgroup(group) return result.as_dict()
[ "def", "overview", "(", "self", ",", "request", ",", "estat", ":", "EnvoyStats", ")", "->", "Dict", "[", "str", ",", "Any", "]", ":", "result", "=", "DiagResult", "(", "self", ",", "estat", ",", "request", ")", "for", "group", "in", "self", ".", "ir", ".", "ordered_groups", "(", ")", ":", "# TCPMappings are currently handled elsewhere.", "if", "isinstance", "(", "group", ",", "IRHTTPMappingGroup", ")", ":", "result", ".", "include_httpgroup", "(", "group", ")", "return", "result", ".", "as_dict", "(", ")" ]
[ 666, 4 ]
[ 683, 31 ]
python
en
['en', 'error', 'th']
False
Diagnostics.lookup
(self, request, key: str, estat: EnvoyStats)
Generate data describing a specific key in the Ambassador setup, and all the things connected to it. Returns the dictionary form of a DiagResult. 'key' can be a group key that starts with grp-, a cluster key that starts with cluster_, or a source key. :param request: the Flask request being handled :param key: the key of the thing we want :param estat: current EnvoyStats :return: the dictionary form of a DiagResult
Generate data describing a specific key in the Ambassador setup, and all the things connected to it. Returns the dictionary form of a DiagResult.
def lookup(self, request, key: str, estat: EnvoyStats) -> Optional[Dict[str, Any]]: """ Generate data describing a specific key in the Ambassador setup, and all the things connected to it. Returns the dictionary form of a DiagResult. 'key' can be a group key that starts with grp-, a cluster key that starts with cluster_, or a source key. :param request: the Flask request being handled :param key: the key of the thing we want :param estat: current EnvoyStats :return: the dictionary form of a DiagResult """ result = DiagResult(self, estat, request) # Typically we'll get handed a group identifier here, but we might get # other stuff too, and we have to look for all of it. found: bool = False if key in self.groups: # Yup, group ID. group = self.groups[key] # TCPMappings are currently handled elsewhere. if isinstance(group, IRHTTPMappingGroup): result.include_httpgroup(group) found = True elif key in self.clusters: result.include_cluster(self.clusters[key].as_dict()) found = True elif key in self.source_map: # The source_map is set up like: # # "mapping-qotm.yaml": { # "mapping-qotm.yaml.1": true, # "mapping-qotm.yaml.2": true, # "mapping-qotm.yaml.3": true # } # # so for whatever we found, we need to tell the result to # include every element in the keys of the dict stored for # our key. for subkey in self.source_map[key].keys(): result.include_element(subkey) # Not a typo. Set found here, in case somehow we land on # a key with no subkeys (which should be impossible, but, # y'know). found = True if found: result.finalize() return result.as_dict() else: return None
[ "def", "lookup", "(", "self", ",", "request", ",", "key", ":", "str", ",", "estat", ":", "EnvoyStats", ")", "->", "Optional", "[", "Dict", "[", "str", ",", "Any", "]", "]", ":", "result", "=", "DiagResult", "(", "self", ",", "estat", ",", "request", ")", "# Typically we'll get handed a group identifier here, but we might get", "# other stuff too, and we have to look for all of it.", "found", ":", "bool", "=", "False", "if", "key", "in", "self", ".", "groups", ":", "# Yup, group ID.", "group", "=", "self", ".", "groups", "[", "key", "]", "# TCPMappings are currently handled elsewhere.", "if", "isinstance", "(", "group", ",", "IRHTTPMappingGroup", ")", ":", "result", ".", "include_httpgroup", "(", "group", ")", "found", "=", "True", "elif", "key", "in", "self", ".", "clusters", ":", "result", ".", "include_cluster", "(", "self", ".", "clusters", "[", "key", "]", ".", "as_dict", "(", ")", ")", "found", "=", "True", "elif", "key", "in", "self", ".", "source_map", ":", "# The source_map is set up like:", "#", "# \"mapping-qotm.yaml\": {", "# \"mapping-qotm.yaml.1\": true,", "# \"mapping-qotm.yaml.2\": true,", "# \"mapping-qotm.yaml.3\": true", "# }", "#", "# so for whatever we found, we need to tell the result to", "# include every element in the keys of the dict stored for", "# our key.", "for", "subkey", "in", "self", ".", "source_map", "[", "key", "]", ".", "keys", "(", ")", ":", "result", ".", "include_element", "(", "subkey", ")", "# Not a typo. Set found here, in case somehow we land on", "# a key with no subkeys (which should be impossible, but,", "# y'know).", "found", "=", "True", "if", "found", ":", "result", ".", "finalize", "(", ")", "return", "result", ".", "as_dict", "(", ")", "else", ":", "return", "None" ]
[ 685, 4 ]
[ 741, 23 ]
python
en
['en', 'error', 'th']
False
AbstractDisplay.redirect_display
(self, on)
on: * True -> set $DISPLAY to virtual screen * False -> set $DISPLAY to original screen :param on: bool
on: * True -> set $DISPLAY to virtual screen * False -> set $DISPLAY to original screen
def redirect_display(self, on): ''' on: * True -> set $DISPLAY to virtual screen * False -> set $DISPLAY to original screen :param on: bool ''' d = self.new_display_var if on else self.old_display_var if d is None: del os.environ['DISPLAY'] else: os.environ['DISPLAY'] = d
[ "def", "redirect_display", "(", "self", ",", "on", ")", ":", "d", "=", "self", ".", "new_display_var", "if", "on", "else", "self", ".", "old_display_var", "if", "d", "is", "None", ":", "del", "os", ".", "environ", "[", "'DISPLAY'", "]", "else", ":", "os", ".", "environ", "[", "'DISPLAY'", "]", "=", "d" ]
[ 59, 4 ]
[ 71, 37 ]
python
en
['en', 'error', 'th']
False
AbstractDisplay.start
(self)
start display :rtype: self
start display
def start(self): ''' start display :rtype: self ''' if self.use_xauth: self._setup_xauth() EasyProcess.start(self) # https://github.com/ponty/PyVirtualDisplay/issues/2 # https://github.com/ponty/PyVirtualDisplay/issues/14 self.old_display_var = os.environ.get('DISPLAY', None) self.redirect_display(True) # wait until X server is active # TODO: better method time.sleep(0.1) return self
[ "def", "start", "(", "self", ")", ":", "if", "self", ".", "use_xauth", ":", "self", ".", "_setup_xauth", "(", ")", "EasyProcess", ".", "start", "(", "self", ")", "# https://github.com/ponty/PyVirtualDisplay/issues/2", "# https://github.com/ponty/PyVirtualDisplay/issues/14", "self", ".", "old_display_var", "=", "os", ".", "environ", ".", "get", "(", "'DISPLAY'", ",", "None", ")", "self", ".", "redirect_display", "(", "True", ")", "# wait until X server is active", "# TODO: better method", "time", ".", "sleep", "(", "0.1", ")", "return", "self" ]
[ 73, 4 ]
[ 91, 19 ]
python
en
['en', 'error', 'th']
False
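Typical use of the public pyvirtualdisplay API built on these start()/stop() methods, as a sketch (assumes the pyvirtualdisplay package and an Xvfb binary are installed):
    from pyvirtualdisplay import Display

    display = Display(visible=0, size=(1024, 768))
    display.start()   # $DISPLAY now points at the virtual screen
    # ... run code that needs an X server ...
    display.stop()    # the original $DISPLAY is restored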
AbstractDisplay.stop
(self)
stop display :rtype: self
stop display
def stop(self): ''' stop display :rtype: self ''' self.redirect_display(False) EasyProcess.stop(self) if self.use_xauth: self._clear_xauth() return self
[ "def", "stop", "(", "self", ")", ":", "self", ".", "redirect_display", "(", "False", ")", "EasyProcess", ".", "stop", "(", "self", ")", "if", "self", ".", "use_xauth", ":", "self", ".", "_clear_xauth", "(", ")", "return", "self" ]
[ 93, 4 ]
[ 103, 19 ]
python
en
['en', 'error', 'th']
False
AbstractDisplay._setup_xauth
(self)
Set up the Xauthority file and the XAUTHORITY environment variable.
Set up the Xauthority file and the XAUTHORITY environment variable.
def _setup_xauth(self): ''' Set up the Xauthority file and the XAUTHORITY environment variable. ''' handle, filename = tempfile.mkstemp(prefix='PyVirtualDisplay.', suffix='.Xauthority') self._xauth_filename = filename os.close(handle) # Save old environment self._old_xauth = {} self._old_xauth['AUTHFILE'] = os.getenv('AUTHFILE') self._old_xauth['XAUTHORITY'] = os.getenv('XAUTHORITY') os.environ['AUTHFILE'] = os.environ['XAUTHORITY'] = filename cookie = xauth.generate_mcookie() xauth.call('add', self.new_display_var, '.', cookie)
[ "def", "_setup_xauth", "(", "self", ")", ":", "handle", ",", "filename", "=", "tempfile", ".", "mkstemp", "(", "prefix", "=", "'PyVirtualDisplay.'", ",", "suffix", "=", "'.Xauthority'", ")", "self", ".", "_xauth_filename", "=", "filename", "os", ".", "close", "(", "handle", ")", "# Save old environment", "self", ".", "_old_xauth", "=", "{", "}", "self", ".", "_old_xauth", "[", "'AUTHFILE'", "]", "=", "os", ".", "getenv", "(", "'AUTHFILE'", ")", "self", ".", "_old_xauth", "[", "'XAUTHORITY'", "]", "=", "os", ".", "getenv", "(", "'XAUTHORITY'", ")", "os", ".", "environ", "[", "'AUTHFILE'", "]", "=", "os", ".", "environ", "[", "'XAUTHORITY'", "]", "=", "filename", "cookie", "=", "xauth", ".", "generate_mcookie", "(", ")", "xauth", ".", "call", "(", "'add'", ",", "self", ".", "new_display_var", ",", "'.'", ",", "cookie", ")" ]
[ 105, 4 ]
[ 120, 60 ]
python
en
['en', 'error', 'th']
False
AbstractDisplay._clear_xauth
(self)
Clear the Xauthority file and restore the environment variables.
Clear the Xauthority file and restore the environment variables.
def _clear_xauth(self): ''' Clear the Xauthority file and restore the environment variables. ''' os.remove(self._xauth_filename) for varname in ['AUTHFILE', 'XAUTHORITY']: if self._old_xauth[varname] is None: del os.environ[varname] else: os.environ[varname] = self._old_xauth[varname] self._old_xauth = None
[ "def", "_clear_xauth", "(", "self", ")", ":", "os", ".", "remove", "(", "self", ".", "_xauth_filename", ")", "for", "varname", "in", "[", "'AUTHFILE'", ",", "'XAUTHORITY'", "]", ":", "if", "self", ".", "_old_xauth", "[", "varname", "]", "is", "None", ":", "del", "os", ".", "environ", "[", "varname", "]", "else", ":", "os", ".", "environ", "[", "varname", "]", "=", "self", ".", "_old_xauth", "[", "varname", "]", "self", ".", "_old_xauth", "=", "None" ]
[ 122, 4 ]
[ 132, 30 ]
python
en
['en', 'error', 'th']
False
send_opsgenie_alert
(query, suite_name, settings)
Creates an alert in Opsgenie.
Creates an alert in Opsgenie.
def send_opsgenie_alert(query, suite_name, settings): """Creates an alert in Opsgenie.""" if settings["region"] != None: url = "https://api.{region}.opsgenie.com/v2/alerts".format( region=settings["region"] ) # accommodate for Europeans else: url = "https://api.opsgenie.com/v2/alerts" headers = { "Authorization": "GenieKey {api_key}".format(api_key=settings["api_key"]) } payload = { "message": "Great Expectations suite {suite_name} failed".format( suite_name=suite_name ), "description": query, "priority": settings["priority"], # allow this to be modified in settings } session = requests.Session() try: response = session.post(url, headers=headers, json=payload) except requests.ConnectionError: logger.warning("Failed to connect to Opsgenie") except Exception as e: logger.error(str(e)) else: if response.status_code != 202: logger.warning( "Request to Opsgenie API at {url} " "returned error {status_code}: {text}".format( url=url, status_code=response.status_code, text=response.text, ) ) else: return "success" return "error"
[ "def", "send_opsgenie_alert", "(", "query", ",", "suite_name", ",", "settings", ")", ":", "if", "settings", "[", "\"region\"", "]", "!=", "None", ":", "url", "=", "\"https://api.{region}.opsgenie.com/v2/alerts\"", ".", "format", "(", "region", "=", "settings", "[", "\"region\"", "]", ")", "# accommodate for Europeans", "else", ":", "url", "=", "\"https://api.opsgenie.com/v2/alerts\"", "headers", "=", "{", "\"Authorization\"", ":", "\"GenieKey {api_key}\"", ".", "format", "(", "api_key", "=", "settings", "[", "\"api_key\"", "]", ")", "}", "payload", "=", "{", "\"message\"", ":", "\"Great Expectations suite {suite_name} failed\"", ".", "format", "(", "suite_name", "=", "suite_name", ")", ",", "\"description\"", ":", "query", ",", "\"priority\"", ":", "settings", "[", "\"priority\"", "]", ",", "# allow this to be modified in settings", "}", "session", "=", "requests", ".", "Session", "(", ")", "try", ":", "response", "=", "session", ".", "post", "(", "url", ",", "headers", "=", "headers", ",", "json", "=", "payload", ")", "except", "requests", ".", "ConnectionError", ":", "logger", ".", "warning", "(", "\"Failed to connect to Opsgenie\"", ")", "except", "Exception", "as", "e", ":", "logger", ".", "error", "(", "str", "(", "e", ")", ")", "else", ":", "if", "response", ".", "status_code", "!=", "202", ":", "logger", ".", "warning", "(", "\"Request to Opsgenie API at {url} \"", "\"returned error {status_code}: {text}\"", ".", "format", "(", "url", "=", "url", ",", "status_code", "=", "response", ".", "status_code", ",", "text", "=", "response", ".", "text", ",", ")", ")", "else", ":", "return", "\"success\"", "return", "\"error\"" ]
[ 44, 0 ]
[ 84, 18 ]
python
en
['en', 'de', 'en']
True
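A sketch of the settings dict this function reads; every value below is a placeholder:
    settings = {
        'api_key': 'xxxxxxxx-xxxx',   # Opsgenie GenieKey (placeholder)
        'region': 'eu',               # or None for the default api.opsgenie.com
        'priority': 'P3',
    }
    status = send_opsgenie_alert('validation query text', 'my_suite', settings)
    # 'success' on HTTP 202, otherwise 'error'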
test_database_evaluation_parameter_store_store_backend_id
(in_memory_param_store)
What does this test and why? A Store should be able to report its store_backend_id which is set when the StoreBackend is instantiated.
What does this test and why? A Store should be able to report its store_backend_id which is set when the StoreBackend is instantiated.
def test_database_evaluation_parameter_store_store_backend_id(in_memory_param_store): """ What does this test and why? A Store should be able to report its store_backend_id which is set when the StoreBackend is instantiated. """ # Check that store_backend_id exists and can be read assert in_memory_param_store.store_backend_id is not None # Check that store_backend_id is a valid UUID assert test_utils.validate_uuid4(in_memory_param_store.store_backend_id)
[ "def", "test_database_evaluation_parameter_store_store_backend_id", "(", "in_memory_param_store", ")", ":", "# Check that store_backend_id exists and can be read", "assert", "in_memory_param_store", ".", "store_backend_id", "is", "not", "None", "# Check that store_backend_id is a valid UUID", "assert", "test_utils", ".", "validate_uuid4", "(", "in_memory_param_store", ".", "store_backend_id", ")" ]
[ 186, 0 ]
[ 195, 76 ]
python
en
['en', 'error', 'th']
False
ParameterBuilder.__init__
( self, parameter_name: str, data_context: Optional[DataContext] = None, batch_request: Optional[Union[dict, str]] = None, )
The ParameterBuilder will build parameters for the active domain from the rule. Args: parameter_name: the name of this parameter -- this is user-specified parameter name (from configuration); it is not the fully-qualified parameter name; a fully-qualified parameter name must start with "$parameter." and may contain one or more subsequent parts (e.g., "$parameter.<my_param_from_config>.<metric_name>"). data_context: DataContext batch_request: specified in ParameterBuilder configuration to get Batch objects for parameter computation.
The ParameterBuilder will build parameters for the active domain from the rule.
def __init__( self, parameter_name: str, data_context: Optional[DataContext] = None, batch_request: Optional[Union[dict, str]] = None, ): """ The ParameterBuilder will build parameters for the active domain from the rule. Args: parameter_name: the name of this parameter -- this is user-specified parameter name (from configuration); it is not the fully-qualified parameter name; a fully-qualified parameter name must start with "$parameter." and may contain one or more subsequent parts (e.g., "$parameter.<my_param_from_config>.<metric_name>"). data_context: DataContext batch_request: specified in ParameterBuilder configuration to get Batch objects for parameter computation. """ self._parameter_name = parameter_name self._data_context = data_context self._batch_request = batch_request
[ "def", "__init__", "(", "self", ",", "parameter_name", ":", "str", ",", "data_context", ":", "Optional", "[", "DataContext", "]", "=", "None", ",", "batch_request", ":", "Optional", "[", "Union", "[", "dict", ",", "str", "]", "]", "=", "None", ",", ")", ":", "self", ".", "_parameter_name", "=", "parameter_name", "self", ".", "_data_context", "=", "data_context", "self", ".", "_batch_request", "=", "batch_request" ]
[ 44, 4 ]
[ 63, 43 ]
python
en
['en', 'error', 'th']
False
bygroups
(*args)
Callback that yields multiple actions for each group in the match.
Callback that yields multiple actions for each group in the match.
def bygroups(*args): """ Callback that yields multiple actions for each group in the match. """ def callback(lexer, match, ctx=None): for i, action in enumerate(args): if action is None: continue elif type(action) is _TokenType: data = match.group(i + 1) if data: yield match.start(i + 1), action, data else: data = match.group(i + 1) if data is not None: if ctx: ctx.pos = match.start(i + 1) for item in action( lexer, _PseudoMatch(match.start(i + 1), data), ctx): if item: yield item if ctx: ctx.pos = match.end() return callback
[ "def", "bygroups", "(", "*", "args", ")", ":", "def", "callback", "(", "lexer", ",", "match", ",", "ctx", "=", "None", ")", ":", "for", "i", ",", "action", "in", "enumerate", "(", "args", ")", ":", "if", "action", "is", "None", ":", "continue", "elif", "type", "(", "action", ")", "is", "_TokenType", ":", "data", "=", "match", ".", "group", "(", "i", "+", "1", ")", "if", "data", ":", "yield", "match", ".", "start", "(", "i", "+", "1", ")", ",", "action", ",", "data", "else", ":", "data", "=", "match", ".", "group", "(", "i", "+", "1", ")", "if", "data", "is", "not", "None", ":", "if", "ctx", ":", "ctx", ".", "pos", "=", "match", ".", "start", "(", "i", "+", "1", ")", "for", "item", "in", "action", "(", "lexer", ",", "_PseudoMatch", "(", "match", ".", "start", "(", "i", "+", "1", ")", ",", "data", ")", ",", "ctx", ")", ":", "if", "item", ":", "yield", "item", "if", "ctx", ":", "ctx", ".", "pos", "=", "match", ".", "end", "(", ")", "return", "callback" ]
[ 304, 0 ]
[ 327, 19 ]
python
en
['en', 'error', 'th']
False
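A minimal sketch of bygroups in a lexer rule (assumes the Pygments package; the lexer and token choices are illustrative):
    from pygments.lexer import RegexLexer, bygroups
    from pygments.token import Name, Number, Operator, Text

    class AssignLexer(RegexLexer):
        tokens = {
            'root': [
                # Each regex group is emitted with its own token type.
                (r'(\w+)(\s*)(=)(\s*)(\d+)',
                 bygroups(Name.Variable, Text, Operator, Text, Number)),
                (r'\s+', Text),
            ],
        }

    out = list(AssignLexer().get_tokens('answer = 42'))
    # out begins (Name.Variable, 'answer'), (Text, ' '), (Operator, '='), ...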
using
(_other, **kwargs)
Callback that processes the match with a different lexer. The keyword arguments are forwarded to the lexer, except `state` which is handled separately. `state` specifies the state that the new lexer will start in, and can be an enumerable such as ('root', 'inline', 'string') or a simple string which is assumed to be on top of the root state. Note: For that to work, `_other` must not be an `ExtendedRegexLexer`.
Callback that processes the match with a different lexer.
def using(_other, **kwargs): """ Callback that processes the match with a different lexer. The keyword arguments are forwarded to the lexer, except `state` which is handled separately. `state` specifies the state that the new lexer will start in, and can be an enumerable such as ('root', 'inline', 'string') or a simple string which is assumed to be on top of the root state. Note: For that to work, `_other` must not be an `ExtendedRegexLexer`. """ gt_kwargs = {} if 'state' in kwargs: s = kwargs.pop('state') if isinstance(s, (list, tuple)): gt_kwargs['stack'] = s else: gt_kwargs['stack'] = ('root', s) if _other is this: def callback(lexer, match, ctx=None): # if keyword arguments are given the callback # function has to create a new lexer instance if kwargs: # XXX: cache that somehow kwargs.update(lexer.options) lx = lexer.__class__(**kwargs) else: lx = lexer s = match.start() for i, t, v in lx.get_tokens_unprocessed(match.group(), **gt_kwargs): yield i + s, t, v if ctx: ctx.pos = match.end() else: def callback(lexer, match, ctx=None): # XXX: cache that somehow kwargs.update(lexer.options) lx = _other(**kwargs) s = match.start() for i, t, v in lx.get_tokens_unprocessed(match.group(), **gt_kwargs): yield i + s, t, v if ctx: ctx.pos = match.end() return callback
[ "def", "using", "(", "_other", ",", "*", "*", "kwargs", ")", ":", "gt_kwargs", "=", "{", "}", "if", "'state'", "in", "kwargs", ":", "s", "=", "kwargs", ".", "pop", "(", "'state'", ")", "if", "isinstance", "(", "s", ",", "(", "list", ",", "tuple", ")", ")", ":", "gt_kwargs", "[", "'stack'", "]", "=", "s", "else", ":", "gt_kwargs", "[", "'stack'", "]", "=", "(", "'root'", ",", "s", ")", "if", "_other", "is", "this", ":", "def", "callback", "(", "lexer", ",", "match", ",", "ctx", "=", "None", ")", ":", "# if keyword arguments are given the callback", "# function has to create a new lexer instance", "if", "kwargs", ":", "# XXX: cache that somehow", "kwargs", ".", "update", "(", "lexer", ".", "options", ")", "lx", "=", "lexer", ".", "__class__", "(", "*", "*", "kwargs", ")", "else", ":", "lx", "=", "lexer", "s", "=", "match", ".", "start", "(", ")", "for", "i", ",", "t", ",", "v", "in", "lx", ".", "get_tokens_unprocessed", "(", "match", ".", "group", "(", ")", ",", "*", "*", "gt_kwargs", ")", ":", "yield", "i", "+", "s", ",", "t", ",", "v", "if", "ctx", ":", "ctx", ".", "pos", "=", "match", ".", "end", "(", ")", "else", ":", "def", "callback", "(", "lexer", ",", "match", ",", "ctx", "=", "None", ")", ":", "# XXX: cache that somehow", "kwargs", ".", "update", "(", "lexer", ".", "options", ")", "lx", "=", "_other", "(", "*", "*", "kwargs", ")", "s", "=", "match", ".", "start", "(", ")", "for", "i", ",", "t", ",", "v", "in", "lx", ".", "get_tokens_unprocessed", "(", "match", ".", "group", "(", ")", ",", "*", "*", "gt_kwargs", ")", ":", "yield", "i", "+", "s", ",", "t", ",", "v", "if", "ctx", ":", "ctx", ".", "pos", "=", "match", ".", "end", "(", ")", "return", "callback" ]
[ 338, 0 ]
[ 385, 19 ]
python
en
['en', 'error', 'th']
False
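A sketch of delegating part of a match to another lexer with using() (assumes Pygments; the 'code:' directive syntax is made up):
    from pygments.lexer import RegexLexer, bygroups, using
    from pygments.lexers import PythonLexer
    from pygments.token import Keyword, Text

    class CodeDirectiveLexer(RegexLexer):
        tokens = {
            'root': [
                # Everything after a 'code:' marker is re-lexed as Python.
                (r'(code:)(.*)', bygroups(Keyword, using(PythonLexer))),
                (r'[^\n]+', Text),
                (r'\n', Text),
            ],
        }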
do_insertions
(insertions, tokens)
Helper for lexers which must combine the results of several sublexers. ``insertions`` is a list of ``(index, itokens)`` pairs. Each ``itokens`` iterable should be inserted at position ``index`` into the token stream given by the ``tokens`` argument. The result is a combined token stream. TODO: clean up the code here.
Helper for lexers which must combine the results of several sublexers.
def do_insertions(insertions, tokens): """ Helper for lexers which must combine the results of several sublexers. ``insertions`` is a list of ``(index, itokens)`` pairs. Each ``itokens`` iterable should be inserted at position ``index`` into the token stream given by the ``tokens`` argument. The result is a combined token stream. TODO: clean up the code here. """ insertions = iter(insertions) try: index, itokens = next(insertions) except StopIteration: # no insertions for item in tokens: yield item return realpos = None insleft = True # iterate over the token stream where we want to insert # the tokens from the insertion list. for i, t, v in tokens: # first iteration. store the position of first item if realpos is None: realpos = i oldi = 0 while insleft and i + len(v) >= index: tmpval = v[oldi:index - i] yield realpos, t, tmpval realpos += len(tmpval) for it_index, it_token, it_value in itokens: yield realpos, it_token, it_value realpos += len(it_value) oldi = index - i try: index, itokens = next(insertions) except StopIteration: insleft = False break # not strictly necessary yield realpos, t, v[oldi:] realpos += len(v) - oldi # leftover tokens while insleft: # no normal tokens, set realpos to zero realpos = realpos or 0 for p, t, v in itokens: yield realpos, t, v realpos += len(v) try: index, itokens = next(insertions) except StopIteration: insleft = False break
[ "def", "do_insertions", "(", "insertions", ",", "tokens", ")", ":", "insertions", "=", "iter", "(", "insertions", ")", "try", ":", "index", ",", "itokens", "=", "next", "(", "insertions", ")", "except", "StopIteration", ":", "# no insertions", "for", "item", "in", "tokens", ":", "yield", "item", "return", "realpos", "=", "None", "insleft", "=", "True", "# iterate over the token stream where we want to insert", "# the tokens from the insertion list.", "for", "i", ",", "t", ",", "v", "in", "tokens", ":", "# first iteration. store the position of first item", "if", "realpos", "is", "None", ":", "realpos", "=", "i", "oldi", "=", "0", "while", "insleft", "and", "i", "+", "len", "(", "v", ")", ">=", "index", ":", "tmpval", "=", "v", "[", "oldi", ":", "index", "-", "i", "]", "yield", "realpos", ",", "t", ",", "tmpval", "realpos", "+=", "len", "(", "tmpval", ")", "for", "it_index", ",", "it_token", ",", "it_value", "in", "itokens", ":", "yield", "realpos", ",", "it_token", ",", "it_value", "realpos", "+=", "len", "(", "it_value", ")", "oldi", "=", "index", "-", "i", "try", ":", "index", ",", "itokens", "=", "next", "(", "insertions", ")", "except", "StopIteration", ":", "insleft", "=", "False", "break", "# not strictly necessary", "yield", "realpos", ",", "t", ",", "v", "[", "oldi", ":", "]", "realpos", "+=", "len", "(", "v", ")", "-", "oldi", "# leftover tokens", "while", "insleft", ":", "# no normal tokens, set realpos to zero", "realpos", "=", "realpos", "or", "0", "for", "p", ",", "t", ",", "v", "in", "itokens", ":", "yield", "realpos", ",", "t", ",", "v", "realpos", "+=", "len", "(", "v", ")", "try", ":", "index", ",", "itokens", "=", "next", "(", "insertions", ")", "except", "StopIteration", ":", "insleft", "=", "False", "break" ]
[ 757, 0 ]
[ 817, 17 ]
python
en
['en', 'error', 'th']
False
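A runnable sketch of how the console lexers typically drive this helper: one prompt token inserted at offset 0, ahead of the sub-lexed code (assumes Pygments):
    from pygments.lexer import do_insertions
    from pygments.lexers import PythonLexer
    from pygments.token import Generic

    line = '>>> print(1 + 1)\n'
    insertions = [(0, [(0, Generic.Prompt, line[:4])])]
    code_tokens = PythonLexer().get_tokens_unprocessed(line[4:])
    merged = list(do_insertions(insertions, code_tokens))
    # merged starts with (0, Generic.Prompt, '>>> ') and every later
    # offset is shifted past the inserted prompt.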
RegexLexerMeta._process_regex
(cls, regex, rflags, state)
Preprocess the regular expression component of a token definition.
Preprocess the regular expression component of a token definition.
def _process_regex(cls, regex, rflags, state): """Preprocess the regular expression component of a token definition.""" if isinstance(regex, Future): regex = regex.get() return re.compile(regex, rflags).match
[ "def", "_process_regex", "(", "cls", ",", "regex", ",", "rflags", ",", "state", ")", ":", "if", "isinstance", "(", "regex", ",", "Future", ")", ":", "regex", "=", "regex", ".", "get", "(", ")", "return", "re", ".", "compile", "(", "regex", ",", "rflags", ")", ".", "match" ]
[ 422, 4 ]
[ 426, 46 ]
python
en
['en', 'en', 'en']
True
RegexLexerMeta._process_token
(cls, token)
Preprocess the token component of a token definition.
Preprocess the token component of a token definition.
def _process_token(cls, token): """Preprocess the token component of a token definition.""" assert type(token) is _TokenType or callable(token), \ 'token type must be simple type or callable, not %r' % (token,) return token
[ "def", "_process_token", "(", "cls", ",", "token", ")", ":", "assert", "type", "(", "token", ")", "is", "_TokenType", "or", "callable", "(", "token", ")", ",", "'token type must be simple type or callable, not %r'", "%", "(", "token", ",", ")", "return", "token" ]
[ 428, 4 ]
[ 432, 20 ]
python
en
['en', 'en', 'en']
True
RegexLexerMeta._process_new_state
(cls, new_state, unprocessed, processed)
Preprocess the state transition action of a token definition.
Preprocess the state transition action of a token definition.
def _process_new_state(cls, new_state, unprocessed, processed): """Preprocess the state transition action of a token definition.""" if isinstance(new_state, str): # an existing state if new_state == '#pop': return -1 elif new_state in unprocessed: return (new_state,) elif new_state == '#push': return new_state elif new_state[:5] == '#pop:': return -int(new_state[5:]) else: assert False, 'unknown new state %r' % new_state elif isinstance(new_state, combined): # combine a new state from existing ones tmp_state = '_tmp_%d' % cls._tmpname cls._tmpname += 1 itokens = [] for istate in new_state: assert istate != new_state, 'circular state ref %r' % istate itokens.extend(cls._process_state(unprocessed, processed, istate)) processed[tmp_state] = itokens return (tmp_state,) elif isinstance(new_state, tuple): # push more than one state for istate in new_state: assert (istate in unprocessed or istate in ('#pop', '#push')), \ 'unknown new state ' + istate return new_state else: assert False, 'unknown new state def %r' % new_state
[ "def", "_process_new_state", "(", "cls", ",", "new_state", ",", "unprocessed", ",", "processed", ")", ":", "if", "isinstance", "(", "new_state", ",", "str", ")", ":", "# an existing state", "if", "new_state", "==", "'#pop'", ":", "return", "-", "1", "elif", "new_state", "in", "unprocessed", ":", "return", "(", "new_state", ",", ")", "elif", "new_state", "==", "'#push'", ":", "return", "new_state", "elif", "new_state", "[", ":", "5", "]", "==", "'#pop:'", ":", "return", "-", "int", "(", "new_state", "[", "5", ":", "]", ")", "else", ":", "assert", "False", ",", "'unknown new state %r'", "%", "new_state", "elif", "isinstance", "(", "new_state", ",", "combined", ")", ":", "# combine a new state from existing ones", "tmp_state", "=", "'_tmp_%d'", "%", "cls", ".", "_tmpname", "cls", ".", "_tmpname", "+=", "1", "itokens", "=", "[", "]", "for", "istate", "in", "new_state", ":", "assert", "istate", "!=", "new_state", ",", "'circular state ref %r'", "%", "istate", "itokens", ".", "extend", "(", "cls", ".", "_process_state", "(", "unprocessed", ",", "processed", ",", "istate", ")", ")", "processed", "[", "tmp_state", "]", "=", "itokens", "return", "(", "tmp_state", ",", ")", "elif", "isinstance", "(", "new_state", ",", "tuple", ")", ":", "# push more than one state", "for", "istate", "in", "new_state", ":", "assert", "(", "istate", "in", "unprocessed", "or", "istate", "in", "(", "'#pop'", ",", "'#push'", ")", ")", ",", "'unknown new state '", "+", "istate", "return", "new_state", "else", ":", "assert", "False", ",", "'unknown new state def %r'", "%", "new_state" ]
[ 434, 4 ]
[ 467, 64 ]
python
en
['en', 'en', 'en']
True
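A throwaway lexer sketch showing the new-state forms this method accepts: a plain state name (push), '#pop', and '#push' (assumes Pygments; names are illustrative):
    from pygments.lexer import RegexLexer
    from pygments.token import Punctuation, String, Text

    class StateDemoLexer(RegexLexer):
        tokens = {
            'root': [
                (r'"', String, 'string'),        # push the 'string' state
                (r'\(', Punctuation, 'paren'),
                (r'[^"(\n]+', Text),
                (r'\n', Text),
            ],
            'string': [
                (r'"', String, '#pop'),          # pop back to 'root'
                (r'[^"]+', String),
            ],
            'paren': [
                (r'\(', Punctuation, '#push'),   # re-push this state for nesting
                (r'\)', Punctuation, '#pop'),
                (r'[^()\n]+', Text),
                (r'\n', Text),
            ],
        }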
RegexLexerMeta._process_state
(cls, unprocessed, processed, state)
Preprocess a single state definition.
Preprocess a single state definition.
def _process_state(cls, unprocessed, processed, state): """Preprocess a single state definition.""" assert type(state) is str, "wrong state name %r" % state assert state[0] != '#', "invalid state name %r" % state if state in processed: return processed[state] tokens = processed[state] = [] rflags = cls.flags for tdef in unprocessed[state]: if isinstance(tdef, include): # it's a state reference assert tdef != state, "circular state reference %r" % state tokens.extend(cls._process_state(unprocessed, processed, str(tdef))) continue if isinstance(tdef, _inherit): # should be processed already, but may not in the case of: # 1. the state has no counterpart in any parent # 2. the state includes more than one 'inherit' continue if isinstance(tdef, default): new_state = cls._process_new_state(tdef.state, unprocessed, processed) tokens.append((re.compile('').match, None, new_state)) continue assert type(tdef) is tuple, "wrong rule def %r" % tdef try: rex = cls._process_regex(tdef[0], rflags, state) except Exception as err: raise ValueError("uncompilable regex %r in state %r of %r: %s" % (tdef[0], state, cls, err)) token = cls._process_token(tdef[1]) if len(tdef) == 2: new_state = None else: new_state = cls._process_new_state(tdef[2], unprocessed, processed) tokens.append((rex, token, new_state)) return tokens
[ "def", "_process_state", "(", "cls", ",", "unprocessed", ",", "processed", ",", "state", ")", ":", "assert", "type", "(", "state", ")", "is", "str", ",", "\"wrong state name %r\"", "%", "state", "assert", "state", "[", "0", "]", "!=", "'#'", ",", "\"invalid state name %r\"", "%", "state", "if", "state", "in", "processed", ":", "return", "processed", "[", "state", "]", "tokens", "=", "processed", "[", "state", "]", "=", "[", "]", "rflags", "=", "cls", ".", "flags", "for", "tdef", "in", "unprocessed", "[", "state", "]", ":", "if", "isinstance", "(", "tdef", ",", "include", ")", ":", "# it's a state reference", "assert", "tdef", "!=", "state", ",", "\"circular state reference %r\"", "%", "state", "tokens", ".", "extend", "(", "cls", ".", "_process_state", "(", "unprocessed", ",", "processed", ",", "str", "(", "tdef", ")", ")", ")", "continue", "if", "isinstance", "(", "tdef", ",", "_inherit", ")", ":", "# should be processed already, but may not in the case of:", "# 1. the state has no counterpart in any parent", "# 2. the state includes more than one 'inherit'", "continue", "if", "isinstance", "(", "tdef", ",", "default", ")", ":", "new_state", "=", "cls", ".", "_process_new_state", "(", "tdef", ".", "state", ",", "unprocessed", ",", "processed", ")", "tokens", ".", "append", "(", "(", "re", ".", "compile", "(", "''", ")", ".", "match", ",", "None", ",", "new_state", ")", ")", "continue", "assert", "type", "(", "tdef", ")", "is", "tuple", ",", "\"wrong rule def %r\"", "%", "tdef", "try", ":", "rex", "=", "cls", ".", "_process_regex", "(", "tdef", "[", "0", "]", ",", "rflags", ",", "state", ")", "except", "Exception", "as", "err", ":", "raise", "ValueError", "(", "\"uncompilable regex %r in state %r of %r: %s\"", "%", "(", "tdef", "[", "0", "]", ",", "state", ",", "cls", ",", "err", ")", ")", "token", "=", "cls", ".", "_process_token", "(", "tdef", "[", "1", "]", ")", "if", "len", "(", "tdef", ")", "==", "2", ":", "new_state", "=", "None", "else", ":", "new_state", "=", "cls", ".", "_process_new_state", "(", "tdef", "[", "2", "]", ",", "unprocessed", ",", "processed", ")", "tokens", ".", "append", "(", "(", "rex", ",", "token", ",", "new_state", ")", ")", "return", "tokens" ]
[ 469, 4 ]
[ 511, 21 ]
python
en
['en', 'en', 'en']
True
RegexLexerMeta.process_tokendef
(cls, name, tokendefs=None)
Preprocess a dictionary of token definitions.
Preprocess a dictionary of token definitions.
def process_tokendef(cls, name, tokendefs=None): """Preprocess a dictionary of token definitions.""" processed = cls._all_tokens[name] = {} tokendefs = tokendefs or cls.tokens[name] for state in list(tokendefs): cls._process_state(tokendefs, processed, state) return processed
[ "def", "process_tokendef", "(", "cls", ",", "name", ",", "tokendefs", "=", "None", ")", ":", "processed", "=", "cls", ".", "_all_tokens", "[", "name", "]", "=", "{", "}", "tokendefs", "=", "tokendefs", "or", "cls", ".", "tokens", "[", "name", "]", "for", "state", "in", "list", "(", "tokendefs", ")", ":", "cls", ".", "_process_state", "(", "tokendefs", ",", "processed", ",", "state", ")", "return", "processed" ]
[ 513, 4 ]
[ 519, 24 ]
python
en
['en', 'en', 'en']
True
RegexLexerMeta.get_tokendefs
(cls)
Merge tokens from superclasses in MRO order, returning a single tokendef dictionary. Any state that is not defined by a subclass will be inherited automatically. States that *are* defined by subclasses will, by default, override that state in the superclass. If a subclass wishes to inherit definitions from a superclass, it can use the special value "inherit", which will cause the superclass' state definition to be included at that point in the state.
Merge tokens from superclasses in MRO order, returning a single tokendef dictionary.
def get_tokendefs(cls): """ Merge tokens from superclasses in MRO order, returning a single tokendef dictionary. Any state that is not defined by a subclass will be inherited automatically. States that *are* defined by subclasses will, by default, override that state in the superclass. If a subclass wishes to inherit definitions from a superclass, it can use the special value "inherit", which will cause the superclass' state definition to be included at that point in the state. """ tokens = {} inheritable = {} for c in cls.__mro__: toks = c.__dict__.get('tokens', {}) for state, items in iteritems(toks): curitems = tokens.get(state) if curitems is None: # N.b. because this is assigned by reference, sufficiently # deep hierarchies are processed incrementally (e.g. for # A(B), B(C), C(RegexLexer), B will be premodified so X(B) # will not see any inherits in B). tokens[state] = items try: inherit_ndx = items.index(inherit) except ValueError: continue inheritable[state] = inherit_ndx continue inherit_ndx = inheritable.pop(state, None) if inherit_ndx is None: continue # Replace the "inherit" value with the items curitems[inherit_ndx:inherit_ndx+1] = items try: # N.b. this is the index in items (that is, the superclass # copy), so offset required when storing below. new_inh_ndx = items.index(inherit) except ValueError: pass else: inheritable[state] = inherit_ndx + new_inh_ndx return tokens
[ "def", "get_tokendefs", "(", "cls", ")", ":", "tokens", "=", "{", "}", "inheritable", "=", "{", "}", "for", "c", "in", "cls", ".", "__mro__", ":", "toks", "=", "c", ".", "__dict__", ".", "get", "(", "'tokens'", ",", "{", "}", ")", "for", "state", ",", "items", "in", "iteritems", "(", "toks", ")", ":", "curitems", "=", "tokens", ".", "get", "(", "state", ")", "if", "curitems", "is", "None", ":", "# N.b. because this is assigned by reference, sufficiently", "# deep hierarchies are processed incrementally (e.g. for", "# A(B), B(C), C(RegexLexer), B will be premodified so X(B)", "# will not see any inherits in B).", "tokens", "[", "state", "]", "=", "items", "try", ":", "inherit_ndx", "=", "items", ".", "index", "(", "inherit", ")", "except", "ValueError", ":", "continue", "inheritable", "[", "state", "]", "=", "inherit_ndx", "continue", "inherit_ndx", "=", "inheritable", ".", "pop", "(", "state", ",", "None", ")", "if", "inherit_ndx", "is", "None", ":", "continue", "# Replace the \"inherit\" value with the items", "curitems", "[", "inherit_ndx", ":", "inherit_ndx", "+", "1", "]", "=", "items", "try", ":", "# N.b. this is the index in items (that is, the superclass", "# copy), so offset required when storing below.", "new_inh_ndx", "=", "items", ".", "index", "(", "inherit", ")", "except", "ValueError", ":", "pass", "else", ":", "inheritable", "[", "state", "]", "=", "inherit_ndx", "+", "new_inh_ndx", "return", "tokens" ]
[ 521, 4 ]
[ 568, 21 ]
python
en
['en', 'error', 'th']
False
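A sketch of the 'inherit' mechanism this docstring describes (assumes Pygments; lexer names are illustrative):
    from pygments.lexer import RegexLexer, inherit
    from pygments.token import Comment, Keyword, Text

    class BaseDemoLexer(RegexLexer):
        tokens = {
            'root': [
                (r'\bbase\b', Keyword),
                (r'[^\n]+', Text),
                (r'\n', Text),
            ],
        }

    class ChildDemoLexer(BaseDemoLexer):
        tokens = {
            'root': [
                (r'#[^\n]*', Comment),
                inherit,   # splice BaseDemoLexer's 'root' rules in here
            ],
        }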
RegexLexerMeta.__call__
(cls, *args, **kwds)
Instantiate cls after preprocessing its token definitions.
Instantiate cls after preprocessing its token definitions.
def __call__(cls, *args, **kwds): """Instantiate cls after preprocessing its token definitions.""" if '_tokens' not in cls.__dict__: cls._all_tokens = {} cls._tmpname = 0 if hasattr(cls, 'token_variants') and cls.token_variants: # don't process yet pass else: cls._tokens = cls.process_tokendef('', cls.get_tokendefs()) return type.__call__(cls, *args, **kwds)
[ "def", "__call__", "(", "cls", ",", "*", "args", ",", "*", "*", "kwds", ")", ":", "if", "'_tokens'", "not", "in", "cls", ".", "__dict__", ":", "cls", ".", "_all_tokens", "=", "{", "}", "cls", ".", "_tmpname", "=", "0", "if", "hasattr", "(", "cls", ",", "'token_variants'", ")", "and", "cls", ".", "token_variants", ":", "# don't process yet", "pass", "else", ":", "cls", ".", "_tokens", "=", "cls", ".", "process_tokendef", "(", "''", ",", "cls", ".", "get_tokendefs", "(", ")", ")", "return", "type", ".", "__call__", "(", "cls", ",", "*", "args", ",", "*", "*", "kwds", ")" ]
[ 570, 4 ]
[ 581, 48 ]
python
en
['en', 'en', 'en']
True
ExtendedRegexLexer.get_tokens_unprocessed
(self, text=None, context=None)
Split ``text`` into (tokentype, text) pairs. If ``context`` is given, use this lexer context instead.
Split ``text`` into (tokentype, text) pairs. If ``context`` is given, use this lexer context instead.
def get_tokens_unprocessed(self, text=None, context=None): """ Split ``text`` into (tokentype, text) pairs. If ``context`` is given, use this lexer context instead. """ tokendefs = self._tokens if not context: ctx = LexerContext(text, 0) statetokens = tokendefs['root'] else: ctx = context statetokens = tokendefs[ctx.stack[-1]] text = ctx.text while 1: for rexmatch, action, new_state in statetokens: m = rexmatch(text, ctx.pos, ctx.end) if m: if action is not None: if type(action) is _TokenType: yield ctx.pos, action, m.group() ctx.pos = m.end() else: for item in action(self, m, ctx): yield item if not new_state: # altered the state stack? statetokens = tokendefs[ctx.stack[-1]] # CAUTION: callback must set ctx.pos! if new_state is not None: # state transition if isinstance(new_state, tuple): for state in new_state: if state == '#pop': ctx.stack.pop() elif state == '#push': ctx.stack.append(ctx.stack[-1]) else: ctx.stack.append(state) elif isinstance(new_state, int): # pop del ctx.stack[new_state:] elif new_state == '#push': ctx.stack.append(ctx.stack[-1]) else: assert False, "wrong state def: %r" % new_state statetokens = tokendefs[ctx.stack[-1]] break else: try: if ctx.pos >= ctx.end: break if text[ctx.pos] == '\n': # at EOL, reset state to "root" ctx.stack = ['root'] statetokens = tokendefs['root'] yield ctx.pos, Text, u'\n' ctx.pos += 1 continue yield ctx.pos, Error, text[ctx.pos] ctx.pos += 1 except IndexError: break
[ "def", "get_tokens_unprocessed", "(", "self", ",", "text", "=", "None", ",", "context", "=", "None", ")", ":", "tokendefs", "=", "self", ".", "_tokens", "if", "not", "context", ":", "ctx", "=", "LexerContext", "(", "text", ",", "0", ")", "statetokens", "=", "tokendefs", "[", "'root'", "]", "else", ":", "ctx", "=", "context", "statetokens", "=", "tokendefs", "[", "ctx", ".", "stack", "[", "-", "1", "]", "]", "text", "=", "ctx", ".", "text", "while", "1", ":", "for", "rexmatch", ",", "action", ",", "new_state", "in", "statetokens", ":", "m", "=", "rexmatch", "(", "text", ",", "ctx", ".", "pos", ",", "ctx", ".", "end", ")", "if", "m", ":", "if", "action", "is", "not", "None", ":", "if", "type", "(", "action", ")", "is", "_TokenType", ":", "yield", "ctx", ".", "pos", ",", "action", ",", "m", ".", "group", "(", ")", "ctx", ".", "pos", "=", "m", ".", "end", "(", ")", "else", ":", "for", "item", "in", "action", "(", "self", ",", "m", ",", "ctx", ")", ":", "yield", "item", "if", "not", "new_state", ":", "# altered the state stack?", "statetokens", "=", "tokendefs", "[", "ctx", ".", "stack", "[", "-", "1", "]", "]", "# CAUTION: callback must set ctx.pos!", "if", "new_state", "is", "not", "None", ":", "# state transition", "if", "isinstance", "(", "new_state", ",", "tuple", ")", ":", "for", "state", "in", "new_state", ":", "if", "state", "==", "'#pop'", ":", "ctx", ".", "stack", ".", "pop", "(", ")", "elif", "state", "==", "'#push'", ":", "ctx", ".", "stack", ".", "append", "(", "ctx", ".", "stack", "[", "-", "1", "]", ")", "else", ":", "ctx", ".", "stack", ".", "append", "(", "state", ")", "elif", "isinstance", "(", "new_state", ",", "int", ")", ":", "# pop", "del", "ctx", ".", "stack", "[", "new_state", ":", "]", "elif", "new_state", "==", "'#push'", ":", "ctx", ".", "stack", ".", "append", "(", "ctx", ".", "stack", "[", "-", "1", "]", ")", "else", ":", "assert", "False", ",", "\"wrong state def: %r\"", "%", "new_state", "statetokens", "=", "tokendefs", "[", "ctx", ".", "stack", "[", "-", "1", "]", "]", "break", "else", ":", "try", ":", "if", "ctx", ".", "pos", ">=", "ctx", ".", "end", ":", "break", "if", "text", "[", "ctx", ".", "pos", "]", "==", "'\\n'", ":", "# at EOL, reset state to \"root\"", "ctx", ".", "stack", "=", "[", "'root'", "]", "statetokens", "=", "tokendefs", "[", "'root'", "]", "yield", "ctx", ".", "pos", ",", "Text", ",", "u'\\n'", "ctx", ".", "pos", "+=", "1", "continue", "yield", "ctx", ".", "pos", ",", "Error", ",", "text", "[", "ctx", ".", "pos", "]", "ctx", ".", "pos", "+=", "1", "except", "IndexError", ":", "break" ]
[ 693, 4 ]
[ 754, 25 ]
python
en
['en', 'error', 'th']
False
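The loop above hands callbacks the full LexerContext, and, as the CAUTION comment notes, such callbacks must advance ctx.pos themselves. A hedged sketch of that pattern (lexer and callback names are hypothetical):

from pygments.lexer import ExtendedRegexLexer
from pygments.token import Name, Text

def shout(lexer, match, ctx):
    # Context-aware callback: yield a token, then move ctx.pos manually.
    yield match.start(), Name, match.group().upper()
    ctx.pos = match.end()

class ShoutLexer(ExtendedRegexLexer):
    tokens = {
        'root': [
            (r'\w+', shout),
            (r'\s+', Text),
        ],
    }

print(list(ShoutLexer().get_tokens_unprocessed('hello world')))
# e.g. [(0, Token.Name, 'HELLO'), (5, Token.Text, ' '), (6, Token.Name, 'WORLD')]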
GlobReaderBatchKwargsGenerator._get_data_asset_paths
(self, data_asset_name)
Returns a list of filepaths associated with the given data_asset_name

Args:
    data_asset_name:

Returns:
    paths (list)
Returns a list of filepaths associated with the given data_asset_name
def _get_data_asset_paths(self, data_asset_name):
    """
    Returns a list of filepaths associated with the given data_asset_name

    Args:
        data_asset_name:

    Returns:
        paths (list)
    """
    glob_config = self._get_data_asset_config(data_asset_name)
    return glob.glob(os.path.join(self.base_directory, glob_config["glob"]))
[ "def", "_get_data_asset_paths", "(", "self", ",", "data_asset_name", ")", ":", "glob_config", "=", "self", ".", "_get_data_asset_config", "(", "data_asset_name", ")", "return", "glob", ".", "glob", "(", "os", ".", "path", ".", "join", "(", "self", ".", "base_directory", ",", "glob_config", "[", "\"glob\"", "]", ")", ")" ]
[ 177, 4 ]
[ 188, 80 ]
python
en
['en', 'error', 'th']
False
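Outside of Great Expectations, the resolution done by _get_data_asset_paths boils down to a pattern lookup plus glob.glob. A standalone sketch (directory layout and config are hypothetical):

import glob
import os

base_directory = "/data"
asset_configs = {"my_asset": {"glob": "my_asset/*.csv"}}

def get_data_asset_paths(data_asset_name):
    # Mirror of _get_data_asset_paths: look up the asset's glob pattern and
    # expand it relative to the base directory.
    glob_config = asset_configs[data_asset_name]
    return glob.glob(os.path.join(base_directory, glob_config["glob"]))

print(get_data_asset_paths("my_asset"))
# e.g. ['/data/my_asset/2021-01.csv', '/data/my_asset/2021-02.csv']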