Dataset columns:

    identifier                       string (length 1 to 155)
    parameters                       string (length 2 to 6.09k)
    docstring                        string (length 11 to 63.4k)
    docstring_summary                string (length 0 to 63.4k)
    function                         string (length 29 to 99.8k)
    function_tokens                  sequence
    start_point                      sequence
    end_point                        sequence
    language                         string (1 distinct value)
    docstring_language               string (length 2 to 7)
    docstring_language_predictions   string (length 18 to 23)
    is_langid_reliable               string (2 distinct values)

Sample rows:

identifier: PrivateIngredientsApiTests.test_create_ingredient_invalid
parameters: (self)
docstring_summary: Test creating invalid ingredient fails
function:

    def test_create_ingredient_invalid(self):
        """Test creating invalid ingredient fails"""
        payload = {'name': ''}
        res = self.client.post(INGREDIENTS_URL, payload)

        self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)

start_point: [79, 4]; end_point: [84, 70]
language: python; docstring_language: en; docstring_language_predictions: ['af', 'en', 'en']; is_langid_reliable: True

identifier: PrivateIngredientsApiTests.test_retrieve_ingredients_assigned_to_recipes
parameters: (self)
docstring_summary: Test filtering ingredients by those assigned to recipes
function:

    def test_retrieve_ingredients_assigned_to_recipes(self):
        """Test filtering ingredients by those assigned to recipes"""
        ingredient1 = Ingredient.objects.create(user=self.user, name='Apples')
        ingredient2 = Ingredient.objects.create(user=self.user, name='Turkey')
        recipe = Recipe.objects.create(
            title='Apple crumble',
            time_minutes=10,
            price=5.00,
            user=self.user
        )
        recipe.ingredients.add(ingredient1)

        res = self.client.get(INGREDIENTS_URL, {'assigned_only': 1})

        serializer1 = IngredientSerializer(ingredient1)
        serializer2 = IngredientSerializer(ingredient2)
        self.assertIn(serializer1.data, res.data)
        self.assertNotIn(serializer2.data, res.data)

start_point: [86, 4]; end_point: [103, 52]
language: python; docstring_language: en; docstring_language_predictions: ['en', 'en', 'en']; is_langid_reliable: True

identifier: PrivateIngredientsApiTests.test_retrieve_ingredients_assigned_unique
parameters: (self)
docstring_summary: Test filtering ingredients by those assigned unique items
function:

    def test_retrieve_ingredients_assigned_unique(self):
        """Test filtering ingredients by those assigned unique items"""
        ingredient = Ingredient.objects.create(user=self.user, name='Eggs')
        Ingredient.objects.create(user=self.user, name='Cheese')
        recipe1 = Recipe.objects.create(
            title='Eggs on toast',
            time_minutes=10,
            price=5.00,
            user=self.user
        )
        recipe2 = Recipe.objects.create(
            title='Coriander eggs on toast',
            time_minutes=4,
            price=3.00,
            user=self.user
        )
        recipe1.ingredients.add(ingredient)
        recipe2.ingredients.add(ingredient)

        res = self.client.get(INGREDIENTS_URL, {'assigned_only': 1})

        self.assertEqual(len(res.data), 1)

start_point: [105, 4]; end_point: [126, 42]
language: python; docstring_language: en; docstring_language_predictions: ['en', 'en', 'en']; is_langid_reliable: True

identifier: S3SubdirReaderBatchKwargsGenerator._build_batch_kwargs
parameters: (self, batch_parameters)
function:

    def _build_batch_kwargs(self, batch_parameters):
        """
        Args:
            batch_parameters:

        Returns:
            batch_kwargs
        """
        try:
            data_asset_name = batch_parameters.pop("data_asset_name")
        except KeyError:
            raise BatchKwargsError(
                "Unable to build BatchKwargs: no name provided in batch_parameters.",
                batch_kwargs=batch_parameters,
            )

        if "partition_id" in batch_parameters:
            partition_id = batch_parameters.pop("partition_id")

            # Find the path
            path = None
            for extension in self.known_extensions:
                if self.fs.isfile(
                    os.path.join(
                        self.base_directory, data_asset_name, partition_id + extension
                    )
                ):
                    path = os.path.join(
                        self.base_directory, data_asset_name, partition_id + extension
                    )

            if path is None:
                logger.warning(
                    "Unable to find path with the provided partition; searching for asset-name partitions."
                )
                # Fall through to this case in the event that there is not a
                # subdir available, or if partition_id was not provided
                if self.fs.isfile(os.path.join(self.base_directory, data_asset_name)):
                    path = os.path.join(self.base_directory, data_asset_name)

                for extension in self.known_extensions:
                    if self.fs.isfile(
                        os.path.join(self.base_directory, data_asset_name + extension)
                    ):
                        path = os.path.join(
                            self.base_directory, data_asset_name + extension
                        )

            if path is None:
                raise BatchKwargsError(
                    "Unable to build batch kwargs for asset '%s'" % data_asset_name,
                    batch_parameters,
                )

            return self._build_batch_kwargs_from_path(path, **batch_parameters)

        else:
            return self.yield_batch_kwargs(
                data_asset_name=data_asset_name, **batch_parameters
            )

start_point: [137, 4]; end_point: [197, 13]
language: python; docstring_language: en; docstring_language_predictions: ['en', 'error', 'th']; is_langid_reliable: False

identifier: S3SubdirReaderBatchKwargsGenerator._window_to_s3_path
parameters: (self, path: str)
docstring_summary: Handle Windows "\" path separators: "s3://bucket\\prefix" => "s3://bucket/prefix"
function:

    def _window_to_s3_path(self, path: str):
        """
        To handle Windows "\" path separators.
        "s3://bucket\\prefix" => "s3://bucket/prefix"

        >>> path = os.path.join("s3://bucket", "prefix")
        >>> window_to_s3_path(path)
        """
        s3_url = urlparse(path)
        s3_path = Path(s3_url.path)
        s3_new_url = urlunparse(
            (
                s3_url.scheme,
                s3_url.netloc,
                s3_path.as_posix(),
                s3_url.params,
                s3_url.query,
                s3_url.fragment,
            )
        )
        return s3_new_url

start_point: [299, 4]; end_point: [319, 25]
language: python; docstring_language: en; docstring_language_predictions: ['en', 'error', 'th']; is_langid_reliable: False
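
The conversion above relies on `pathlib.Path` resolving to a Windows path class, so its effect is platform-dependent. Here is a small added sketch of the same idea, pinned to `PureWindowsPath` so it reproduces on any OS (illustrative only, not part of the dataset row):

    from pathlib import PureWindowsPath
    from urllib.parse import urlparse, urlunparse

    # Same steps as _window_to_s3_path, with Windows path semantics made
    # explicit so the backslash is normalized on every platform.
    url = urlparse("s3://bucket/data\\asset.csv")
    fixed = urlunparse((url.scheme, url.netloc,
                        PureWindowsPath(url.path).as_posix(),
                        url.params, url.query, url.fragment))
    print(fixed)  # s3://bucket/data/asset.csv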

identifier: _convert_anomalies_to_contextual
parameters: (X, interval=1)
docstring_summary: Convert list of timestamps to list of tuples.
function:

    def _convert_anomalies_to_contextual(X, interval=1):
        """Convert list of timestamps to list of tuples.

        Convert a list of anomalies identified by timestamps
        to a list of tuples marking the start and end interval
        of anomalies; make it contextually defined.

        Args:
            X (list): contains timestamp of anomalies.
            interval (int): allowed gap between anomalies.

        Returns:
            list:
                tuple (start, end, `None`) timestamp.
        """
        if len(X) == 0:
            return []

        X = sorted(X)

        start_ts = 0
        max_ts = len(X) - 1

        anomalies = list()
        break_point = start_ts
        while break_point < max_ts:
            if X[break_point + 1] - X[break_point] <= interval:
                break_point += 1
                continue

            anomalies.append((X[start_ts], X[break_point], None))
            break_point += 1
            start_ts = break_point

        anomalies.append((X[start_ts], X[break_point], None))
        return anomalies

start_point: [15, 0]; end_point: [50, 20]
language: python; docstring_language: en; docstring_language_predictions: ['en', 'en', 'en']; is_langid_reliable: True
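
Since the function is pure, its grouping behavior is easy to check; a quick added example (timestamps whose gaps are within `interval` merge into a single tuple):

    # Gaps of 1 are merged; the jump from 3 to 10 starts a new range.
    print(_convert_anomalies_to_contextual([1, 2, 3, 10, 11], interval=1))
    # [(1, 3, None), (10, 11, None)]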

identifier: split_sequence
parameters: (X, index, target_column, sequence_size, overlap_size)
docstring_summary: Split sequences of time series data.
function:

    def split_sequence(X, index, target_column, sequence_size, overlap_size):
        """Split sequences of time series data.

        The function creates a list of input sequences by splitting the input
        sequence into partitions with a specified size and pads it with values
        from the previous sequence according to the overlap size.

        Args:
            X (ndarray): N-dimensional value sequence to iterate over.
            index (ndarray): N-dimensional index sequence to iterate over.
            target_column (int): Indicating which column of X is the target.
            sequence_size (int): Length of the input sequences.
            overlap_size (int): Length of the values from previous window.

        Returns:
            tuple:
                * List of sliced value as ndarray.
                * List of sliced index as ndarray.
        """
        X_ = list()
        index_ = list()

        overlap = 0
        start = 0
        max_start = len(X) - 1

        target = X[:, target_column]

        while start < max_start:
            end = start + sequence_size

            X_.append(target[start - overlap:end])
            index_.append(index[start - overlap:end])

            start = end
            overlap = overlap_size

        return X_, index_

start_point: [53, 0]; end_point: [95, 21]
language: python; docstring_language: en; docstring_language_predictions: ['en', 'en', 'en']; is_langid_reliable: True
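
An added toy example of the windowing (hypothetical input, column 0 as the target): the first window has no padding, and every later window is padded with `overlap_size` trailing values from the previous one.

    import numpy as np

    X = np.arange(20).reshape(10, 2)   # 10 rows, 2 columns
    index = np.arange(10)

    values, indices = split_sequence(X, index, target_column=0,
                                     sequence_size=4, overlap_size=1)
    print([v.tolist() for v in values])
    # [[0, 2, 4, 6], [6, 8, 10, 12, 14], [14, 16, 18]]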

identifier: detect_anomalies
parameters: (X, index, interval, overlap_size, subscription_key, endpoint, granularity, custom_interval=None, period=None, max_anomaly_ratio=None, sensitivity=None, timezone="UTC")
docstring_summary: Microsoft's Azure Anomaly Detection tool.
function:

    def detect_anomalies(X, index, interval, overlap_size, subscription_key,
                         endpoint, granularity, custom_interval=None, period=None,
                         max_anomaly_ratio=None, sensitivity=None, timezone="UTC"):
        """Microsoft's Azure Anomaly Detection tool.

        Args:
            X (list): Array containing the input value sequences.
            index (list): Array containing the input index sequences.
            interval (int): Integer denoting time span frequency of the data.
            overlap_size (int): Length of the values from the previous sequence
                that overlaps with the current sequence.
            subscription_key (str): Resource key for authenticating your requests.
            endpoint (str): Resource endpoint for sending API requests.
            granularity (str or Granularity): Can only be one of yearly, monthly,
                weekly, daily, hourly or minutely. Granularity is used to verify
                whether the input series is valid. Possible values include:
                'yearly', 'monthly', 'weekly', 'daily', 'hourly', 'minutely'.
            custom_interval (int): Integer used to set a non-standard time
                interval. For example, if the series is 5 minutes, the request
                can be set as `{"granularity": "minutely", "custom_interval": 5}`.
                If not given, `None` is used.
            period (int): Periodic value of a time series. If not given, `None`
                is used, and the API will determine the period automatically.
            max_anomaly_ratio (float): Advanced model parameter, max anomaly
                ratio in a time series. If not given, `None` is used.
            sensitivity (int): Advanced model parameter, between 0-99. The lower
                the value is, the larger the margin value will be, which means
                fewer anomalies will be accepted. If not given, `None` is used.
            timezone (str): String indicating the timezone of the timestamps.
                If not given, UTC is used as default. The format of the string
                should be compliant with ``pytz``, which can be found in
                http://pytz.sourceforge.net/.

        Returns:
            list:
                Array containing start-index, end-index, score for each anomalous
                sequence. Note that the API does not have an anomaly score, and
                so the score is set to `None`.
        """
        client = AnomalyDetectorClient(
            endpoint, CognitiveServicesCredentials(subscription_key))

        tz = pytz.timezone(timezone)
        overlap = 0
        result = list()
        for x, idx in zip(X, index):
            series = []
            for i in range(len(x)):
                idx_ = _convert_date(idx[i], tz)
                series.append(Point(timestamp=idx_, value=x[i]))

            request = Request(
                series=series, granularity=granularity,
                custom_interval=custom_interval, period=period,
                max_anomaly_ratio=max_anomaly_ratio, sensitivity=sensitivity)

            response = client.entire_detect(request)
            if response.is_anomaly:
                anomalous = response.is_anomaly[overlap:]
                index_ = idx[overlap:]

                result.extend(index_[anomalous])

            overlap = overlap_size

        return _convert_anomalies_to_contextual(result, interval)

start_point: [98, 0]; end_point: [168, 61]
language: python; docstring_language: en; docstring_language_predictions: ['en', 'en', 'en']; is_langid_reliable: True
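
A hedged sketch of how the pieces fit together; the key and endpoint are placeholders, and `values`/`timestamps` stand for the output of `split_sequence` above:

    anomalies = detect_anomalies(
        values, timestamps,
        interval=1,
        overlap_size=1,
        subscription_key="<your-resource-key>",                   # placeholder
        endpoint="https://<region>.api.cognitive.microsoft.com",  # placeholder
        granularity="minutely",
    )
    # -> [(start_index, end_index, None), ...]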

identifier: hierarchy_info_t.__init__
parameters: (self, related_class=None, access=None, is_virtual=False)
docstring_summary: creates class that contains partial information about class relationship
function:

    def __init__(self, related_class=None, access=None, is_virtual=False):
        """creates class that contains partial information about class
        relationship"""
        if related_class:
            assert isinstance(related_class, class_t)
        self._related_class = related_class
        if access:
            assert access in ACCESS_TYPES.ALL
        self._access = access
        self._is_virtual = is_virtual
        self._declaration_path = None
        self._declaration_path_hash = None

start_point: [60, 4]; end_point: [71, 42]
language: python; docstring_language: en; docstring_language_predictions: ['en', 'en', 'en']; is_langid_reliable: True

identifier: hierarchy_info_t.related_class
parameters: (self)
docstring_summary: reference to base or derived :class:`class <class_t>`
function:

    def related_class(self):
        """reference to base or derived :class:`class <class_t>`"""
        return self._related_class

start_point: [95, 4]; end_point: [97, 34]
language: python; docstring_language: en; docstring_language_predictions: ['en', 'en', 'en']; is_langid_reliable: True

identifier: hierarchy_info_t.access_type
parameters: (self)
docstring_summary: describes :class:`hierarchy type <ACCESS_TYPES>`
function:

    def access_type(self):
        """describes :class:`hierarchy type <ACCESS_TYPES>`"""
        return self.access

start_point: [118, 4]; end_point: [120, 26]
language: python; docstring_language: en; docstring_language_predictions: ['en', 'gl', 'en']; is_langid_reliable: True

identifier: hierarchy_info_t.is_virtual
parameters: (self)
docstring_summary: indicates whether the inheritance is virtual or not
function:

    def is_virtual(self):
        """indicates whether the inheritance is virtual or not"""
        return self._is_virtual

start_point: [129, 4]; end_point: [131, 31]
language: python; docstring_language: en; docstring_language_predictions: ['en', 'en', 'en']; is_langid_reliable: True

identifier: class_declaration_t.__init__
parameters: (self, name='')
docstring_summary: creates class that describes C++ class declaration ( and not definition )
function:

    def __init__(self, name=''):
        """creates class that describes C++ class declaration
        ( and not definition )"""
        declaration.declaration_t.__init__(self, name)
        self._aliases = []

start_point: [155, 4]; end_point: [159, 26]
language: python; docstring_language: en; docstring_language_predictions: ['en', 'lb', 'en']; is_langid_reliable: True

identifier: class_declaration_t._get__cmp__items
parameters: (self)
docstring_summary: implementation details
function:

    def _get__cmp__items(self):
        """implementation details"""
        return []

start_point: [161, 4]; end_point: [163, 17]
language: python; docstring_language: da; docstring_language_predictions: ['eo', 'da', 'en']; is_langid_reliable: False

identifier: class_declaration_t.aliases
parameters: (self)
docstring_summary: List of :class:`aliases <typedef_t>` to this instance
function:

    def aliases(self):
        """List of :class:`aliases <typedef_t>` to this instance"""
        return self._aliases

start_point: [169, 4]; end_point: [171, 28]
language: python; docstring_language: en; docstring_language_predictions: ['en', 'en', 'en']; is_langid_reliable: True

identifier: class_t.__init__
parameters: (self, name='', class_type=CLASS_TYPES.CLASS, is_abstract=False)
docstring_summary: creates class that describes C++ class definition
function:

    def __init__(
            self,
            name='',
            class_type=CLASS_TYPES.CLASS,
            is_abstract=False):
        """creates class that describes C++ class definition"""
        scopedef.scopedef_t.__init__(self, name)
        byte_info.byte_info.__init__(self)
        elaborated_info.elaborated_info.__init__(self, class_type)

        if class_type:
            assert class_type in CLASS_TYPES.ALL

        self._class_type = class_type
        self._bases = []
        self._derived = []
        self._is_abstract = is_abstract
        self._public_members = []
        self._private_members = []
        self._protected_members = []
        self._aliases = []
        self._recursive_bases = None
        self._recursive_derived = None
        self._use_demangled_as_name = False

start_point: [191, 4]; end_point: [212, 43]
language: python; docstring_language: en; docstring_language_predictions: ['en', 'lb', 'en']; is_langid_reliable: True

identifier: class_t._get__cmp__scope_items
parameters: (self)
docstring_summary: implementation details
function:

    def _get__cmp__scope_items(self):
        """implementation details"""
        return [
            self.class_type,
            [declaration_utils.declaration_path(base.related_class)
             for base in self.bases].sort(),
            [declaration_utils.declaration_path(derive.related_class)
             for derive in self.derived].sort(),
            self.is_abstract,
            self.public_members.sort(),
            self.private_members.sort(),
            self.protected_members.sort()]

start_point: [260, 4]; end_point: [271, 42]
language: python; docstring_language: da; docstring_language_predictions: ['eo', 'da', 'en']; is_langid_reliable: False

identifier: class_t.class_type
parameters: (self)
docstring_summary: describes class :class:`type <CLASS_TYPES>`
function:

    def class_type(self):
        """describes class :class:`type <CLASS_TYPES>`"""
        return self._class_type

start_point: [297, 4]; end_point: [299, 31]
language: python; docstring_language: en; docstring_language_predictions: ['en', 'lb', 'en']; is_langid_reliable: True

identifier: class_t.bases
parameters: (self)
docstring_summary: list of :class:`base classes <hierarchy_info_t>`
function:

    def bases(self):
        """list of :class:`base classes <hierarchy_info_t>`"""
        return self._bases

start_point: [308, 4]; end_point: [310, 26]
language: python; docstring_language: en; docstring_language_predictions: ['en', 'en', 'en']; is_langid_reliable: True

identifier: class_t.recursive_bases
parameters: (self)
docstring_summary: list of all :class:`base classes <hierarchy_info_t>`
function:

    def recursive_bases(self):
        """list of all :class:`base classes <hierarchy_info_t>`"""
        if self._recursive_bases is None:
            to_go = self.bases[:]
            all_bases = []
            while to_go:
                base = to_go.pop()
                if base not in all_bases:
                    all_bases.append(base)
                    to_go.extend(base.related_class.bases)
            self._recursive_bases = all_bases
        return self._recursive_bases

start_point: [317, 4]; end_point: [328, 36]
language: python; docstring_language: en; docstring_language_predictions: ['en', 'en', 'en']; is_langid_reliable: True
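
A minimal added sketch of the traversal, assuming the accessors shown in these rows are properties and that a hierarchy can be wired up by hand (normally the parser populates these):

    # Hypothetical chain C -> B -> A.
    a = class_t(name='A')
    b = class_t(name='B')
    c = class_t(name='C')
    b.bases.append(hierarchy_info_t(related_class=a, access=ACCESS_TYPES.PUBLIC))
    c.bases.append(hierarchy_info_t(related_class=b, access=ACCESS_TYPES.PUBLIC))

    # recursive_bases walks the whole chain and caches the result.
    print([info.related_class.name for info in c.recursive_bases])
    # ['B', 'A']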

identifier: class_t.derived
parameters: (self)
docstring_summary: list of :class:`derived classes <hierarchy_info_t>`
function:

    def derived(self):
        """list of :class:`derived classes <hierarchy_info_t>`"""
        return self._derived

start_point: [331, 4]; end_point: [333, 28]
language: python; docstring_language: en; docstring_language_predictions: ['en', 'en', 'ur']; is_langid_reliable: True

identifier: class_t.recursive_derived
parameters: (self)
docstring_summary: list of all :class:`derived classes <hierarchy_info_t>`
function:

    def recursive_derived(self):
        """list of all :class:`derived classes <hierarchy_info_t>`"""
        if self._recursive_derived is None:
            to_go = self.derived[:]
            all_derived = []
            while to_go:
                derive = to_go.pop()
                if derive not in all_derived:
                    all_derived.append(derive)
                    to_go.extend(derive.related_class.derived)
            self._recursive_derived = all_derived
        return self._recursive_derived

start_point: [340, 4]; end_point: [351, 38]
language: python; docstring_language: en; docstring_language_predictions: ['en', 'en', 'en']; is_langid_reliable: True

identifier: class_t.is_abstract
parameters: (self)
docstring_summary: describes whether the class is abstract or not
function:

    def is_abstract(self):
        """describes whether the class is abstract or not"""
        return self._is_abstract

start_point: [354, 4]; end_point: [356, 32]
language: python; docstring_language: en; docstring_language_predictions: ['en', 'en', 'en']; is_langid_reliable: True

identifier: class_t.public_members
parameters: (self)
docstring_summary: list of all public :class:`members <declarationt_>`
function:

    def public_members(self):
        """list of all public :class:`members <declarationt_>`"""
        return self._public_members

start_point: [363, 4]; end_point: [365, 35]
language: python; docstring_language: en; docstring_language_predictions: ['en', 'en', 'en']; is_langid_reliable: True

identifier: class_t.private_members
parameters: (self)
docstring_summary: list of all private :class:`members <declarationt_>`
function:

    def private_members(self):
        """list of all private :class:`members <declarationt_>`"""
        return self._private_members

start_point: [372, 4]; end_point: [374, 36]
language: python; docstring_language: en; docstring_language_predictions: ['en', 'en', 'en']; is_langid_reliable: True

identifier: class_t.protected_members
parameters: (self)
docstring_summary: list of all protected :class:`members <declarationt_>`
function:

    def protected_members(self):
        """list of all protected :class:`members <declarationt_>`"""
        return self._protected_members

start_point: [381, 4]; end_point: [383, 38]
language: python; docstring_language: en; docstring_language_predictions: ['en', 'en', 'en']; is_langid_reliable: True

identifier: class_t.aliases
parameters: (self)
docstring_summary: List of :class:`aliases <typedef_t>` to this instance
function:

    def aliases(self):
        """List of :class:`aliases <typedef_t>` to this instance"""
        return self._aliases

start_point: [390, 4]; end_point: [392, 28]
language: python; docstring_language: en; docstring_language_predictions: ['en', 'en', 'en']; is_langid_reliable: True

identifier: class_t.get_members
parameters: (self, access=None)
docstring_summary: returns list of members according to access type
function:

    def get_members(self, access=None):
        """
        returns list of members according to access type

        If access equals None, then the returned list will contain all
        members. You should not modify the list content; otherwise the
        optimization data will stop working and may give you wrong results.

        :param access: describes desired members
        :type access: :class:`ACCESS_TYPES`

        :rtype: [ members ]
        """
        if access == ACCESS_TYPES.PUBLIC:
            return self.public_members
        elif access == ACCESS_TYPES.PROTECTED:
            return self.protected_members
        elif access == ACCESS_TYPES.PRIVATE:
            return self.private_members
        else:
            all_members = []
            all_members.extend(self.public_members)
            all_members.extend(self.protected_members)
            all_members.extend(self.private_members)
            return all_members

start_point: [401, 4]; end_point: [425, 30]
language: python; docstring_language: en; docstring_language_predictions: ['en', 'error', 'th']; is_langid_reliable: False

identifier: class_t.adopt_declaration
parameters: (self, decl, access)
docstring_summary: adds new declaration to the class
function:

    def adopt_declaration(self, decl, access):
        """adds new declaration to the class

        :param decl: reference to a :class:`declaration_t`

        :param access: member access type
        :type access: :class:`ACCESS_TYPES`
        """
        if access == ACCESS_TYPES.PUBLIC:
            self.public_members.append(decl)
        elif access == ACCESS_TYPES.PROTECTED:
            self.protected_members.append(decl)
        elif access == ACCESS_TYPES.PRIVATE:
            self.private_members.append(decl)
        else:
            raise RuntimeError("Invalid access type: %s." % access)
        decl.parent = self
        decl.cache.reset()
        decl.cache.access_type = access

start_point: [427, 4]; end_point: [445, 39]
language: python; docstring_language: en; docstring_language_predictions: ['en', 'en', 'en']; is_langid_reliable: True

identifier: class_t.remove_declaration
parameters: (self, decl)
docstring_summary: removes decl from members list
function:

    def remove_declaration(self, decl):
        """
        removes decl from members list

        :param decl: declaration to be removed
        :type decl: :class:`declaration_t`
        """
        access_type = self.find_out_member_access_type(decl)
        if access_type == ACCESS_TYPES.PUBLIC:
            container = self.public_members
        elif access_type == ACCESS_TYPES.PROTECTED:
            container = self.protected_members
        else:  # decl.cache.access_type == ACCESS_TYPES.PRIVATE
            container = self.private_members
        del container[container.index(decl)]
        decl.cache.reset()

start_point: [447, 4]; end_point: [463, 26]
language: python; docstring_language: en; docstring_language_predictions: ['en', 'error', 'th']; is_langid_reliable: False

identifier: class_t.find_out_member_access_type
parameters: (self, member)
docstring_summary: returns member access type
function:

    def find_out_member_access_type(self, member):
        """
        returns member access type

        :param member: member of the class
        :type member: :class:`declaration_t`

        :rtype: :class:`ACCESS_TYPES`
        """
        assert member.parent is self
        if not member.cache.access_type:
            if member in self.public_members:
                access_type = ACCESS_TYPES.PUBLIC
            elif member in self.protected_members:
                access_type = ACCESS_TYPES.PROTECTED
            elif member in self.private_members:
                access_type = ACCESS_TYPES.PRIVATE
            else:
                raise RuntimeError(
                    "Unable to find member within internal members list.")
            member.cache.access_type = access_type
            return access_type
        else:
            return member.cache.access_type

start_point: [465, 4]; end_point: [488, 43]
language: python; docstring_language: en; docstring_language_predictions: ['en', 'error', 'th']; is_langid_reliable: False

identifier: class_t.top_class
parameters: (self)
docstring_summary: reference to a parent class, which contains this class and is defined within a namespace
function:

    def top_class(self):
        """reference to a parent class, which contains this class and is
        defined within a namespace

        if this class is defined directly under a namespace, self will be
        returned"""
        curr = self
        parent = self.parent
        while isinstance(parent, class_t):
            curr = parent
            parent = parent.parent
        return curr

start_point: [529, 4]; end_point: [539, 19]
language: python; docstring_language: en; docstring_language_predictions: ['en', 'en', 'en']; is_langid_reliable: True

identifier: dependency_info_t.decl
parameters: (self)
docstring_summary: Deprecated since 1.9.0. Will be removed in 2.0.0.
function:

    def decl(self):
        """
        Deprecated since 1.9.0. Will be removed in 2.0.0.
        """
        warnings.warn(
            "The decl attribute is deprecated.\n" +
            "Please use the declaration attribute instead.",
            DeprecationWarning)
        return self._decl

start_point: [588, 4]; end_point: [597, 25]
language: python; docstring_language: en; docstring_language_predictions: ['en', 'error', 'th']; is_langid_reliable: False

identifier: dependency_info_t.hint
parameters: (self)
docstring_summary: The declaration that reports the dependency can put some additional information about the dependency. It can be used later.
function:

    def hint(self):
        """The declaration that reports the dependency can put some
        additional information about the dependency. It can be used later."""
        return self._hint

start_point: [616, 4]; end_point: [619, 25]
language: python; docstring_language: en; docstring_language_predictions: ['en', 'en', 'en']; is_langid_reliable: True

identifier: dependency_info_t.find_out_depend_on_it_declarations
parameters: (self)
docstring_summary: If declaration depends on another declaration and not on some type, this function will return a reference to it. Otherwise None will be returned.
function:

    def find_out_depend_on_it_declarations(self):
        """If declaration depends on another declaration and not on some
        type, this function will return a reference to it. Otherwise None
        will be returned.
        """
        return impl_details.dig_declarations(self.depend_on_it)

start_point: [621, 4]; end_point: [626, 63]
language: python; docstring_language: en; docstring_language_predictions: ['en', 'en', 'en']; is_langid_reliable: True

identifier: dependency_info_t.i_depend_on_them
parameters: (decl)
docstring_summary: Returns set of declarations. Every item in the returned set depends on a declaration from the input.
function:

    def i_depend_on_them(decl):
        """Returns set of declarations. Every item in the returned set
        depends on a declaration from the input."""
        to_be_included = set()
        for dependency_info in decl.i_depend_on_them():
            for ddecl in dependency_info.find_out_depend_on_it_declarations():
                if ddecl:
                    to_be_included.add(ddecl)

        if isinstance(decl.parent, class_t):
            to_be_included.add(decl.parent)
        return to_be_included

start_point: [629, 4]; end_point: [641, 29]
language: python; docstring_language: en; docstring_language_predictions: ['en', 'en', 'en']; is_langid_reliable: True

identifier: dependency_info_t.we_depend_on_them
parameters: (decls)
docstring_summary: Returns set of declarations. Every item in the returned set depends on a declaration from the input.
function:

    def we_depend_on_them(decls):
        """Returns set of declarations. Every item in the returned set
        depends on a declaration from the input."""
        to_be_included = set()
        for decl in decls:
            to_be_included.update(dependency_info_t.i_depend_on_them(decl))
        return to_be_included

start_point: [644, 4]; end_point: [650, 29]
language: python; docstring_language: en; docstring_language_predictions: ['en', 'en', 'en']; is_langid_reliable: True

identifier: enumeration_t.__init__
parameters: (self, name='', values=None)
docstring_summary: creates class that describes C++ `enum` declaration
function:

    def __init__(self, name='', values=None):
        """creates class that describes C++ `enum` declaration

        The items of the list 'values' may either be strings containing
        the enumeration value name or tuples (name, numeric value).

        :param name: `enum` name
        :type name: str

        :param values: Enumeration values
        :type values: list
        """
        declaration.declaration_t.__init__(self, name)
        byte_info.byte_info.__init__(self)
        elaborated_info.elaborated_info.__init__(self, "enum")

        # A list of tuples (valname(str), valnum(int)). The order of the list
        # should be the same as the order in the C/C++ source file.
        self._values = []

        # Initialize values via property access
        self.values = values

start_point: [24, 4]; end_point: [46, 28]
language: python; docstring_language: en; docstring_language_predictions: ['en', 'la', 'en']; is_langid_reliable: True

identifier: enumeration_t._get__cmp__items
parameters: (self)
docstring_summary: implementation details
function:

    def _get__cmp__items(self):
        """implementation details"""
        return [self.values]

start_point: [56, 4]; end_point: [58, 28]
language: python; docstring_language: da; docstring_language_predictions: ['eo', 'da', 'en']; is_langid_reliable: False

identifier: enumeration_t.values
parameters: (self)
docstring_summary: A list of tuples (valname(str), valnum(int)) that contain the enumeration values.
function:

    def values(self):
        """A list of tuples (valname(str), valnum(int)) that contain
        the enumeration values.

        @type: list"""
        return copy.copy(self._values)

start_point: [61, 4]; end_point: [65, 38]
language: python; docstring_language: en; docstring_language_predictions: ['en', 'da', 'en']; is_langid_reliable: True

identifier: enumeration_t.append_value
parameters: (self, valuename, valuenum=None)
docstring_summary: Append another enumeration value to the `enum`.
function:

    def append_value(self, valuename, valuenum=None):
        """Append another enumeration value to the `enum`.

        The numeric value may be None, in which case it is automatically
        determined by increasing the value of the last item.

        When the 'values' attribute is accessed, the resulting list will be
        in the same order as append_value() was called.

        :param valuename: The name of the value.
        :type valuename: str
        :param valuenum: The numeric value or None.
        :type valuenum: int
        """
        # No number given? Then use the previous one + 1
        if valuenum is None:
            if not self._values:
                valuenum = 0
            else:
                valuenum = self._values[-1][1] + 1

        # Store the new value
        self._values.append((valuename, int(valuenum)))

start_point: [92, 4]; end_point: [114, 55]
language: python; docstring_language: en; docstring_language_predictions: ['en', 'en', 'en']; is_langid_reliable: True
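
An added illustration of the auto-increment rule, assuming an `enumeration_t` can be built standalone as its `__init__` above suggests:

    color = enumeration_t(name='color')
    color.append_value('red')        # no number given -> starts at 0
    color.append_value('green', 10)  # explicit number
    color.append_value('blue')       # previous + 1 -> 11
    print(color.values)
    # [('red', 0), ('green', 10), ('blue', 11)]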

identifier: enumeration_t.has_value_name
parameters: (self, name)
docstring_summary: Check if this `enum` has a particular name among its values.
function:

    def has_value_name(self, name):
        """Check if this `enum` has a particular name among its values.

        :param name: Enumeration value name
        :type name: str
        :rtype: True if there is an enumeration value with the given name
        """
        for val, _ in self._values:
            if val == name:
                return True
        return False

start_point: [116, 4]; end_point: [126, 20]
language: python; docstring_language: en; docstring_language_predictions: ['en', 'en', 'en']; is_langid_reliable: True

identifier: enumeration_t.get_name2value_dict
parameters: (self)
docstring_summary: returns a dictionary that maps between `enum` name (key) and `enum` value (value)
function:

    def get_name2value_dict(self):
        """returns a dictionary that maps between `enum` name (key) and
        `enum` value (value)"""
        x = {}
        for val, num in self._values:
            x[val] = num
        return x

start_point: [128, 4]; end_point: [134, 16]
language: python; docstring_language: en; docstring_language_predictions: ['en', 'en', 'en']; is_langid_reliable: True

identifier: ColumnMedian._pandas
parameters: (cls, column, **kwargs)
docstring_summary: Pandas Median Implementation
function:

    def _pandas(cls, column, **kwargs):
        """Pandas Median Implementation"""
        return column.median()

start_point: [32, 4]; end_point: [34, 30]
language: python; docstring_language: en; docstring_language_predictions: ['pt', 'jv', 'en']; is_langid_reliable: False

identifier: ColumnMedian._sqlalchemy
parameters: (cls, execution_engine: "SqlAlchemyExecutionEngine", metric_domain_kwargs: Dict, metric_value_kwargs: Dict, metrics: Dict[Tuple, Any], runtime_configuration: Dict)
docstring_summary: SqlAlchemy Median Implementation
function:

    def _sqlalchemy(
        cls,
        execution_engine: "SqlAlchemyExecutionEngine",
        metric_domain_kwargs: Dict,
        metric_value_kwargs: Dict,
        metrics: Dict[Tuple, Any],
        runtime_configuration: Dict,
    ):
        """SqlAlchemy Median Implementation"""
        (
            selectable,
            compute_domain_kwargs,
            accessor_domain_kwargs,
        ) = execution_engine.get_compute_domain(
            metric_domain_kwargs, MetricDomainTypes.COLUMN
        )
        column_name = accessor_domain_kwargs["column"]
        column = sa.column(column_name)
        sqlalchemy_engine = execution_engine.engine
        dialect = sqlalchemy_engine.dialect

        if dialect.name.lower() == "awsathena":
            raise NotImplementedError("AWS Athena does not support OFFSET.")
        nonnull_count = metrics.get("column_values.nonnull.count")
        if not nonnull_count:
            return None

        element_values = sqlalchemy_engine.execute(
            sa.select([column])
            .order_by(column)
            .where(column != None)
            .offset(max(nonnull_count // 2 - 1, 0))
            .limit(2)
            .select_from(selectable)
        )

        column_values = list(element_values.fetchall())

        if len(column_values) == 0:
            column_median = None
        elif nonnull_count % 2 == 0:
            # An even number of column values: take the average of the
            # two center values
            column_median = (
                float(
                    column_values[0][0]    # left center value
                    + column_values[1][0]  # right center value
                )
                / 2.0
            )
        else:
            # An odd number of column values: just take the center value
            column_median = column_values[1][0]  # true center value

        return column_median

start_point: [37, 4]; end_point: [87, 28]
language: python; docstring_language: en; docstring_language_predictions: ['en', 'en', 'en']; is_langid_reliable: True
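
The OFFSET/LIMIT query fetches at most the two center rows instead of the whole column. A pure-Python sketch of that selection logic (illustrative only, not Great Expectations code):

    def median_from_two_center_rows(values):
        # Mirrors the SQL above: ORDER BY, OFFSET max(n//2 - 1, 0), LIMIT 2.
        n = len(values)
        if n == 0:
            return None
        center = sorted(values)[max(n // 2 - 1, 0):][:2]
        if n % 2 == 0:
            return (center[0] + center[1]) / 2.0  # average the two center values
        return center[-1]  # odd count: the true center is the right of the pair

    print(median_from_two_center_rows([3, 1, 2]))     # 2
    print(median_from_two_center_rows([4, 1, 3, 2]))  # 2.5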

identifier: ColumnMedian._spark
parameters: (cls, execution_engine: "SqlAlchemyExecutionEngine", metric_domain_kwargs: Dict, metric_value_kwargs: Dict, metrics: Dict[Tuple, Any], runtime_configuration: Dict)
docstring_summary: Spark Median Implementation
function:

    def _spark(
        cls,
        execution_engine: "SqlAlchemyExecutionEngine",
        metric_domain_kwargs: Dict,
        metric_value_kwargs: Dict,
        metrics: Dict[Tuple, Any],
        runtime_configuration: Dict,
    ):
        """Spark Median Implementation"""
        (
            df,
            compute_domain_kwargs,
            accessor_domain_kwargs,
        ) = execution_engine.get_compute_domain(
            metric_domain_kwargs, MetricDomainTypes.COLUMN
        )
        column = accessor_domain_kwargs["column"]
        # We will get the two middle values by choosing an epsilon to add
        # to the 50th percentile such that we always get exactly the middle
        # two values (i.e. 0 < epsilon < 1 / (2 * values)).
        # Note that this can be an expensive computation; we are not
        # exposing Spark's ability to estimate.
        # We add two to 2 * n_values to maintain a legitimate quantile
        # in the degenerate case when n_values = 0.
        table_row_count = metrics.get("table.row_count")
        result = df.approxQuantile(
            column, [0.5, 0.5 + (1 / (2 + (2 * table_row_count)))], 0
        )
        return np.mean(result)
[ 90, 4 ]
[ 120, 30 ]
python
en
['en', 'da', 'en']
True
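The comment block spells out the epsilon trick: with relativeError=0 the quantiles are exact, and any 0 < epsilon < 1/(2n) makes the probes at 0.5 and 0.5 + epsilon land on the two middle order statistics, whose mean is the median (for odd n both probes hit the same center value). A hedged, runnable sketch, assuming pyspark is installed and a local session can be started:

from pyspark.sql import SparkSession

spark = SparkSession.builder.master("local[1]").getOrCreate()
df = spark.createDataFrame([(v,) for v in [1.0, 2.0, 3.0, 4.0]], ["x"])

n = df.count()
# 1 / (2 + 2 * n) keeps the second probe a legitimate quantile even when n == 0
lo, hi = df.approxQuantile("x", [0.5, 0.5 + 1 / (2 + 2 * n)], 0)
print((lo + hi) / 2.0)  # 2.5
spark.stop()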
ColumnMedian._get_evaluation_dependencies
( cls, metric: MetricConfiguration, configuration: Optional[ExpectationConfiguration] = None, execution_engine: Optional[ExecutionEngine] = None, runtime_configuration: Optional[dict] = None, )
This should return a dictionary: { "dependency_name": MetricConfiguration, ... }
This should return a dictionary: { "dependency_name": MetricConfiguration, ... }
def _get_evaluation_dependencies( cls, metric: MetricConfiguration, configuration: Optional[ExpectationConfiguration] = None, execution_engine: Optional[ExecutionEngine] = None, runtime_configuration: Optional[dict] = None, ): """This should return a dictionary: { "dependency_name": MetricConfiguration, ... } """ dependencies: dict = super()._get_evaluation_dependencies( metric=metric, configuration=configuration, execution_engine=execution_engine, runtime_configuration=runtime_configuration, ) if isinstance(execution_engine, SqlAlchemyExecutionEngine): dependencies["column_values.nonnull.count"] = MetricConfiguration( metric_name="column_values.nonnull.count", metric_domain_kwargs=metric.metric_domain_kwargs, ) table_domain_kwargs: dict = { k: v for k, v in metric.metric_domain_kwargs.items() if k != MetricDomainTypes.COLUMN.value } dependencies["table.row_count"] = MetricConfiguration( metric_name="table.row_count", metric_domain_kwargs=table_domain_kwargs, metric_value_kwargs=None, metric_dependencies=None, ) return dependencies
[ "def", "_get_evaluation_dependencies", "(", "cls", ",", "metric", ":", "MetricConfiguration", ",", "configuration", ":", "Optional", "[", "ExpectationConfiguration", "]", "=", "None", ",", "execution_engine", ":", "Optional", "[", "ExecutionEngine", "]", "=", "None", ",", "runtime_configuration", ":", "Optional", "[", "dict", "]", "=", "None", ",", ")", ":", "dependencies", ":", "dict", "=", "super", "(", ")", ".", "_get_evaluation_dependencies", "(", "metric", "=", "metric", ",", "configuration", "=", "configuration", ",", "execution_engine", "=", "execution_engine", ",", "runtime_configuration", "=", "runtime_configuration", ",", ")", "if", "isinstance", "(", "execution_engine", ",", "SqlAlchemyExecutionEngine", ")", ":", "dependencies", "[", "\"column_values.nonnull.count\"", "]", "=", "MetricConfiguration", "(", "metric_name", "=", "\"column_values.nonnull.count\"", ",", "metric_domain_kwargs", "=", "metric", ".", "metric_domain_kwargs", ",", ")", "table_domain_kwargs", ":", "dict", "=", "{", "k", ":", "v", "for", "k", ",", "v", "in", "metric", ".", "metric_domain_kwargs", ".", "items", "(", ")", "if", "k", "!=", "MetricDomainTypes", ".", "COLUMN", ".", "value", "}", "dependencies", "[", "\"table.row_count\"", "]", "=", "MetricConfiguration", "(", "metric_name", "=", "\"table.row_count\"", ",", "metric_domain_kwargs", "=", "table_domain_kwargs", ",", "metric_value_kwargs", "=", "None", ",", "metric_dependencies", "=", "None", ",", ")", "return", "dependencies" ]
[ 123, 4 ]
[ 161, 27 ]
python
en
['en', 'en', 'en']
True
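One detail worth isolating from the dependency declaration above: the table-level metric reuses the column metric's domain kwargs with only the "column" key dropped, so batch and row-condition information survives. A tiny illustration (the kwargs shown are hypothetical):

# Hypothetical column-level domain kwargs:
metric_domain_kwargs = {"column": "price", "batch_id": "b1"}
# Drop only the "column" key to get the table-level domain:
table_domain_kwargs = {
    k: v for k, v in metric_domain_kwargs.items() if k != "column"
}
print(table_domain_kwargs)  # {'batch_id': 'b1'}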
test_incomplete_uncommitted
()
When a project is shared between users, it is common to have an incomplete uncommitted directory present. We should fail gracefully when config variables are missing.
When a project is shared between users, it is common to have an incomplete uncommitted directory present. We should fail gracefully when config variables are missing.
def test_incomplete_uncommitted(): """ When a project is shared between users, it is common to have an incomplete uncommitted directory present. We should fail gracefully when config variables are missing. """ with pytest.raises(InvalidConfigError) as exc: _ = DataContext( file_relative_path( __file__, "./fixtures/contexts/incomplete_uncommitted/great_expectations", ) ) assert ( "Unable to find match for config variable my_postgres_db. See " "https://great-expectations.readthedocs.io/en/latest/reference/data_context_reference.html#managing-environment-and-secrets" in exc.value.message )
[ "def", "test_incomplete_uncommitted", "(", ")", ":", "with", "pytest", ".", "raises", "(", "InvalidConfigError", ")", "as", "exc", ":", "_", "=", "DataContext", "(", "file_relative_path", "(", "__file__", ",", "\"./fixtures/contexts/incomplete_uncommitted/great_expectations\"", ",", ")", ")", "assert", "(", "\"Unable to find match for config variable my_postgres_db. See \"", "\"https://great-expectations.readthedocs.io/en/latest/reference/data_context_reference.html#managing-environment-and-secrets\"", "in", "exc", ".", "value", ".", "message", ")" ]
[ 7, 0 ]
[ 24, 9 ]
python
en
['en', 'error', 'th']
False
language_callback
(lexer, match)
Parse the content of a $-string using a lexer. The lexer is chosen by looking for a nearby LANGUAGE.
Parse the content of a $-string using a lexer
def language_callback(lexer, match): """Parse the content of a $-string using a lexer. The lexer is chosen by looking for a nearby LANGUAGE. """ l = None m = language_re.match(lexer.text[match.end():match.end()+100]) if m is not None: l = lexer._get_lexer(m.group(1)) else: m = list(language_re.finditer( lexer.text[max(0, match.start()-100):match.start()])) if m: l = lexer._get_lexer(m[-1].group(1)) if l: yield (match.start(1), String, match.group(1)) for x in l.get_tokens_unprocessed(match.group(2)): yield x yield (match.start(3), String, match.group(3)) else: yield (match.start(), String, match.group())
[ "def", "language_callback", "(", "lexer", ",", "match", ")", ":", "l", "=", "None", "m", "=", "language_re", ".", "match", "(", "lexer", ".", "text", "[", "match", ".", "end", "(", ")", ":", "match", ".", "end", "(", ")", "+", "100", "]", ")", "if", "m", "is", "not", "None", ":", "l", "=", "lexer", ".", "_get_lexer", "(", "m", ".", "group", "(", "1", ")", ")", "else", ":", "m", "=", "list", "(", "language_re", ".", "finditer", "(", "lexer", ".", "text", "[", "max", "(", "0", ",", "match", ".", "start", "(", ")", "-", "100", ")", ":", "match", ".", "start", "(", ")", "]", ")", ")", "if", "m", ":", "l", "=", "lexer", ".", "_get_lexer", "(", "m", "[", "-", "1", "]", ".", "group", "(", "1", ")", ")", "if", "l", ":", "yield", "(", "match", ".", "start", "(", "1", ")", ",", "String", ",", "match", ".", "group", "(", "1", ")", ")", "for", "x", "in", "l", ".", "get_tokens_unprocessed", "(", "match", ".", "group", "(", "2", ")", ")", ":", "yield", "x", "yield", "(", "match", ".", "start", "(", "3", ")", ",", "String", ",", "match", ".", "group", "(", "3", ")", ")", "else", ":", "yield", "(", "match", ".", "start", "(", ")", ",", "String", ",", "match", ".", "group", "(", ")", ")" ]
[ 60, 0 ]
[ 82, 52 ]
python
en
['en', 'en', 'en']
True
wait_for_ready_state_complete
(driver, timeout=settings.EXTREME_TIMEOUT)
The DOM (Document Object Model) has a property called "readyState". When the value of this becomes "complete", page resources are considered fully loaded (although AJAX and other loads might still be happening). This method will wait until document.readyState == "complete".
The DOM (Document Object Model) has a property called "readyState". When the value of this becomes "complete", page resources are considered fully loaded (although AJAX and other loads might still be happening). This method will wait until document.readyState == "complete".
def wait_for_ready_state_complete(driver, timeout=settings.EXTREME_TIMEOUT): """ The DOM (Document Object Model) has a property called "readyState". When the value of this becomes "complete", page resources are considered fully loaded (although AJAX and other loads might still be happening). This method will wait until document.readyState == "complete". """ start_ms = time.time() * 1000.0 stop_ms = start_ms + (timeout * 1000.0) for x in range(int(timeout * 10)): shared_utils.check_if_time_limit_exceeded() try: # If there's an alert, skip driver.switch_to.alert return except Exception: # If there's no alert, continue pass try: ready_state = driver.execute_script("return document.readyState") except WebDriverException: # Bug fix for: [Permission denied to access property "document"] time.sleep(0.03) return True if ready_state == u'complete': time.sleep(0.01) # Better be sure everything is done loading return True else: now_ms = time.time() * 1000.0 if now_ms >= stop_ms: break time.sleep(0.1) raise Exception( "Page elements never fully loaded after %s seconds!" % timeout)
[ "def", "wait_for_ready_state_complete", "(", "driver", ",", "timeout", "=", "settings", ".", "EXTREME_TIMEOUT", ")", ":", "start_ms", "=", "time", ".", "time", "(", ")", "*", "1000.0", "stop_ms", "=", "start_ms", "+", "(", "timeout", "*", "1000.0", ")", "for", "x", "in", "range", "(", "int", "(", "timeout", "*", "10", ")", ")", ":", "shared_utils", ".", "check_if_time_limit_exceeded", "(", ")", "try", ":", "# If there's an alert, skip", "driver", ".", "switch_to", ".", "alert", "return", "except", "Exception", ":", "# If there's no alert, continue", "pass", "try", ":", "ready_state", "=", "driver", ".", "execute_script", "(", "\"return document.readyState\"", ")", "except", "WebDriverException", ":", "# Bug fix for: [Permission denied to access property \"document\"]", "time", ".", "sleep", "(", "0.03", ")", "return", "True", "if", "ready_state", "==", "u'complete'", ":", "time", ".", "sleep", "(", "0.01", ")", "# Better be sure everything is done loading", "return", "True", "else", ":", "now_ms", "=", "time", ".", "time", "(", ")", "*", "1000.0", "if", "now_ms", ">=", "stop_ms", ":", "break", "time", ".", "sleep", "(", "0.1", ")", "raise", "Exception", "(", "\"Page elements never fully loaded after %s seconds!\"", "%", "timeout", ")" ]
[ 15, 0 ]
[ 48, 71 ]
python
en
['en', 'error', 'th']
False
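A hedged usage sketch of the polling helper above, assuming selenium and a matching chromedriver are installed and the function is in scope; the URL is a placeholder:

from selenium import webdriver

driver = webdriver.Chrome()  # assumes a chromedriver on PATH
driver.get("https://example.com")  # placeholder URL
wait_for_ready_state_complete(driver, timeout=30)  # returns once readyState is "complete"
driver.quit()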
raise_unable_to_load_jquery_exception
(driver)
The most-likely reason for jQuery not loading on web pages.
The most-likely reason for jQuery not loading on web pages.
def raise_unable_to_load_jquery_exception(driver): """ The most-likely reason for jQuery not loading on web pages. """ raise Exception( '''Unable to load jQuery on "%s" due to a possible violation ''' '''of the website's Content Security Policy directive. ''' '''To override this policy, add "--disable-csp" on the ''' '''command-line when running your tests.''' % driver.current_url)
[ "def", "raise_unable_to_load_jquery_exception", "(", "driver", ")", ":", "raise", "Exception", "(", "'''Unable to load jQuery on \"%s\" due to a possible violation '''", "'''of the website's Content Security Policy directive. '''", "'''To override this policy, add \"--disable-csp\" on the '''", "'''command-line when running your tests.'''", "%", "driver", ".", "current_url", ")" ]
[ 124, 0 ]
[ 130, 73 ]
python
en
['en', 'en', 'en']
True
activate_jquery
(driver)
If "jQuery is not defined", use this method to activate it for use. This happens because jQuery is not always defined on web sites.
If "jQuery is not defined", use this method to activate it for use. This happens because jQuery is not always defined on web sites.
def activate_jquery(driver): """ If "jQuery is not defined", use this method to activate it for use. This happens because jQuery is not always defined on web sites. """ try: # Let's first find out if jQuery is already defined. driver.execute_script("jQuery('html')") # Since that command worked, jQuery is defined. Let's return. return except Exception: # jQuery is not currently defined. Let's proceed by defining it. pass jquery_js = constants.JQuery.MIN_JS activate_jquery_script = ( '''var script = document.createElement('script');''' '''script.src = "%s";document.getElementsByTagName('head')[0]''' '''.appendChild(script);''' % jquery_js) driver.execute_script(activate_jquery_script) for x in range(int(settings.MINI_TIMEOUT * 10.0)): # jQuery needs a small amount of time to activate. try: driver.execute_script("jQuery('html')") return except Exception: time.sleep(0.1) # Since jQuery still isn't activating, give up and raise an exception raise_unable_to_load_jquery_exception(driver)
[ "def", "activate_jquery", "(", "driver", ")", ":", "try", ":", "# Let's first find out if jQuery is already defined.", "driver", ".", "execute_script", "(", "\"jQuery('html')\"", ")", "# Since that command worked, jQuery is defined. Let's return.", "return", "except", "Exception", ":", "# jQuery is not currently defined. Let's proceed by defining it.", "pass", "jquery_js", "=", "constants", ".", "JQuery", ".", "MIN_JS", "activate_jquery_script", "=", "(", "'''var script = document.createElement('script');'''", "'''script.src = \"%s\";document.getElementsByTagName('head')[0]'''", "'''.appendChild(script);'''", "%", "jquery_js", ")", "driver", ".", "execute_script", "(", "activate_jquery_script", ")", "for", "x", "in", "range", "(", "int", "(", "settings", ".", "MINI_TIMEOUT", "*", "10.0", ")", ")", ":", "# jQuery needs a small amount of time to activate.", "try", ":", "driver", ".", "execute_script", "(", "\"jQuery('html')\"", ")", "return", "except", "Exception", ":", "time", ".", "sleep", "(", "0.1", ")", "# Since jQuery still isn't activating, give up and raise an exception", "raise_unable_to_load_jquery_exception", "(", "driver", ")" ]
[ 133, 0 ]
[ 158, 49 ]
python
en
['en', 'en', 'en']
True
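A hedged usage sketch of the injection above: once activate_jquery() returns, jQuery calls succeed even on pages that never shipped the library, unless CSP blocks the injected script tag (the failure mode raised by raise_unable_to_load_jquery_exception):

from selenium import webdriver

driver = webdriver.Chrome()
driver.get("https://example.com")  # placeholder URL
activate_jquery(driver)  # injects constants.JQuery.MIN_JS via a script tag if needed
driver.execute_script("jQuery('html').css('background', '#eee');")
driver.quit()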
escape_quotes_if_needed
(string)
re.escape() works differently in Python 3.7.0 than earlier versions: Python 3.6.5: >>> import re >>> re.escape('"') '\\"' Python 3.7.0: >>> import re >>> re.escape('"') '"' SeleniumBase needs quotes to be properly escaped for Javascript calls.
re.escape() works differently in Python 3.7.0 than earlier versions:
def escape_quotes_if_needed(string): """ re.escape() works differently in Python 3.7.0 than earlier versions: Python 3.6.5: >>> import re >>> re.escape('"') '\\"' Python 3.7.0: >>> import re >>> re.escape('"') '"' SeleniumBase needs quotes to be properly escaped for Javascript calls. """ if are_quotes_escaped(string): if string.count("'") != string.count("\\'"): string = string.replace("'", "\\'") if string.count('"') != string.count('\\"'): string = string.replace('"', '\\"') return string
[ "def", "escape_quotes_if_needed", "(", "string", ")", ":", "if", "are_quotes_escaped", "(", "string", ")", ":", "if", "string", ".", "count", "(", "\"'\"", ")", "!=", "string", ".", "count", "(", "\"\\\\'\"", ")", ":", "string", "=", "string", ".", "replace", "(", "\"'\"", ",", "\"\\\\'\"", ")", "if", "string", ".", "count", "(", "'\"'", ")", "!=", "string", ".", "count", "(", "'\\\\\"'", ")", ":", "string", "=", "string", ".", "replace", "(", "'\"'", ",", "'\\\\\"'", ")", "return", "string" ]
[ 168, 0 ]
[ 189, 17 ]
python
en
['en', 'error', 'th']
False
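The version difference the docstring describes is easy to verify, and it is the reason the helper re-adds backslashes before quotes on newer interpreters:

import re

# Python <= 3.6: re.escape('"') -> '\\"' (the quote gets a backslash)
# Python >= 3.7: re.escape('"') -> '"'   (only regex metacharacters are escaped)
print(re.escape('"'))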
safe_execute_script
(driver, script)
When executing a script that contains a jQuery command, it's important that the jQuery library has been loaded first. This method will load jQuery if it wasn't already loaded.
When executing a script that contains a jQuery command, it's important that the jQuery library has been loaded first. This method will load jQuery if it wasn't already loaded.
def safe_execute_script(driver, script): """ When executing a script that contains a jQuery command, it's important that the jQuery library has been loaded first. This method will load jQuery if it wasn't already loaded. """ try: driver.execute_script(script) except Exception: # The likely reason this fails is because: "jQuery is not defined" activate_jquery(driver) # It's a good thing we can define it here driver.execute_script(script)
[ "def", "safe_execute_script", "(", "driver", ",", "script", ")", ":", "try", ":", "driver", ".", "execute_script", "(", "script", ")", "except", "Exception", ":", "# The likely reason this fails is because: \"jQuery is not defined\"", "activate_jquery", "(", "driver", ")", "# It's a good thing we can define it here", "driver", ".", "execute_script", "(", "script", ")" ]
[ 192, 0 ]
[ 201, 37 ]
python
en
['en', 'en', 'en']
True
post_message
(driver, message, msg_dur, style="info")
A helper method to post a message on the screen with Messenger. (Should only be called from post_message() in base_case.py)
A helper method to post a message on the screen with Messenger. (Should only be called from post_message() in base_case.py)
def post_message(driver, message, msg_dur, style="info"): """ A helper method to post a message on the screen with Messenger. (Should only be called from post_message() in base_case.py) """ if not msg_dur: msg_dur = settings.DEFAULT_MESSAGE_DURATION msg_dur = float(msg_dur) message = re.escape(message) message = escape_quotes_if_needed(message) messenger_script = ('''Messenger().post({message: "%s", type: "%s", ''' '''hideAfter: %s, hideOnNavigate: true});''' % (message, style, msg_dur)) try: driver.execute_script(messenger_script) except Exception: activate_messenger(driver) set_messenger_theme(driver) try: driver.execute_script(messenger_script) except Exception: time.sleep(0.2) activate_messenger(driver) time.sleep(0.2) set_messenger_theme(driver) time.sleep(0.5) driver.execute_script(messenger_script)
[ "def", "post_message", "(", "driver", ",", "message", ",", "msg_dur", ",", "style", "=", "\"info\"", ")", ":", "if", "not", "msg_dur", ":", "msg_dur", "=", "settings", ".", "DEFAULT_MESSAGE_DURATION", "msg_dur", "=", "float", "(", "msg_dur", ")", "message", "=", "re", ".", "escape", "(", "message", ")", "message", "=", "escape_quotes_if_needed", "(", "message", ")", "messenger_script", "=", "(", "'''Messenger().post({message: \"%s\", type: \"%s\", '''", "'''hideAfter: %s, hideOnNavigate: true});'''", "%", "(", "message", ",", "style", ",", "msg_dur", ")", ")", "try", ":", "driver", ".", "execute_script", "(", "messenger_script", ")", "except", "Exception", ":", "activate_messenger", "(", "driver", ")", "set_messenger_theme", "(", "driver", ")", "try", ":", "driver", ".", "execute_script", "(", "messenger_script", ")", "except", "Exception", ":", "time", ".", "sleep", "(", "0.2", ")", "activate_messenger", "(", "driver", ")", "time", ".", "sleep", "(", "0.2", ")", "set_messenger_theme", "(", "driver", ")", "time", ".", "sleep", "(", "0.5", ")", "driver", ".", "execute_script", "(", "messenger_script", ")" ]
[ 550, 0 ]
[ 574, 51 ]
python
en
['en', 'en', 'en']
True
_jq_format
(code)
DEPRECATED - Use re.escape() instead, which performs the intended action. Use before throwing raw code such as 'div[tab="advanced"]' into jQuery. Selectors with quotes inside of quotes would otherwise break jQuery. If you just want to escape quotes, there's escape_quotes_if_needed(). This is similar to "json.dumps(value)", but with one less layer of quotes.
DEPRECATED - Use re.escape() instead, which performs the intended action. Use before throwing raw code such as 'div[tab="advanced"]' into jQuery. Selectors with quotes inside of quotes would otherwise break jQuery. If you just want to escape quotes, there's escape_quotes_if_needed(). This is similar to "json.dumps(value)", but with one less layer of quotes.
def _jq_format(code): """ DEPRECATED - Use re.escape() instead, which performs the intended action. Use before throwing raw code such as 'div[tab="advanced"]' into jQuery. Selectors with quotes inside of quotes would otherwise break jQuery. If you just want to escape quotes, there's escape_quotes_if_needed(). This is similar to "json.dumps(value)", but with one less layer of quotes. """ code = code.replace('\\', '\\\\').replace('\t', '\\t').replace('\n', '\\n') code = code.replace('\"', '\\\"').replace('\'', '\\\'') code = code.replace('\v', '\\v').replace('\a', '\\a').replace('\f', '\\f') code = code.replace('\b', '\\b').replace(r'\u', '\\u').replace('\r', '\\r') return code
[ "def", "_jq_format", "(", "code", ")", ":", "code", "=", "code", ".", "replace", "(", "'\\\\'", ",", "'\\\\\\\\'", ")", ".", "replace", "(", "'\\t'", ",", "'\\\\t'", ")", ".", "replace", "(", "'\\n'", ",", "'\\\\n'", ")", "code", "=", "code", ".", "replace", "(", "'\\\"'", ",", "'\\\\\\\"'", ")", ".", "replace", "(", "'\\''", ",", "'\\\\\\''", ")", "code", "=", "code", ".", "replace", "(", "'\\v'", ",", "'\\\\v'", ")", ".", "replace", "(", "'\\a'", ",", "'\\\\a'", ")", ".", "replace", "(", "'\\f'", ",", "'\\\\f'", ")", "code", "=", "code", ".", "replace", "(", "'\\b'", ",", "'\\\\b'", ")", ".", "replace", "(", "r'\\u'", ",", "'\\\\u'", ")", ".", "replace", "(", "'\\r'", ",", "'\\\\r'", ")", "return", "code" ]
[ 735, 0 ]
[ 747, 15 ]
python
en
['en', 'error', 'th']
False
xarray_sortby_coord
(dataset, coord)
Sort an xarray.Dataset by a coordinate. xarray.Dataset.sortby() sometimes fails, so this is an alternative. Credit to https://stackoverflow.com/a/42600594/5449970.
Sort an xarray.Dataset by a coordinate. xarray.Dataset.sortby() sometimes fails, so this is an alternative. Credit to https://stackoverflow.com/a/42600594/5449970.
def xarray_sortby_coord(dataset, coord): """ Sort an xarray.Dataset by a coordinate. xarray.Dataset.sortby() sometimes fails, so this is an alternative. Credit to https://stackoverflow.com/a/42600594/5449970. """ return dataset.loc[{coord:np.sort(dataset.coords[coord].values)}]
[ "def", "xarray_sortby_coord", "(", "dataset", ",", "coord", ")", ":", "return", "dataset", ".", "loc", "[", "{", "coord", ":", "np", ".", "sort", "(", "dataset", ".", "coords", "[", "coord", "]", ".", "values", ")", "}", "]" ]
[ 2, 0 ]
[ 7, 69 ]
python
en
['en', 'error', 'th']
False
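A hedged usage sketch with a tiny synthetic dataset, assuming numpy and xarray are importable and the helper above is in scope:

import numpy as np
import xarray as xr

ds = xr.Dataset(
    {"t": ("time", [10.0, 30.0, 20.0])},
    coords={"time": [3, 1, 2]},
)
sorted_ds = xarray_sortby_coord(ds, "time")
print(sorted_ds.coords["time"].values)  # [1 2 3]
print(sorted_ds["t"].values)            # [30. 20. 10.]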
parse_requirements
(filename)
Parse a pip requirements file, returning the list of required packages. It excludes commented lines and --find-links directives. Args: filename: pip requirements file Returns: list of required packages with version constraints
Parse a pip requirements file, returning the list of required packages. It excludes commented lines and --find-links directives.
def parse_requirements(filename): """ Parse a pip requirements file, returning the list of required packages. It excludes commented lines and --find-links directives. Args: filename: pip requirements file Returns: list of required packages with version constraints """ with open(filename) as file: parsed_requirements = file.read().splitlines() parsed_requirements = [line.strip() for line in parsed_requirements if not ((line.strip()[0] == "#") or line.strip().startswith('--find-links') or ("git+https" in line))] return parsed_requirements
[ "def", "parse_requirements", "(", "filename", ")", ":", "with", "open", "(", "filename", ")", "as", "file", ":", "parsed_requirements", "=", "file", ".", "read", "(", ")", ".", "splitlines", "(", ")", "parsed_requirements", "=", "[", "line", ".", "strip", "(", ")", "for", "line", "in", "parsed_requirements", "if", "not", "(", "(", "line", ".", "strip", "(", ")", "[", "0", "]", "==", "\"#\"", ")", "or", "line", ".", "strip", "(", ")", ".", "startswith", "(", "'--find-links'", ")", "or", "(", "\"git+https\"", "in", "line", ")", ")", "]", "return", "parsed_requirements" ]
[ 5, 0 ]
[ 22, 30 ]
python
en
['en', 'error', 'th']
False
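A hedged usage sketch: write a small requirements file to a temporary path and parse it. Note that the list comprehension indexes line.strip()[0], so a blank line would raise IndexError; the sample avoids blank lines.

import os
import tempfile

content = "# pinned deps\nrequests>=2.0\n--find-links=https://example.com/wheels\nnumpy\n"
with tempfile.NamedTemporaryFile("w", suffix=".txt", delete=False) as f:
    f.write(content)
print(parse_requirements(f.name))  # ['requests>=2.0', 'numpy']
os.unlink(f.name)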
get_dependency_links
(filename)
Parse a pip requirements file looking for the --find-links directive. Args: filename: pip requirements file Returns: list of find-links URLs
Parse a pip requirements file looking for the --find-links directive. Args: filename: pip requirements file
def get_dependency_links(filename): """ Parse a pip requirements file looking for the --find-links directive. Args: filename: pip requirements file Returns: list of find-links URLs """ with open(filename) as file: parsed_requirements = file.read().splitlines() dependency_links = list() for line in parsed_requirements: line = line.strip() if line.startswith('--find-links'): dependency_links.append(line.split('=')[1]) return dependency_links
[ "def", "get_dependency_links", "(", "filename", ")", ":", "with", "open", "(", "filename", ")", "as", "file", ":", "parsed_requirements", "=", "file", ".", "read", "(", ")", ".", "splitlines", "(", ")", "dependency_links", "=", "list", "(", ")", "for", "line", "in", "parsed_requirements", ":", "line", "=", "line", ".", "strip", "(", ")", "if", "line", ".", "startswith", "(", "'--find-links'", ")", ":", "dependency_links", ".", "append", "(", "line", ".", "split", "(", "'='", ")", "[", "1", "]", ")", "return", "dependency_links" ]
[ 25, 0 ]
[ 41, 27 ]
python
en
['en', 'error', 'th']
False
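A companion sketch to the previous one. Note that split('=')[1] only recognizes the --find-links=URL spelling, not the space-separated form, and a URL that itself contains '=' would be truncated.

import os
import tempfile

with tempfile.NamedTemporaryFile("w", suffix=".txt", delete=False) as f:
    f.write("requests>=2.0\n--find-links=https://example.com/wheels\n")
print(get_dependency_links(f.name))  # ['https://example.com/wheels']
os.unlink(f.name)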
init
(target_directory, view, usage_stats)
Initialize a new Great Expectations project. This guided input walks the user through setting up a new project and also onboards a new developer in an existing project. It scaffolds directories, sets up notebooks, creates a project file, and appends to a `.gitignore` file.
Initialize a new Great Expectations project.
def init(target_directory, view, usage_stats): """ Initialize a new Great Expectations project. This guided input walks the user through setting up a new project and also onboards a new developer in an existing project. It scaffolds directories, sets up notebooks, creates a project file, and appends to a `.gitignore` file. """ target_directory = os.path.abspath(target_directory) ge_dir = _get_full_path_to_ge_dir(target_directory) cli_message(GREETING) if DataContext.does_config_exist_on_disk(ge_dir): try: if DataContext.is_project_initialized(ge_dir): # Ensure the context can be instantiated cli_message(PROJECT_IS_COMPLETE) except (DataContextError, DatasourceInitializationError) as e: cli_message("<red>{}</red>".format(e.message)) sys.exit(1) try: context = DataContext.create( target_directory, usage_statistics_enabled=usage_stats ) cli_message(ONBOARDING_COMPLETE) # TODO if this is correct, ensure this is covered by a test # cli_message(SETUP_SUCCESS) # exit(0) except DataContextError as e: cli_message("<red>{}</red>".format(e.message)) # TODO ensure this is covered by a test exit(5) else: if not click.confirm(LETS_BEGIN_PROMPT, default=True): cli_message(RUN_INIT_AGAIN) # TODO ensure this is covered by a test exit(0) try: context = DataContext.create( target_directory, usage_statistics_enabled=usage_stats ) toolkit.send_usage_message( data_context=context, event="cli.init.create", success=True ) except DataContextError as e: # TODO ensure this is covered by a test cli_message("<red>{}</red>".format(e)) try: # if expectations exist, offer to build docs context = DataContext(ge_dir) if context.list_expectation_suites(): if click.confirm(BUILD_DOCS_PROMPT, default=True): build_docs(context, view=view) else: datasources = context.list_datasources() if len(datasources) == 0: cli_message(SECTION_SEPARATOR) if not click.confirm( "Would you like to configure a Datasource?", default=True ): cli_message("Okay, bye!") sys.exit(1) datasource_name, data_source_type = add_datasource_impl( context, choose_one_data_asset=False ) if not datasource_name: # no datasource was created sys.exit(1) datasources = context.list_datasources() if len(datasources) == 1: datasource_name = datasources[0]["name"] cli_message(SECTION_SEPARATOR) if not click.confirm( "Would you like to profile new Expectations for a single data asset within your new Datasource?", default=True, ): cli_message( "Okay, exiting now. To learn more about Profilers, run great_expectations profile --help or visit docs.greatexpectations.io!" ) sys.exit(1) ( success, suite_name, profiling_results, ) = toolkit.create_expectation_suite( context, datasource_name=datasource_name, additional_batch_kwargs={"limit": 1000}, flag_build_docs=False, open_docs=False, ) cli_message(SECTION_SEPARATOR) if not click.confirm( "Would you like to build Data Docs?", default=True ): cli_message( "Okay, exiting now. To learn more about Data Docs, run great_expectations docs --help or visit docs.greatexpectations.io!" ) sys.exit(1) build_docs(context, view=False) if not click.confirm( "\nWould you like to view your new Expectations in Data Docs? This will open a new browser window.", default=True, ): cli_message( "Okay, exiting now. You can view the site that has been created in a browser, or visit docs.greatexpectations.io for more information!" ) sys.exit(1) toolkit.attempt_to_open_validation_results_in_data_docs( context, profiling_results ) cli_message(SECTION_SEPARATOR) cli_message(SETUP_SUCCESS) sys.exit(0) except ( DataContextError, ge_exceptions.ProfilerError, OSError, SQLAlchemyError, ) as e: cli_message("<red>{}</red>".format(e)) sys.exit(1)
[ "def", "init", "(", "target_directory", ",", "view", ",", "usage_stats", ")", ":", "target_directory", "=", "os", ".", "path", ".", "abspath", "(", "target_directory", ")", "ge_dir", "=", "_get_full_path_to_ge_dir", "(", "target_directory", ")", "cli_message", "(", "GREETING", ")", "if", "DataContext", ".", "does_config_exist_on_disk", "(", "ge_dir", ")", ":", "try", ":", "if", "DataContext", ".", "is_project_initialized", "(", "ge_dir", ")", ":", "# Ensure the context can be instantiated", "cli_message", "(", "PROJECT_IS_COMPLETE", ")", "except", "(", "DataContextError", ",", "DatasourceInitializationError", ")", "as", "e", ":", "cli_message", "(", "\"<red>{}</red>\"", ".", "format", "(", "e", ".", "message", ")", ")", "sys", ".", "exit", "(", "1", ")", "try", ":", "context", "=", "DataContext", ".", "create", "(", "target_directory", ",", "usage_statistics_enabled", "=", "usage_stats", ")", "cli_message", "(", "ONBOARDING_COMPLETE", ")", "# TODO if this is correct, ensure this is covered by a test", "# cli_message(SETUP_SUCCESS)", "# exit(0)", "except", "DataContextError", "as", "e", ":", "cli_message", "(", "\"<red>{}</red>\"", ".", "format", "(", "e", ".", "message", ")", ")", "# TODO ensure this is covered by a test", "exit", "(", "5", ")", "else", ":", "if", "not", "click", ".", "confirm", "(", "LETS_BEGIN_PROMPT", ",", "default", "=", "True", ")", ":", "cli_message", "(", "RUN_INIT_AGAIN", ")", "# TODO ensure this is covered by a test", "exit", "(", "0", ")", "try", ":", "context", "=", "DataContext", ".", "create", "(", "target_directory", ",", "usage_statistics_enabled", "=", "usage_stats", ")", "toolkit", ".", "send_usage_message", "(", "data_context", "=", "context", ",", "event", "=", "\"cli.init.create\"", ",", "success", "=", "True", ")", "except", "DataContextError", "as", "e", ":", "# TODO ensure this is covered by a test", "cli_message", "(", "\"<red>{}</red>\"", ".", "format", "(", "e", ")", ")", "try", ":", "# if expectations exist, offer to build docs", "context", "=", "DataContext", "(", "ge_dir", ")", "if", "context", ".", "list_expectation_suites", "(", ")", ":", "if", "click", ".", "confirm", "(", "BUILD_DOCS_PROMPT", ",", "default", "=", "True", ")", ":", "build_docs", "(", "context", ",", "view", "=", "view", ")", "else", ":", "datasources", "=", "context", ".", "list_datasources", "(", ")", "if", "len", "(", "datasources", ")", "==", "0", ":", "cli_message", "(", "SECTION_SEPARATOR", ")", "if", "not", "click", ".", "confirm", "(", "\"Would you like to configure a Datasource?\"", ",", "default", "=", "True", ")", ":", "cli_message", "(", "\"Okay, bye!\"", ")", "sys", ".", "exit", "(", "1", ")", "datasource_name", ",", "data_source_type", "=", "add_datasource_impl", "(", "context", ",", "choose_one_data_asset", "=", "False", ")", "if", "not", "datasource_name", ":", "# no datasource was created", "sys", ".", "exit", "(", "1", ")", "datasources", "=", "context", ".", "list_datasources", "(", ")", "if", "len", "(", "datasources", ")", "==", "1", ":", "datasource_name", "=", "datasources", "[", "0", "]", "[", "\"name\"", "]", "cli_message", "(", "SECTION_SEPARATOR", ")", "if", "not", "click", ".", "confirm", "(", "\"Would you like to profile new Expectations for a single data asset within your new Datasource?\"", ",", "default", "=", "True", ",", ")", ":", "cli_message", "(", "\"Okay, exiting now. 
To learn more about Profilers, run great_expectations profile --help or visit docs.greatexpectations.io!\"", ")", "sys", ".", "exit", "(", "1", ")", "(", "success", ",", "suite_name", ",", "profiling_results", ",", ")", "=", "toolkit", ".", "create_expectation_suite", "(", "context", ",", "datasource_name", "=", "datasource_name", ",", "additional_batch_kwargs", "=", "{", "\"limit\"", ":", "1000", "}", ",", "flag_build_docs", "=", "False", ",", "open_docs", "=", "False", ",", ")", "cli_message", "(", "SECTION_SEPARATOR", ")", "if", "not", "click", ".", "confirm", "(", "\"Would you like to build Data Docs?\"", ",", "default", "=", "True", ")", ":", "cli_message", "(", "\"Okay, exiting now. To learn more about Data Docs, run great_expectations docs --help or visit docs.greatexpectations.io!\"", ")", "sys", ".", "exit", "(", "1", ")", "build_docs", "(", "context", ",", "view", "=", "False", ")", "if", "not", "click", ".", "confirm", "(", "\"\\nWould you like to view your new Expectations in Data Docs? This will open a new browser window.\"", ",", "default", "=", "True", ",", ")", ":", "cli_message", "(", "\"Okay, exiting now. You can view the site that has been created in a browser, or visit docs.greatexpectations.io for more information!\"", ")", "sys", ".", "exit", "(", "1", ")", "toolkit", ".", "attempt_to_open_validation_results_in_data_docs", "(", "context", ",", "profiling_results", ")", "cli_message", "(", "SECTION_SEPARATOR", ")", "cli_message", "(", "SETUP_SUCCESS", ")", "sys", ".", "exit", "(", "0", ")", "except", "(", "DataContextError", ",", "ge_exceptions", ".", "ProfilerError", ",", "OSError", ",", "SQLAlchemyError", ",", ")", "as", "e", ":", "cli_message", "(", "\"<red>{}</red>\"", ".", "format", "(", "e", ")", ")", "sys", ".", "exit", "(", "1", ")" ]
[ 58, 0 ]
[ 191, 19 ]
python
en
['en', 'error', 'th']
False
LinearInversionDirectApp.set_G
(self, N=20, M=100, p=-0.25, q=0.25, j1=1, jn=60)
Parameters ---------- N: # of data M: # of model parameters ...
Parameters ---------- N: # of data M: # of model parameters ...
def set_G(self, N=20, M=100, p=-0.25, q=0.25, j1=1, jn=60): """ Parameters ---------- N: # of data M: # of model parameters ... """ self.N = N self.M = M self._mesh = TensorMesh([M]) jk = np.linspace(j1, jn, N) self._G = np.zeros((N, self.mesh.nC), dtype=float, order="C") def g(k): return np.exp(p * jk[k] * self.mesh.vectorCCx) * np.cos( np.pi * q * jk[k] * self.mesh.vectorCCx ) for i in range(N): self._G[i, :] = g(i) * self.mesh.hx self._jk = jk
[ "def", "set_G", "(", "self", ",", "N", "=", "20", ",", "M", "=", "100", ",", "p", "=", "-", "0.25", ",", "q", "=", "0.25", ",", "j1", "=", "1", ",", "jn", "=", "60", ")", ":", "self", ".", "N", "=", "N", "self", ".", "M", "=", "M", "self", ".", "_mesh", "=", "TensorMesh", "(", "[", "M", "]", ")", "jk", "=", "np", ".", "linspace", "(", "j1", ",", "jn", ",", "N", ")", "self", ".", "_G", "=", "np", ".", "zeros", "(", "(", "N", ",", "self", ".", "mesh", ".", "nC", ")", ",", "dtype", "=", "float", ",", "order", "=", "\"C\"", ")", "def", "g", "(", "k", ")", ":", "return", "np", ".", "exp", "(", "p", "*", "jk", "[", "k", "]", "*", "self", ".", "mesh", ".", "vectorCCx", ")", "*", "np", ".", "cos", "(", "np", ".", "pi", "*", "q", "*", "jk", "[", "k", "]", "*", "self", ".", "mesh", ".", "vectorCCx", ")", "for", "i", "in", "range", "(", "N", ")", ":", "self", ".", "_G", "[", "i", ",", ":", "]", "=", "g", "(", "i", ")", "*", "self", ".", "mesh", ".", "hx", "self", ".", "_jk", "=", "jk" ]
[ 81, 4 ]
[ 103, 21 ]
python
en
['en', 'error', 'th']
False
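A numpy-only sketch of the kernel rows built above, g_j(x) = exp(p*j*x) * cos(pi*q*j*x) scaled by the cell widths hx, with the TensorMesh replaced by an explicit uniform grid on [0, 1] (the grid is an assumption made for illustration; the real cell centers and widths come from discretize's TensorMesh):

import numpy as np

N, M, p, q = 20, 100, -0.25, 0.25
x = (np.arange(M) + 0.5) / M   # assumed cell centers on a unit interval
hx = np.full(M, 1.0 / M)       # assumed uniform cell widths
jk = np.linspace(1, 60, N)
G = np.array([np.exp(p * j * x) * np.cos(np.pi * q * j * x) * hx for j in jk])
print(G.shape)  # (20, 100): one kernel row per datum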
MetaSqlAlchemyDataset.column_map_expectation
(cls, func)
For SqlAlchemy, this decorator allows individual column_map_expectations to simply return the filter that describes the expected condition on their data. The decorator will then use that filter to obtain unexpected elements, relevant counts, and return the formatted object.
For SqlAlchemy, this decorator allows individual column_map_expectations to simply return the filter that describes the expected condition on their data.
def column_map_expectation(cls, func): """For SqlAlchemy, this decorator allows individual column_map_expectations to simply return the filter that describes the expected condition on their data. The decorator will then use that filter to obtain unexpected elements, relevant counts, and return the formatted object. """ argspec = inspect.getfullargspec(func)[0][1:] @cls.expectation(argspec) @wraps(func) def inner_wrapper( self, column, mostly=None, result_format=None, *args, **kwargs ): if self.batch_kwargs.get("use_quoted_name"): column = quoted_name(column, quote=True) if result_format is None: result_format = self.default_expectation_args["result_format"] result_format = parse_result_format(result_format) if result_format["result_format"] == "COMPLETE": warnings.warn( "Setting result format to COMPLETE for a SqlAlchemyDataset can be dangerous because it will not limit the number of returned results." ) unexpected_count_limit = None else: unexpected_count_limit = result_format["partial_unexpected_count"] expected_condition: BinaryExpression = func(self, column, *args, **kwargs) # Added to prepare for when an ignore_values argument is added to the expectation ignore_values: list = [None] if func.__name__ in [ "expect_column_values_to_not_be_null", "expect_column_values_to_be_null", ]: ignore_values = [] # Counting the number of unexpected values can be expensive when there is a large # number of np.nan values. # This only happens on expect_column_values_to_not_be_null expectations. # Since there is no reason to look for most common unexpected values in this case, # we will instruct the result formatting method to skip this step. result_format["partial_unexpected_count"] = 0 ignore_values_conditions: List[BinaryExpression] = [] if ( len(ignore_values) > 0 and None not in ignore_values or len(ignore_values) > 1 and None in ignore_values ): ignore_values_conditions += [ sa.column(column).in_( [val for val in ignore_values if val is not None] ) ] if None in ignore_values: ignore_values_conditions += [sa.column(column).is_(None)] ignore_values_condition: BinaryExpression if len(ignore_values_conditions) > 1: ignore_values_condition = sa.or_(*ignore_values_conditions) elif len(ignore_values_conditions) == 1: ignore_values_condition = ignore_values_conditions[0] else: ignore_values_condition = BinaryExpression( sa.literal(False), sa.literal(True), custom_op("=") ) count_query: Select if self.sql_engine_dialect.name.lower() == "mssql": count_query = self._get_count_query_mssql( expected_condition=expected_condition, ignore_values_condition=ignore_values_condition, ) else: count_query = self._get_count_query_generic_sqlalchemy( expected_condition=expected_condition, ignore_values_condition=ignore_values_condition, ) count_results: dict = dict(self.engine.execute(count_query).fetchone()) # Handle case of empty table gracefully: if ( "element_count" not in count_results or count_results["element_count"] is None ): count_results["element_count"] = 0 if "null_count" not in count_results or count_results["null_count"] is None: count_results["null_count"] = 0 if ( "unexpected_count" not in count_results or count_results["unexpected_count"] is None ): count_results["unexpected_count"] = 0 # Some engines may return Decimal from count queries (lookin' at you MSSQL) # Convert to integers count_results["element_count"] = int(count_results["element_count"]) count_results["null_count"] = int(count_results["null_count"]) count_results["unexpected_count"] = int(count_results["unexpected_count"]) # limit doesn't compile properly for oracle so we will append rownum to query string later if self.engine.dialect.name.lower() == "oracle": raw_query = ( sa.select([sa.column(column)]) .select_from(self._table) .where( sa.and_( sa.not_(expected_condition), sa.not_(ignore_values_condition), ) ) ) query = str( raw_query.compile( self.engine, compile_kwargs={"literal_binds": True} ) ) query += "\nAND ROWNUM <= %d" % unexpected_count_limit else: query = ( sa.select([sa.column(column)]) .select_from(self._table) .where( sa.and_( sa.not_(expected_condition), sa.not_(ignore_values_condition), ) ) .limit(unexpected_count_limit) ) unexpected_query_results = self.engine.execute(query) nonnull_count: int = ( count_results["element_count"] - count_results["null_count"] ) if "output_strftime_format" in kwargs: output_strftime_format = kwargs["output_strftime_format"] maybe_limited_unexpected_list = [] for x in unexpected_query_results.fetchall(): if isinstance(x[column], str): col = parse(x[column]) else: col = x[column] maybe_limited_unexpected_list.append( datetime.strftime(col, output_strftime_format) ) else: maybe_limited_unexpected_list = [ x[column] for x in unexpected_query_results.fetchall() ] success_count = nonnull_count - count_results["unexpected_count"] success, percent_success = self._calc_map_expectation_success( success_count, nonnull_count, mostly ) return_obj = self._format_map_output( result_format, success, count_results["element_count"], nonnull_count, count_results["unexpected_count"], maybe_limited_unexpected_list, None, ) if func.__name__ in [ "expect_column_values_to_not_be_null", "expect_column_values_to_be_null", ]: # These results are unnecessary for the above expectations del return_obj["result"]["unexpected_percent_nonmissing"] del return_obj["result"]["missing_count"] del return_obj["result"]["missing_percent"] try: del return_obj["result"]["partial_unexpected_counts"] del return_obj["result"]["partial_unexpected_list"] except KeyError: pass return return_obj inner_wrapper.__name__ = func.__name__ inner_wrapper.__doc__ = func.__doc__ return inner_wrapper
[ "def", "column_map_expectation", "(", "cls", ",", "func", ")", ":", "argspec", "=", "inspect", ".", "getfullargspec", "(", "func", ")", "[", "0", "]", "[", "1", ":", "]", "@", "cls", ".", "expectation", "(", "argspec", ")", "@", "wraps", "(", "func", ")", "def", "inner_wrapper", "(", "self", ",", "column", ",", "mostly", "=", "None", ",", "result_format", "=", "None", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "self", ".", "batch_kwargs", ".", "get", "(", "\"use_quoted_name\"", ")", ":", "column", "=", "quoted_name", "(", "column", ",", "quote", "=", "True", ")", "if", "result_format", "is", "None", ":", "result_format", "=", "self", ".", "default_expectation_args", "[", "\"result_format\"", "]", "result_format", "=", "parse_result_format", "(", "result_format", ")", "if", "result_format", "[", "\"result_format\"", "]", "==", "\"COMPLETE\"", ":", "warnings", ".", "warn", "(", "\"Setting result format to COMPLETE for a SqlAlchemyDataset can be dangerous because it will not limit the number of returned results.\"", ")", "unexpected_count_limit", "=", "None", "else", ":", "unexpected_count_limit", "=", "result_format", "[", "\"partial_unexpected_count\"", "]", "expected_condition", ":", "BinaryExpression", "=", "func", "(", "self", ",", "column", ",", "*", "args", ",", "*", "*", "kwargs", ")", "# Added to prepare for when an ignore_values argument is added to the expectation", "ignore_values", ":", "list", "=", "[", "None", "]", "if", "func", ".", "__name__", "in", "[", "\"expect_column_values_to_not_be_null\"", ",", "\"expect_column_values_to_be_null\"", ",", "]", ":", "ignore_values", "=", "[", "]", "# Counting the number of unexpected values can be expensive when there is a large", "# number of np.nan values.", "# This only happens on expect_column_values_to_not_be_null expectations.", "# Since there is no reason to look for most common unexpected values in this case,", "# we will instruct the result formatting method to skip this step.", "result_format", "[", "\"partial_unexpected_count\"", "]", "=", "0", "ignore_values_conditions", ":", "List", "[", "BinaryExpression", "]", "=", "[", "]", "if", "(", "len", "(", "ignore_values", ")", ">", "0", "and", "None", "not", "in", "ignore_values", "or", "len", "(", "ignore_values", ")", ">", "1", "and", "None", "in", "ignore_values", ")", ":", "ignore_values_conditions", "+=", "[", "sa", ".", "column", "(", "column", ")", ".", "in_", "(", "[", "val", "for", "val", "in", "ignore_values", "if", "val", "is", "not", "None", "]", ")", "]", "if", "None", "in", "ignore_values", ":", "ignore_values_conditions", "+=", "[", "sa", ".", "column", "(", "column", ")", ".", "is_", "(", "None", ")", "]", "ignore_values_condition", ":", "BinaryExpression", "if", "len", "(", "ignore_values_conditions", ")", ">", "1", ":", "ignore_values_condition", "=", "sa", ".", "or_", "(", "*", "ignore_values_conditions", ")", "elif", "len", "(", "ignore_values_conditions", ")", "==", "1", ":", "ignore_values_condition", "=", "ignore_values_conditions", "[", "0", "]", "else", ":", "ignore_values_condition", "=", "BinaryExpression", "(", "sa", ".", "literal", "(", "False", ")", ",", "sa", ".", "literal", "(", "True", ")", ",", "custom_op", "(", "\"=\"", ")", ")", "count_query", ":", "Select", "if", "self", ".", "sql_engine_dialect", ".", "name", ".", "lower", "(", ")", "==", "\"mssql\"", ":", "count_query", "=", "self", ".", "_get_count_query_mssql", "(", "expected_condition", "=", "expected_condition", ",", "ignore_values_condition", "=", 
"ignore_values_condition", ",", ")", "else", ":", "count_query", "=", "self", ".", "_get_count_query_generic_sqlalchemy", "(", "expected_condition", "=", "expected_condition", ",", "ignore_values_condition", "=", "ignore_values_condition", ",", ")", "count_results", ":", "dict", "=", "dict", "(", "self", ".", "engine", ".", "execute", "(", "count_query", ")", ".", "fetchone", "(", ")", ")", "# Handle case of empty table gracefully:", "if", "(", "\"element_count\"", "not", "in", "count_results", "or", "count_results", "[", "\"element_count\"", "]", "is", "None", ")", ":", "count_results", "[", "\"element_count\"", "]", "=", "0", "if", "\"null_count\"", "not", "in", "count_results", "or", "count_results", "[", "\"null_count\"", "]", "is", "None", ":", "count_results", "[", "\"null_count\"", "]", "=", "0", "if", "(", "\"unexpected_count\"", "not", "in", "count_results", "or", "count_results", "[", "\"unexpected_count\"", "]", "is", "None", ")", ":", "count_results", "[", "\"unexpected_count\"", "]", "=", "0", "# Some engines may return Decimal from count queries (lookin' at you MSSQL)", "# Convert to integers", "count_results", "[", "\"element_count\"", "]", "=", "int", "(", "count_results", "[", "\"element_count\"", "]", ")", "count_results", "[", "\"null_count\"", "]", "=", "int", "(", "count_results", "[", "\"null_count\"", "]", ")", "count_results", "[", "\"unexpected_count\"", "]", "=", "int", "(", "count_results", "[", "\"unexpected_count\"", "]", ")", "# limit doesn't compile properly for oracle so we will append rownum to query string later", "if", "self", ".", "engine", ".", "dialect", ".", "name", ".", "lower", "(", ")", "==", "\"oracle\"", ":", "raw_query", "=", "(", "sa", ".", "select", "(", "[", "sa", ".", "column", "(", "column", ")", "]", ")", ".", "select_from", "(", "self", ".", "_table", ")", ".", "where", "(", "sa", ".", "and_", "(", "sa", ".", "not_", "(", "expected_condition", ")", ",", "sa", ".", "not_", "(", "ignore_values_condition", ")", ",", ")", ")", ")", "query", "=", "str", "(", "raw_query", ".", "compile", "(", "self", ".", "engine", ",", "compile_kwargs", "=", "{", "\"literal_binds\"", ":", "True", "}", ")", ")", "query", "+=", "\"\\nAND ROWNUM <= %d\"", "%", "unexpected_count_limit", "else", ":", "query", "=", "(", "sa", ".", "select", "(", "[", "sa", ".", "column", "(", "column", ")", "]", ")", ".", "select_from", "(", "self", ".", "_table", ")", ".", "where", "(", "sa", ".", "and_", "(", "sa", ".", "not_", "(", "expected_condition", ")", ",", "sa", ".", "not_", "(", "ignore_values_condition", ")", ",", ")", ")", ".", "limit", "(", "unexpected_count_limit", ")", ")", "unexpected_query_results", "=", "self", ".", "engine", ".", "execute", "(", "query", ")", "nonnull_count", ":", "int", "=", "(", "count_results", "[", "\"element_count\"", "]", "-", "count_results", "[", "\"null_count\"", "]", ")", "if", "\"output_strftime_format\"", "in", "kwargs", ":", "output_strftime_format", "=", "kwargs", "[", "\"output_strftime_format\"", "]", "maybe_limited_unexpected_list", "=", "[", "]", "for", "x", "in", "unexpected_query_results", ".", "fetchall", "(", ")", ":", "if", "isinstance", "(", "x", "[", "column", "]", ",", "str", ")", ":", "col", "=", "parse", "(", "x", "[", "column", "]", ")", "else", ":", "col", "=", "x", "[", "column", "]", "maybe_limited_unexpected_list", ".", "append", "(", "datetime", ".", "strftime", "(", "col", ",", "output_strftime_format", ")", ")", "else", ":", "maybe_limited_unexpected_list", "=", "[", "x", "[", "column", "]", "for", "x", "in", 
"unexpected_query_results", ".", "fetchall", "(", ")", "]", "success_count", "=", "nonnull_count", "-", "count_results", "[", "\"unexpected_count\"", "]", "success", ",", "percent_success", "=", "self", ".", "_calc_map_expectation_success", "(", "success_count", ",", "nonnull_count", ",", "mostly", ")", "return_obj", "=", "self", ".", "_format_map_output", "(", "result_format", ",", "success", ",", "count_results", "[", "\"element_count\"", "]", ",", "nonnull_count", ",", "count_results", "[", "\"unexpected_count\"", "]", ",", "maybe_limited_unexpected_list", ",", "None", ",", ")", "if", "func", ".", "__name__", "in", "[", "\"expect_column_values_to_not_be_null\"", ",", "\"expect_column_values_to_be_null\"", ",", "]", ":", "# These results are unnecessary for the above expectations", "del", "return_obj", "[", "\"result\"", "]", "[", "\"unexpected_percent_nonmissing\"", "]", "del", "return_obj", "[", "\"result\"", "]", "[", "\"missing_count\"", "]", "del", "return_obj", "[", "\"result\"", "]", "[", "\"missing_percent\"", "]", "try", ":", "del", "return_obj", "[", "\"result\"", "]", "[", "\"partial_unexpected_counts\"", "]", "del", "return_obj", "[", "\"result\"", "]", "[", "\"partial_unexpected_list\"", "]", "except", "KeyError", ":", "pass", "return", "return_obj", "inner_wrapper", ".", "__name__", "=", "func", ".", "__name__", "inner_wrapper", ".", "__doc__", "=", "func", ".", "__doc__", "return", "inner_wrapper" ]
[ 169, 4 ]
[ 359, 28 ]
python
en
['en', 'en', 'en']
True
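Under this decorator an individual expectation reduces to returning the SQLAlchemy filter for its expected condition; inner_wrapper supplies the counting, the unexpected-value sampling, and the result formatting. A hypothetical expectation written against it (the class and method names are illustrative, assuming the module's MetaSqlAlchemyDataset and SqlAlchemyDataset are in scope):

import sqlalchemy as sa

class MyDataset(SqlAlchemyDataset):
    @MetaSqlAlchemyDataset.column_map_expectation
    def expect_column_values_to_be_positive(self, column):
        # Only the filter is written here; inner_wrapper does the rest.
        return sa.column(column) > 0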
SqlAlchemyDataset.head
(self, n=5)
Returns a *PandasDataset* with the first *n* rows of the given Dataset
Returns a *PandasDataset* with the first *n* rows of the given Dataset
def head(self, n=5): """Returns a *PandasDataset* with the first *n* rows of the given Dataset""" try: df = next( pd.read_sql_table( table_name=self._table.name, schema=self._table.schema, con=self.engine, chunksize=n, ) ) except (ValueError, NotImplementedError): # it looks like MetaData that is used by pd.read_sql_table # cannot work on a temp table. # If it fails, we are trying to get the data using read_sql head_sql_str = "select * from " if self._table.schema and self.engine.dialect.name.lower() != "bigquery": head_sql_str += self._table.schema + "." + self._table.name elif self.engine.dialect.name.lower() == "bigquery": head_sql_str += "`" + self._table.name + "`" else: head_sql_str += self._table.name head_sql_str += " limit {:d}".format(n) # Limit is unknown in mssql! Use top instead! if self.engine.dialect.name.lower() == "mssql": head_sql_str = "select top({n}) * from {table}".format( n=n, table=self._table.name ) # Limit doesn't work in oracle either if self.engine.dialect.name.lower() == "oracle": head_sql_str = "select * from {table} WHERE ROWNUM <= {n}".format( table=self._table.name, n=n ) df = pd.read_sql(head_sql_str, con=self.engine) except StopIteration: df = pd.DataFrame(columns=self.get_table_columns()) return PandasDataset( df, expectation_suite=self.get_expectation_suite( discard_failed_expectations=False, discard_result_format_kwargs=False, discard_catch_exceptions_kwargs=False, discard_include_config_kwargs=False, ), )
[ "def", "head", "(", "self", ",", "n", "=", "5", ")", ":", "try", ":", "df", "=", "next", "(", "pd", ".", "read_sql_table", "(", "table_name", "=", "self", ".", "_table", ".", "name", ",", "schema", "=", "self", ".", "_table", ".", "schema", ",", "con", "=", "self", ".", "engine", ",", "chunksize", "=", "n", ",", ")", ")", "except", "(", "ValueError", ",", "NotImplementedError", ")", ":", "# it looks like MetaData that is used by pd.read_sql_table", "# cannot work on a temp table.", "# If it fails, we are trying to get the data using read_sql", "head_sql_str", "=", "\"select * from \"", "if", "self", ".", "_table", ".", "schema", "and", "self", ".", "engine", ".", "dialect", ".", "name", ".", "lower", "(", ")", "!=", "\"bigquery\"", ":", "head_sql_str", "+=", "self", ".", "_table", ".", "schema", "+", "\".\"", "+", "self", ".", "_table", ".", "name", "elif", "self", ".", "engine", ".", "dialect", ".", "name", ".", "lower", "(", ")", "==", "\"bigquery\"", ":", "head_sql_str", "+=", "\"`\"", "+", "self", ".", "_table", ".", "name", "+", "\"`\"", "else", ":", "head_sql_str", "+=", "self", ".", "_table", ".", "name", "head_sql_str", "+=", "\" limit {:d}\"", ".", "format", "(", "n", ")", "# Limit is unknown in mssql! Use top instead!", "if", "self", ".", "engine", ".", "dialect", ".", "name", ".", "lower", "(", ")", "==", "\"mssql\"", ":", "head_sql_str", "=", "\"select top({n}) * from {table}\"", ".", "format", "(", "n", "=", "n", ",", "table", "=", "self", ".", "_table", ".", "name", ")", "# Limit doesn't work in oracle either", "if", "self", ".", "engine", ".", "dialect", ".", "name", ".", "lower", "(", ")", "==", "\"oracle\"", ":", "head_sql_str", "=", "\"select * from {table} WHERE ROWNUM <= {n}\"", ".", "format", "(", "table", "=", "self", ".", "_table", ".", "name", ",", "n", "=", "n", ")", "df", "=", "pd", ".", "read_sql", "(", "head_sql_str", ",", "con", "=", "self", ".", "engine", ")", "except", "StopIteration", ":", "df", "=", "pd", ".", "DataFrame", "(", "columns", "=", "self", ".", "get_table_columns", "(", ")", ")", "return", "PandasDataset", "(", "df", ",", "expectation_suite", "=", "self", ".", "get_expectation_suite", "(", "discard_failed_expectations", "=", "False", ",", "discard_result_format_kwargs", "=", "False", ",", "discard_catch_exceptions_kwargs", "=", "False", ",", "discard_include_config_kwargs", "=", "False", ",", ")", ",", ")" ]
[ 652, 4 ]
[ 701, 9 ]
python
en
['en', 'en', 'en']
True
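The fallback branch hand-writes dialect-specific row limiting; pulled out as a standalone helper for clarity (the helper name, table name, and n are placeholders):

def first_n_rows_sql(dialect, table, n):
    # Mirrors the fallback above: TOP for mssql, ROWNUM for oracle, LIMIT otherwise.
    if dialect == "mssql":
        return "select top({n}) * from {table}".format(n=n, table=table)
    if dialect == "oracle":
        return "select * from {table} WHERE ROWNUM <= {n}".format(table=table, n=n)
    return "select * from {table} limit {n:d}".format(table=table, n=n)

print(first_n_rows_sql("mssql", "events", 5))   # select top(5) * from events
print(first_n_rows_sql("oracle", "events", 5))  # select * from events WHERE ROWNUM <= 5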
SqlAlchemyDataset.get_column_hist
(self, column, bins)
return a list of counts corresponding to bins Args: column: the name of the column for which to get the histogram bins: tuple of bin edges for which to get histogram values; *must* be tuple to support caching
return a list of counts corresponding to bins
def get_column_hist(self, column, bins): """return a list of counts corresponding to bins Args: column: the name of the column for which to get the histogram bins: tuple of bin edges for which to get histogram values; *must* be tuple to support caching """ case_conditions = [] idx = 0 bins = list(bins) # If we have an infinite lower bound, don't express that in sql if ( bins[0] == get_sql_dialect_floating_point_infinity_value( schema="api_np", negative=True ) ) or ( bins[0] == get_sql_dialect_floating_point_infinity_value( schema="api_cast", negative=True ) ): case_conditions.append( sa.func.sum( sa.case([(sa.column(column) < bins[idx + 1], 1)], else_=0) ).label("bin_" + str(idx)) ) idx += 1 for idx in range(idx, len(bins) - 2): case_conditions.append( sa.func.sum( sa.case( [ ( sa.and_( sa.column(column) >= bins[idx], sa.column(column) < bins[idx + 1], ), 1, ) ], else_=0, ) ).label("bin_" + str(idx)) ) if ( bins[-1] == get_sql_dialect_floating_point_infinity_value( schema="api_np", negative=False ) ) or ( bins[-1] == get_sql_dialect_floating_point_infinity_value( schema="api_cast", negative=False ) ): case_conditions.append( sa.func.sum( sa.case([(sa.column(column) >= bins[-2], 1)], else_=0) ).label("bin_" + str(len(bins) - 1)) ) else: case_conditions.append( sa.func.sum( sa.case( [ ( sa.and_( sa.column(column) >= bins[-2], sa.column(column) <= bins[-1], ), 1, ) ], else_=0, ) ).label("bin_" + str(len(bins) - 1)) ) query = ( sa.select(case_conditions) .where( sa.column(column) != None, ) .select_from(self._table) ) # Run the data through convert_to_json_serializable to ensure we do not have Decimal types hist = convert_to_json_serializable(list(self.engine.execute(query).fetchone())) return hist
[ "def", "get_column_hist", "(", "self", ",", "column", ",", "bins", ")", ":", "case_conditions", "=", "[", "]", "idx", "=", "0", "bins", "=", "list", "(", "bins", ")", "# If we have an infinite lower bound, don't express that in sql", "if", "(", "bins", "[", "0", "]", "==", "get_sql_dialect_floating_point_infinity_value", "(", "schema", "=", "\"api_np\"", ",", "negative", "=", "True", ")", ")", "or", "(", "bins", "[", "0", "]", "==", "get_sql_dialect_floating_point_infinity_value", "(", "schema", "=", "\"api_cast\"", ",", "negative", "=", "True", ")", ")", ":", "case_conditions", ".", "append", "(", "sa", ".", "func", ".", "sum", "(", "sa", ".", "case", "(", "[", "(", "sa", ".", "column", "(", "column", ")", "<", "bins", "[", "idx", "+", "1", "]", ",", "1", ")", "]", ",", "else_", "=", "0", ")", ")", ".", "label", "(", "\"bin_\"", "+", "str", "(", "idx", ")", ")", ")", "idx", "+=", "1", "for", "idx", "in", "range", "(", "idx", ",", "len", "(", "bins", ")", "-", "2", ")", ":", "case_conditions", ".", "append", "(", "sa", ".", "func", ".", "sum", "(", "sa", ".", "case", "(", "[", "(", "sa", ".", "and_", "(", "sa", ".", "column", "(", "column", ")", ">=", "bins", "[", "idx", "]", ",", "sa", ".", "column", "(", "column", ")", "<", "bins", "[", "idx", "+", "1", "]", ",", ")", ",", "1", ",", ")", "]", ",", "else_", "=", "0", ",", ")", ")", ".", "label", "(", "\"bin_\"", "+", "str", "(", "idx", ")", ")", ")", "if", "(", "bins", "[", "-", "1", "]", "==", "get_sql_dialect_floating_point_infinity_value", "(", "schema", "=", "\"api_np\"", ",", "negative", "=", "False", ")", ")", "or", "(", "bins", "[", "-", "1", "]", "==", "get_sql_dialect_floating_point_infinity_value", "(", "schema", "=", "\"api_cast\"", ",", "negative", "=", "False", ")", ")", ":", "case_conditions", ".", "append", "(", "sa", ".", "func", ".", "sum", "(", "sa", ".", "case", "(", "[", "(", "sa", ".", "column", "(", "column", ")", ">=", "bins", "[", "-", "2", "]", ",", "1", ")", "]", ",", "else_", "=", "0", ")", ")", ".", "label", "(", "\"bin_\"", "+", "str", "(", "len", "(", "bins", ")", "-", "1", ")", ")", ")", "else", ":", "case_conditions", ".", "append", "(", "sa", ".", "func", ".", "sum", "(", "sa", ".", "case", "(", "[", "(", "sa", ".", "and_", "(", "sa", ".", "column", "(", "column", ")", ">=", "bins", "[", "-", "2", "]", ",", "sa", ".", "column", "(", "column", ")", "<=", "bins", "[", "-", "1", "]", ",", ")", ",", "1", ",", ")", "]", ",", "else_", "=", "0", ",", ")", ")", ".", "label", "(", "\"bin_\"", "+", "str", "(", "len", "(", "bins", ")", "-", "1", ")", ")", ")", "query", "=", "(", "sa", ".", "select", "(", "case_conditions", ")", ".", "where", "(", "sa", ".", "column", "(", "column", ")", "!=", "None", ",", ")", ".", "select_from", "(", "self", ".", "_table", ")", ")", "# Run the data through convert_to_json_serializable to ensure we do not have Decimal types", "hist", "=", "convert_to_json_serializable", "(", "list", "(", "self", ".", "engine", ".", "execute", "(", "query", ")", ".", "fetchone", "(", ")", ")", ")", "return", "hist" ]
[ 1071, 4 ]
[ 1163, 19 ]
python
en
['en', 'en', 'en']
True
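For orientation, the sum-of-CASE pattern in get_column_hist compiles to a single aggregate row with one column per bin. Below is a minimal standalone sketch of the same idea, assuming SQLAlchemy 1.3-style sa.case([...]) and an illustrative in-memory SQLite table t with a numeric column x; unlike the method above, every bin here is half-open and the infinity-edge handling is omitted:

import sqlalchemy as sa

engine = sa.create_engine("sqlite://")
engine.execute(sa.text("CREATE TABLE t (x FLOAT)"))
engine.execute(sa.text("INSERT INTO t VALUES (0.5), (1.5), (1.7), (2.5)"))

bins = [0.0, 1.0, 2.0, 3.0]
# One SUM(CASE WHEN lo <= x < hi THEN 1 ELSE 0 END) aggregate per bin
case_conditions = [
    sa.func.sum(
        sa.case(
            [(sa.and_(sa.column("x") >= lo, sa.column("x") < hi), 1)],
            else_=0,
        )
    ).label("bin_" + str(i))
    for i, (lo, hi) in enumerate(zip(bins[:-1], bins[1:]))
]
query = (
    sa.select(case_conditions)
    .where(sa.column("x") != None)  # NULLs never land in any bin
    .select_from(sa.table("t"))
)
print(list(engine.execute(query).fetchone()))  # [1, 2, 1]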
SqlAlchemyDataset.create_temporary_table
(self, table_name, custom_sql, schema_name=None)
Create a temporary table based on a SQL query. This will be used as a basis for executing expectations. WARNING: this feature is new in v0.4. It hasn't been tested in all SQL dialects, and may change based on community feedback. :param custom_sql:
Create a temporary table based on a SQL query. This will be used as a basis for executing expectations. WARNING: this feature is new in v0.4. It hasn't been tested in all SQL dialects, and may change based on community feedback. :param custom_sql:
def create_temporary_table(self, table_name, custom_sql, schema_name=None): """ Create a temporary table based on a SQL query. This will be used as a basis for executing expectations. WARNING: this feature is new in v0.4. It hasn't been tested in all SQL dialects, and may change based on community feedback. :param custom_sql: """ ### # NOTE: 20200310 - The update to support snowflake transient table creation revealed several # important cases that are not fully handled. # The snowflake-related change updated behavior to allow both custom_sql and schema to be specified. But # the underlying incomplete handling of schema remains. # # Several cases we need to consider: # # 1. Distributed backends (e.g. Snowflake and BigQuery) often use a `<database>.<schema>.<table>` # syntax, but currently we are biased towards only allowing schema.table # # 2. In the wild, we see people using several ways to declare the schema they want to use: # a. In the connection string, the original RFC only specifies database, but schema is supported by some # backends (Snowflake) as a query parameter. # b. As a default for a user (the equivalent of USE SCHEMA being provided at the beginning of a session) # c. As part of individual queries. # # 3. We currently don't make it possible to select from a table in one query, but create a temporary table in # another schema, except for with BigQuery and (now) snowflake, where you can specify the table name (and # potentially triple of database, schema, table) in the batch_kwargs. # # The SqlAlchemyDataset interface essentially predates the batch_kwargs concept and so part of what's going # on, I think, is a mismatch between those. I think we should rename custom_sql -> "temp_table_query" or # similar, for example. ### engine_dialect = self.sql_engine_dialect.name.lower() # handle cases where dialect.name.lower() returns a byte string (e.g. databricks) if isinstance(engine_dialect, bytes): engine_dialect = str(engine_dialect, "utf-8") if engine_dialect == "bigquery": stmt = "CREATE OR REPLACE VIEW `{table_name}` AS {custom_sql}".format( table_name=table_name, custom_sql=custom_sql ) elif engine_dialect == "databricks": stmt = "CREATE OR REPLACE VIEW `{table_name}` AS {custom_sql}".format( table_name=table_name, custom_sql=custom_sql ) elif engine_dialect == "snowflake": table_type = "TEMPORARY" if self.generated_table_name else "TRANSIENT" logger.info("Creating temporary table %s" % table_name) if schema_name is not None: table_name = schema_name + "." + table_name stmt = "CREATE OR REPLACE {table_type} TABLE {table_name} AS {custom_sql}".format( table_type=table_type, table_name=table_name, custom_sql=custom_sql ) elif self.sql_engine_dialect.name == "mysql": # Note: We can keep the "MySQL" clause separate for clarity, even though it is the same as the generic case. stmt = "CREATE TEMPORARY TABLE {table_name} AS {custom_sql}".format( table_name=table_name, custom_sql=custom_sql ) elif self.sql_engine_dialect.name == "mssql": # Insert "into #{table_name}" in the custom sql query right before the "from" clause # Split is case sensitive so detect case. # Note: transforming custom_sql to uppercase/lowercase has unintended consequences (i.e., changing column names), so this is not an option!
if "from" in custom_sql: strsep = "from" else: strsep = "FROM" custom_sqlmod = custom_sql.split(strsep, maxsplit=1) stmt = ( custom_sqlmod[0] + "into {table_name} from" + custom_sqlmod[1] ).format(table_name=table_name) elif engine_dialect == "awsathena": stmt = "CREATE TABLE {table_name} AS {custom_sql}".format( table_name=table_name, custom_sql=custom_sql ) elif engine_dialect == "oracle": # oracle 18c introduced PRIVATE temp tables which are transient objects stmt_1 = "CREATE PRIVATE TEMPORARY TABLE {table_name} ON COMMIT PRESERVE DEFINITION AS {custom_sql}".format( table_name=table_name, custom_sql=custom_sql ) # prior to oracle 18c only GLOBAL temp tables existed and only the data is transient # this means an empty table will persist after the db session stmt_2 = "CREATE GLOBAL TEMPORARY TABLE {table_name} ON COMMIT PRESERVE ROWS AS {custom_sql}".format( table_name=table_name, custom_sql=custom_sql ) else: stmt = 'CREATE TEMPORARY TABLE "{table_name}" AS {custom_sql}'.format( table_name=table_name, custom_sql=custom_sql ) if engine_dialect == "oracle": try: self.engine.execute(stmt_1) except DatabaseError: self.engine.execute(stmt_2) else: self.engine.execute(stmt)
[ "def", "create_temporary_table", "(", "self", ",", "table_name", ",", "custom_sql", ",", "schema_name", "=", "None", ")", ":", "###", "# NOTE: 20200310 - The update to support snowflake transient table creation revealed several", "# import cases that are not fully handled.", "# The snowflake-related change updated behavior to allow both custom_sql and schema to be specified. But", "# the underlying incomplete handling of schema remains.", "#", "# Several cases we need to consider:", "#", "# 1. Distributed backends (e.g. Snowflake and BigQuery) often use a `<database>.<schema>.<table>`", "# syntax, but currently we are biased towards only allowing schema.table", "#", "# 2. In the wild, we see people using several ways to declare the schema they want to use:", "# a. In the connection string, the original RFC only specifies database, but schema is supported by some", "# backends (Snowflake) as a query parameter.", "# b. As a default for a user (the equivalent of USE SCHEMA being provided at the beginning of a session)", "# c. As part of individual queries.", "#", "# 3. We currently don't make it possible to select from a table in one query, but create a temporary table in", "# another schema, except for with BigQuery and (now) snowflake, where you can specify the table name (and", "# potentially triple of database, schema, table) in the batch_kwargs.", "#", "# The SqlAlchemyDataset interface essentially predates the batch_kwargs concept and so part of what's going", "# on, I think, is a mismatch between those. I think we should rename custom_sql -> \"temp_table_query\" or", "# similar, for example.", "###", "engine_dialect", "=", "self", ".", "sql_engine_dialect", ".", "name", ".", "lower", "(", ")", "# handle cases where dialect.name.lower() returns a byte string (e.g. 
databricks)", "if", "isinstance", "(", "engine_dialect", ",", "bytes", ")", ":", "engine_dialect", "=", "str", "(", "engine_dialect", ",", "\"utf-8\"", ")", "if", "engine_dialect", "==", "\"bigquery\"", ":", "stmt", "=", "\"CREATE OR REPLACE VIEW `{table_name}` AS {custom_sql}\"", ".", "format", "(", "table_name", "=", "table_name", ",", "custom_sql", "=", "custom_sql", ")", "elif", "engine_dialect", "==", "\"databricks\"", ":", "stmt", "=", "\"CREATE OR REPLACE VIEW `{table_name}` AS {custom_sql}\"", ".", "format", "(", "table_name", "=", "table_name", ",", "custom_sql", "=", "custom_sql", ")", "elif", "engine_dialect", "==", "\"snowflake\"", ":", "table_type", "=", "\"TEMPORARY\"", "if", "self", ".", "generated_table_name", "else", "\"TRANSIENT\"", "logger", ".", "info", "(", "\"Creating temporary table %s\"", "%", "table_name", ")", "if", "schema_name", "is", "not", "None", ":", "table_name", "=", "schema_name", "+", "\".\"", "+", "table_name", "stmt", "=", "\"CREATE OR REPLACE {table_type} TABLE {table_name} AS {custom_sql}\"", ".", "format", "(", "table_type", "=", "table_type", ",", "table_name", "=", "table_name", ",", "custom_sql", "=", "custom_sql", ")", "elif", "self", ".", "sql_engine_dialect", ".", "name", "==", "\"mysql\"", ":", "# Note: We can keep the \"MySQL\" clause separate for clarity, even though it is the same as the generic case.", "stmt", "=", "\"CREATE TEMPORARY TABLE {table_name} AS {custom_sql}\"", ".", "format", "(", "table_name", "=", "table_name", ",", "custom_sql", "=", "custom_sql", ")", "elif", "self", ".", "sql_engine_dialect", ".", "name", "==", "\"mssql\"", ":", "# Insert \"into #{table_name}\" in the custom sql query right before the \"from\" clause", "# Split is case sensitive so detect case.", "# Note: transforming custom_sql to uppercase/lowercase has uninteded consequences (i.e., changing column names), so this is not an option!", "if", "\"from\"", "in", "custom_sql", ":", "strsep", "=", "\"from\"", "else", ":", "strsep", "=", "\"FROM\"", "custom_sqlmod", "=", "custom_sql", ".", "split", "(", "strsep", ",", "maxsplit", "=", "1", ")", "stmt", "=", "(", "custom_sqlmod", "[", "0", "]", "+", "\"into {table_name} from\"", "+", "custom_sqlmod", "[", "1", "]", ")", ".", "format", "(", "table_name", "=", "table_name", ")", "elif", "engine_dialect", "==", "\"awsathena\"", ":", "stmt", "=", "\"CREATE TABLE {table_name} AS {custom_sql}\"", ".", "format", "(", "table_name", "=", "table_name", ",", "custom_sql", "=", "custom_sql", ")", "elif", "engine_dialect", "==", "\"oracle\"", ":", "# oracle 18c introduced PRIVATE temp tables which are transient objects", "stmt_1", "=", "\"CREATE PRIVATE TEMPORARY TABLE {table_name} ON COMMIT PRESERVE DEFINITION AS {custom_sql}\"", ".", "format", "(", "table_name", "=", "table_name", ",", "custom_sql", "=", "custom_sql", ")", "# prior to oracle 18c only GLOBAL temp tables existed and only the data is transient", "# this means an empty table will persist after the db session", "stmt_2", "=", "\"CREATE GLOBAL TEMPORARY TABLE {table_name} ON COMMIT PRESERVE ROWS AS {custom_sql}\"", ".", "format", "(", "table_name", "=", "table_name", ",", "custom_sql", "=", "custom_sql", ")", "else", ":", "stmt", "=", "'CREATE TEMPORARY TABLE \"{table_name}\" AS {custom_sql}'", ".", "format", "(", "table_name", "=", "table_name", ",", "custom_sql", "=", "custom_sql", ")", "if", "engine_dialect", "==", "\"oracle\"", ":", "try", ":", "self", ".", "engine", ".", "execute", "(", "stmt_1", ")", "except", "DatabaseError", ":", "self", ".", "engine", 
".", "execute", "(", "stmt_2", ")", "else", ":", "self", ".", "engine", ".", "execute", "(", "stmt", ")" ]
[ 1261, 4 ]
[ 1359, 37 ]
python
en
['en', 'error', 'th']
False
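The long if/elif chain above is, at heart, a dialect-to-DDL-template dispatch. The condensed helper below mirrors the templates used in the method but is hypothetical, not part of the source; the MSSQL SELECT ... INTO rewrite and Oracle's two-statement fallback are omitted for brevity:

def temp_table_stmt(dialect: str, table_name: str, custom_sql: str) -> str:
    """Return dialect-appropriate DDL for materializing custom_sql."""
    templates = {
        "bigquery": "CREATE OR REPLACE VIEW `{t}` AS {q}",
        "databricks": "CREATE OR REPLACE VIEW `{t}` AS {q}",
        "snowflake": "CREATE OR REPLACE TEMPORARY TABLE {t} AS {q}",
        "mysql": "CREATE TEMPORARY TABLE {t} AS {q}",
        "awsathena": "CREATE TABLE {t} AS {q}",
    }
    template = templates.get(dialect, 'CREATE TEMPORARY TABLE "{t}" AS {q}')
    return template.format(t=table_name, q=custom_sql)

print(temp_table_stmt("mysql", "tmp_batch", "SELECT * FROM events"))
# CREATE TEMPORARY TABLE tmp_batch AS SELECT * FROM events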
SqlAlchemyDataset.column_reflection_fallback
(self)
If we can't reflect the table, use a query to at least get column names.
If we can't reflect the table, use a query to at least get column names.
def column_reflection_fallback(self): """If we can't reflect the table, use a query to at least get column names.""" col_info_dict_list: List[Dict] if self.sql_engine_dialect.name.lower() == "mssql": type_module = self._get_dialect_type_module() # Get column names and types from the database # StackOverflow to the rescue: https://stackoverflow.com/a/38634368 col_info_query: TextClause = sa.text( f""" SELECT cols.NAME, ty.NAME FROM tempdb.sys.columns AS cols JOIN sys.types AS ty ON cols.user_type_id = ty.user_type_id WHERE object_id = OBJECT_ID('tempdb..{self._table}') """ ) col_info_tuples_list = self.engine.execute(col_info_query).fetchall() col_info_dict_list = [ {"name": col_name, "type": getattr(type_module, col_type.upper())()} for col_name, col_type in col_info_tuples_list ] else: query: Select = sa.select([sa.text("*")]).select_from(self._table).limit(1) col_names: list = self.engine.execute(query).keys() col_info_dict_list = [{"name": col_name} for col_name in col_names] return col_info_dict_list
[ "def", "column_reflection_fallback", "(", "self", ")", ":", "col_info_dict_list", ":", "List", "[", "Dict", "]", "if", "self", ".", "sql_engine_dialect", ".", "name", ".", "lower", "(", ")", "==", "\"mssql\"", ":", "type_module", "=", "self", ".", "_get_dialect_type_module", "(", ")", "# Get column names and types from the database", "# StackOverflow to the rescue: https://stackoverflow.com/a/38634368", "col_info_query", ":", "TextClause", "=", "sa", ".", "text", "(", "f\"\"\"\nSELECT\n cols.NAME, ty.NAME\nFROM\n tempdb.sys.columns AS cols\nJOIN\n sys.types AS ty\nON\n cols.user_type_id = ty.user_type_id\nWHERE\n object_id = OBJECT_ID('tempdb..{self._table}')\n \"\"\"", ")", "col_info_tuples_list", "=", "self", ".", "engine", ".", "execute", "(", "col_info_query", ")", ".", "fetchall", "(", ")", "col_info_dict_list", "=", "[", "{", "\"name\"", ":", "col_name", ",", "\"type\"", ":", "getattr", "(", "type_module", ",", "col_type", ".", "upper", "(", ")", ")", "(", ")", "}", "for", "col_name", ",", "col_type", "in", "col_info_tuples_list", "]", "else", ":", "query", ":", "Select", "=", "sa", ".", "select", "(", "[", "sa", ".", "text", "(", "\"*\"", ")", "]", ")", ".", "select_from", "(", "self", ".", "_table", ")", ".", "limit", "(", "1", ")", "col_names", ":", "list", "=", "self", ".", "engine", ".", "execute", "(", "query", ")", ".", "keys", "(", ")", "col_info_dict_list", "=", "[", "{", "\"name\"", ":", "col_name", "}", "for", "col_name", "in", "col_names", "]", "return", "col_info_dict_list" ]
[ 1361, 4 ]
[ 1391, 33 ]
python
en
['en', 'en', 'en']
True
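The generic (non-MSSQL) branch of the fallback reduces to "select one row and read the column names off the result". A minimal sketch, assuming SQLAlchemy 1.x and an illustrative SQLite table t; only names, not types, are recovered on this path:

import sqlalchemy as sa

engine = sa.create_engine("sqlite://")
engine.execute(sa.text("CREATE TABLE t (id INTEGER, name TEXT)"))

# LIMIT 1 keeps the probe cheap; column metadata is available even for zero rows
query = sa.select([sa.text("*")]).select_from(sa.table("t")).limit(1)
col_names = engine.execute(query).keys()
print([{"name": col_name} for col_name in col_names])
# [{'name': 'id'}, {'name': 'name'}]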
SqlAlchemyDataset.expect_table_row_count_to_equal_other_table
( self, other_table_name, result_format=None, include_config=True, catch_exceptions=None, meta=None, )
Expect the number of rows in this table to equal the number of rows in a different table. expect_table_row_count_to_equal is a :func:`expectation \ <great_expectations.data_asset.data_asset.DataAsset.expectation>`, not a ``column_map_expectation`` or ``column_aggregate_expectation``. Args: other_table_name (str): \ The name of the other table to which to compare. Other Parameters: result_format (string or None): \ Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`. For more detail, see :ref:`result_format <result_format>`. include_config (boolean): \ If True, then include the expectation config as part of the result object. \ For more detail, see :ref:`include_config`. catch_exceptions (boolean or None): \ If True, then catch exceptions and include them as part of the result object. \ For more detail, see :ref:`catch_exceptions`. meta (dict or None): \ A JSON-serializable dictionary (nesting allowed) that will be included in the output without \ modification. For more detail, see :ref:`meta`. Returns: An ExpectationSuiteValidationResult Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and :ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`. See Also: expect_table_row_count_to_be_between
Expect the number of rows in this table to equal the number of rows in a different table.
def expect_table_row_count_to_equal_other_table( self, other_table_name, result_format=None, include_config=True, catch_exceptions=None, meta=None, ): """Expect the number of rows in this table to equal the number of rows in a different table. expect_table_row_count_to_equal is a :func:`expectation \ <great_expectations.data_asset.data_asset.DataAsset.expectation>`, not a ``column_map_expectation`` or ``column_aggregate_expectation``. Args: other_table_name (str): \ The name of the other table to which to compare. Other Parameters: result_format (string or None): \ Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`. For more detail, see :ref:`result_format <result_format>`. include_config (boolean): \ If True, then include the expectation config as part of the result object. \ For more detail, see :ref:`include_config`. catch_exceptions (boolean or None): \ If True, then catch exceptions and include them as part of the result object. \ For more detail, see :ref:`catch_exceptions`. meta (dict or None): \ A JSON-serializable dictionary (nesting allowed) that will be included in the output without \ modification. For more detail, see :ref:`meta`. Returns: An ExpectationSuiteValidationResult Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and :ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`. See Also: expect_table_row_count_to_be_between """ row_count = self.get_row_count() other_table_row_count = self.get_row_count(table_name=other_table_name) return { "success": row_count == other_table_row_count, "result": { "observed_value": { "self": row_count, "other": other_table_row_count, } }, }
[ "def", "expect_table_row_count_to_equal_other_table", "(", "self", ",", "other_table_name", ",", "result_format", "=", "None", ",", "include_config", "=", "True", ",", "catch_exceptions", "=", "None", ",", "meta", "=", "None", ",", ")", ":", "row_count", "=", "self", ".", "get_row_count", "(", ")", "other_table_row_count", "=", "self", ".", "get_row_count", "(", "table_name", "=", "other_table_name", ")", "return", "{", "\"success\"", ":", "row_count", "==", "other_table_row_count", ",", "\"result\"", ":", "{", "\"observed_value\"", ":", "{", "\"self\"", ":", "row_count", ",", "\"other\"", ":", "other_table_row_count", ",", "}", "}", ",", "}" ]
[ 1406, 4 ]
[ 1458, 9 ]
python
en
['en', 'en', 'en']
True
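A hedged usage sketch for this expectation; the dataset object and table name below are illustrative, and the dict shape follows the return statement above (when called through the expectation decorator, the same fields appear on the wrapped validation result):

# my_dataset is assumed to be a SqlAlchemyDataset over a 1313-row table
result = my_dataset.expect_table_row_count_to_equal_other_table("titanic_copy")
# A passing run yields:
# {"success": True,
#  "result": {"observed_value": {"self": 1313, "other": 1313}}}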
test_notebook_execution_with_pandas_backend
( titanic_v013_multi_datasource_pandas_data_context_with_checkpoints_v1_with_empty_store_stats_enabled, )
To set this test up we: - create a suite using profiling - verify that no validations have happened - create the suite edit notebook by hijacking the private cli method We then: - execute that notebook (Note this will raise various errors like CellExecutionError if any cell in the notebook fails) - create a new context from disk - verify that a validation has been run with our expectation suite
To set this test up we:
def test_notebook_execution_with_pandas_backend( titanic_v013_multi_datasource_pandas_data_context_with_checkpoints_v1_with_empty_store_stats_enabled, ): """ To set this test up we: - create a suite using profiling - verify that no validations have happened - create the suite edit notebook by hijacking the private cli method We then: - execute that notebook (Note this will raise various errors like CellExecutionError if any cell in the notebook fails - create a new context from disk - verify that a validation has been run with our expectation suite """ context: DataContext = titanic_v013_multi_datasource_pandas_data_context_with_checkpoints_v1_with_empty_store_stats_enabled root_dir: str = context.root_directory uncommitted_dir: str = os.path.join(root_dir, "uncommitted") expectation_suite_name: str = "warning" context.create_expectation_suite(expectation_suite_name=expectation_suite_name) batch_request: dict = { "datasource_name": "my_datasource", "data_connector_name": "my_basic_data_connector", "data_asset_name": "Titanic_1912", } # Sanity check test setup original_suite: ExpectationSuite = context.get_expectation_suite( expectation_suite_name=expectation_suite_name ) assert len(original_suite.expectations) == 0 assert context.list_expectation_suite_names() == [expectation_suite_name] assert context.list_datasources() == [ { "name": "my_datasource", "class_name": "Datasource", "module_name": "great_expectations.datasource", "execution_engine": { "class_name": "PandasExecutionEngine", "module_name": "great_expectations.execution_engine", }, "data_connectors": { "my_basic_data_connector": { "module_name": "great_expectations.datasource.data_connector", "base_directory": f"{root_dir}/../data/titanic", "default_regex": { "pattern": "(.*)\\.csv", "group_names": ["data_asset_name"], }, "class_name": "InferredAssetFilesystemDataConnector", }, "my_special_data_connector": { "glob_directive": "*.csv", "assets": { "users": { "pattern": "(.+)_(\\d+)_(\\d+)\\.csv", "group_names": ["name", "timestamp", "size"], "class_name": "Asset", "base_directory": f"{root_dir}/../data/titanic", "module_name": "great_expectations.datasource.data_connector.asset", } }, "module_name": "great_expectations.datasource.data_connector", "base_directory": f"{root_dir}/../data/titanic", "default_regex": {"pattern": "(.+)\\.csv", "group_names": ["name"]}, "class_name": "ConfiguredAssetFilesystemDataConnector", }, "my_other_data_connector": { "glob_directive": "*.csv", "assets": { "users": { "class_name": "Asset", "module_name": "great_expectations.datasource.data_connector.asset", } }, "module_name": "great_expectations.datasource.data_connector", "base_directory": f"{root_dir}/../data/titanic", "default_regex": {"pattern": "(.+)\\.csv", "group_names": ["name"]}, "class_name": "ConfiguredAssetFilesystemDataConnector", }, "my_runtime_data_connector": { "module_name": "great_expectations.datasource.data_connector", "batch_identifiers": ["pipeline_stage_name", "airflow_run_id"], "class_name": "RuntimeDataConnector", }, }, }, { "name": "my_additional_datasource", "class_name": "Datasource", "module_name": "great_expectations.datasource", "execution_engine": { "module_name": "great_expectations.execution_engine", "class_name": "PandasExecutionEngine", }, "data_connectors": { "my_additional_data_connector": { "module_name": "great_expectations.datasource.data_connector", "default_regex": { "pattern": "(.*)\\.csv", "group_names": ["data_asset_name"], }, "base_directory": f"{root_dir}/../data/titanic", "class_name": 
"InferredAssetFilesystemDataConnector", } }, }, ] assert context.get_validation_result(expectation_suite_name="warning") == {} # Create notebook # do not want to actually send usage_message, since the function call is not the result of actual usage _suite_edit_workflow( context=context, expectation_suite_name=expectation_suite_name, profile=True, usage_event="test_notebook_execution", interactive=False, no_jupyter=True, create_if_not_exist=False, datasource_name=None, batch_request=batch_request, additional_batch_request_args=None, suppress_usage_message=True, assume_yes=True, ) edit_notebook_path: str = os.path.join(uncommitted_dir, "edit_warning.ipynb") assert os.path.isfile(edit_notebook_path) run_notebook( notebook_path=edit_notebook_path, notebook_dir=uncommitted_dir, string_to_be_replaced="context.open_data_docs(resource_identifier=validation_result_identifier)", replacement_string="", ) # Assertions about output context = DataContext(context_root_dir=root_dir) obs_validation_result: ExpectationSuiteValidationResult = ( context.get_validation_result(expectation_suite_name="warning") ) assert obs_validation_result.statistics == { "evaluated_expectations": 2, "successful_expectations": 2, "unsuccessful_expectations": 0, "success_percent": 100.0, } suite: ExpectationSuite = context.get_expectation_suite( expectation_suite_name=expectation_suite_name ) suite["meta"].pop("citations", None) assert suite.expectations == [ ExpectationConfiguration( **{ "expectation_type": "expect_table_columns_to_match_ordered_list", "kwargs": { "column_list": [ "Unnamed: 0", "Name", "PClass", "Age", "Sex", "Survived", "SexCode", ] }, "meta": {}, } ), ExpectationConfiguration( **{ "expectation_type": "expect_table_row_count_to_be_between", "kwargs": {"max_value": 1313, "min_value": 1313}, "meta": {}, } ), ] columns_with_expectations: Set[str] expectations_from_suite: Set[str] ( columns_with_expectations, expectations_from_suite, ) = get_set_of_columns_and_expectations_from_suite(suite=suite) expected_expectations: Set[str] = { "expect_table_columns_to_match_ordered_list", "expect_table_row_count_to_be_between", } assert columns_with_expectations == set() assert expectations_from_suite == expected_expectations
[ "def", "test_notebook_execution_with_pandas_backend", "(", "titanic_v013_multi_datasource_pandas_data_context_with_checkpoints_v1_with_empty_store_stats_enabled", ",", ")", ":", "context", ":", "DataContext", "=", "titanic_v013_multi_datasource_pandas_data_context_with_checkpoints_v1_with_empty_store_stats_enabled", "root_dir", ":", "str", "=", "context", ".", "root_directory", "uncommitted_dir", ":", "str", "=", "os", ".", "path", ".", "join", "(", "root_dir", ",", "\"uncommitted\"", ")", "expectation_suite_name", ":", "str", "=", "\"warning\"", "context", ".", "create_expectation_suite", "(", "expectation_suite_name", "=", "expectation_suite_name", ")", "batch_request", ":", "dict", "=", "{", "\"datasource_name\"", ":", "\"my_datasource\"", ",", "\"data_connector_name\"", ":", "\"my_basic_data_connector\"", ",", "\"data_asset_name\"", ":", "\"Titanic_1912\"", ",", "}", "# Sanity check test setup", "original_suite", ":", "ExpectationSuite", "=", "context", ".", "get_expectation_suite", "(", "expectation_suite_name", "=", "expectation_suite_name", ")", "assert", "len", "(", "original_suite", ".", "expectations", ")", "==", "0", "assert", "context", ".", "list_expectation_suite_names", "(", ")", "==", "[", "expectation_suite_name", "]", "assert", "context", ".", "list_datasources", "(", ")", "==", "[", "{", "\"name\"", ":", "\"my_datasource\"", ",", "\"class_name\"", ":", "\"Datasource\"", ",", "\"module_name\"", ":", "\"great_expectations.datasource\"", ",", "\"execution_engine\"", ":", "{", "\"class_name\"", ":", "\"PandasExecutionEngine\"", ",", "\"module_name\"", ":", "\"great_expectations.execution_engine\"", ",", "}", ",", "\"data_connectors\"", ":", "{", "\"my_basic_data_connector\"", ":", "{", "\"module_name\"", ":", "\"great_expectations.datasource.data_connector\"", ",", "\"base_directory\"", ":", "f\"{root_dir}/../data/titanic\"", ",", "\"default_regex\"", ":", "{", "\"pattern\"", ":", "\"(.*)\\\\.csv\"", ",", "\"group_names\"", ":", "[", "\"data_asset_name\"", "]", ",", "}", ",", "\"class_name\"", ":", "\"InferredAssetFilesystemDataConnector\"", ",", "}", ",", "\"my_special_data_connector\"", ":", "{", "\"glob_directive\"", ":", "\"*.csv\"", ",", "\"assets\"", ":", "{", "\"users\"", ":", "{", "\"pattern\"", ":", "\"(.+)_(\\\\d+)_(\\\\d+)\\\\.csv\"", ",", "\"group_names\"", ":", "[", "\"name\"", ",", "\"timestamp\"", ",", "\"size\"", "]", ",", "\"class_name\"", ":", "\"Asset\"", ",", "\"base_directory\"", ":", "f\"{root_dir}/../data/titanic\"", ",", "\"module_name\"", ":", "\"great_expectations.datasource.data_connector.asset\"", ",", "}", "}", ",", "\"module_name\"", ":", "\"great_expectations.datasource.data_connector\"", ",", "\"base_directory\"", ":", "f\"{root_dir}/../data/titanic\"", ",", "\"default_regex\"", ":", "{", "\"pattern\"", ":", "\"(.+)\\\\.csv\"", ",", "\"group_names\"", ":", "[", "\"name\"", "]", "}", ",", "\"class_name\"", ":", "\"ConfiguredAssetFilesystemDataConnector\"", ",", "}", ",", "\"my_other_data_connector\"", ":", "{", "\"glob_directive\"", ":", "\"*.csv\"", ",", "\"assets\"", ":", "{", "\"users\"", ":", "{", "\"class_name\"", ":", "\"Asset\"", ",", "\"module_name\"", ":", "\"great_expectations.datasource.data_connector.asset\"", ",", "}", "}", ",", "\"module_name\"", ":", "\"great_expectations.datasource.data_connector\"", ",", "\"base_directory\"", ":", "f\"{root_dir}/../data/titanic\"", ",", "\"default_regex\"", ":", "{", "\"pattern\"", ":", "\"(.+)\\\\.csv\"", ",", "\"group_names\"", ":", "[", "\"name\"", "]", "}", ",", "\"class_name\"", ":", 
"\"ConfiguredAssetFilesystemDataConnector\"", ",", "}", ",", "\"my_runtime_data_connector\"", ":", "{", "\"module_name\"", ":", "\"great_expectations.datasource.data_connector\"", ",", "\"batch_identifiers\"", ":", "[", "\"pipeline_stage_name\"", ",", "\"airflow_run_id\"", "]", ",", "\"class_name\"", ":", "\"RuntimeDataConnector\"", ",", "}", ",", "}", ",", "}", ",", "{", "\"name\"", ":", "\"my_additional_datasource\"", ",", "\"class_name\"", ":", "\"Datasource\"", ",", "\"module_name\"", ":", "\"great_expectations.datasource\"", ",", "\"execution_engine\"", ":", "{", "\"module_name\"", ":", "\"great_expectations.execution_engine\"", ",", "\"class_name\"", ":", "\"PandasExecutionEngine\"", ",", "}", ",", "\"data_connectors\"", ":", "{", "\"my_additional_data_connector\"", ":", "{", "\"module_name\"", ":", "\"great_expectations.datasource.data_connector\"", ",", "\"default_regex\"", ":", "{", "\"pattern\"", ":", "\"(.*)\\\\.csv\"", ",", "\"group_names\"", ":", "[", "\"data_asset_name\"", "]", ",", "}", ",", "\"base_directory\"", ":", "f\"{root_dir}/../data/titanic\"", ",", "\"class_name\"", ":", "\"InferredAssetFilesystemDataConnector\"", ",", "}", "}", ",", "}", ",", "]", "assert", "context", ".", "get_validation_result", "(", "expectation_suite_name", "=", "\"warning\"", ")", "==", "{", "}", "# Create notebook", "# do not want to actually send usage_message, since the function call is not the result of actual usage", "_suite_edit_workflow", "(", "context", "=", "context", ",", "expectation_suite_name", "=", "expectation_suite_name", ",", "profile", "=", "True", ",", "usage_event", "=", "\"test_notebook_execution\"", ",", "interactive", "=", "False", ",", "no_jupyter", "=", "True", ",", "create_if_not_exist", "=", "False", ",", "datasource_name", "=", "None", ",", "batch_request", "=", "batch_request", ",", "additional_batch_request_args", "=", "None", ",", "suppress_usage_message", "=", "True", ",", "assume_yes", "=", "True", ",", ")", "edit_notebook_path", ":", "str", "=", "os", ".", "path", ".", "join", "(", "uncommitted_dir", ",", "\"edit_warning.ipynb\"", ")", "assert", "os", ".", "path", ".", "isfile", "(", "edit_notebook_path", ")", "run_notebook", "(", "notebook_path", "=", "edit_notebook_path", ",", "notebook_dir", "=", "uncommitted_dir", ",", "string_to_be_replaced", "=", "\"context.open_data_docs(resource_identifier=validation_result_identifier)\"", ",", "replacement_string", "=", "\"\"", ",", ")", "# Assertions about output", "context", "=", "DataContext", "(", "context_root_dir", "=", "root_dir", ")", "obs_validation_result", ":", "ExpectationSuiteValidationResult", "=", "(", "context", ".", "get_validation_result", "(", "expectation_suite_name", "=", "\"warning\"", ")", ")", "assert", "obs_validation_result", ".", "statistics", "==", "{", "\"evaluated_expectations\"", ":", "2", ",", "\"successful_expectations\"", ":", "2", ",", "\"unsuccessful_expectations\"", ":", "0", ",", "\"success_percent\"", ":", "100.0", ",", "}", "suite", ":", "ExpectationSuite", "=", "context", ".", "get_expectation_suite", "(", "expectation_suite_name", "=", "expectation_suite_name", ")", "suite", "[", "\"meta\"", "]", ".", "pop", "(", "\"citations\"", ",", "None", ")", "assert", "suite", ".", "expectations", "==", "[", "ExpectationConfiguration", "(", "*", "*", "{", "\"expectation_type\"", ":", "\"expect_table_columns_to_match_ordered_list\"", ",", "\"kwargs\"", ":", "{", "\"column_list\"", ":", "[", "\"Unnamed: 0\"", ",", "\"Name\"", ",", "\"PClass\"", ",", "\"Age\"", ",", "\"Sex\"", ",", 
"\"Survived\"", ",", "\"SexCode\"", ",", "]", "}", ",", "\"meta\"", ":", "{", "}", ",", "}", ")", ",", "ExpectationConfiguration", "(", "*", "*", "{", "\"expectation_type\"", ":", "\"expect_table_row_count_to_be_between\"", ",", "\"kwargs\"", ":", "{", "\"max_value\"", ":", "1313", ",", "\"min_value\"", ":", "1313", "}", ",", "\"meta\"", ":", "{", "}", ",", "}", ")", ",", "]", "columns_with_expectations", ":", "Set", "[", "str", "]", "expectations_from_suite", ":", "Set", "[", "str", "]", "(", "columns_with_expectations", ",", "expectations_from_suite", ",", ")", "=", "get_set_of_columns_and_expectations_from_suite", "(", "suite", "=", "suite", ")", "expected_expectations", ":", "Set", "[", "str", "]", "=", "{", "\"expect_table_columns_to_match_ordered_list\"", ",", "\"expect_table_row_count_to_be_between\"", ",", "}", "assert", "columns_with_expectations", "==", "set", "(", ")", "assert", "expectations_from_suite", "==", "expected_expectations" ]
[ 16, 0 ]
[ 210, 59 ]
python
en
['en', 'error', 'th']
False
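run_notebook is a project-specific test helper; its observable behavior in this test (patch a string out of the notebook, then execute it in a working directory, raising CellExecutionError if any cell fails) can be sketched with the standard nbformat/nbconvert APIs. The helper below is an assumption, not the project's implementation:

import nbformat
from nbconvert.preprocessors import ExecutePreprocessor

def run_notebook_sketch(notebook_path, notebook_dir, string_to_be_replaced, replacement_string):
    nb = nbformat.read(notebook_path, as_version=4)
    for cell in nb.cells:
        if cell.cell_type == "code":
            # e.g. strip the open_data_docs() call so execution stays headless
            cell.source = cell.source.replace(string_to_be_replaced, replacement_string)
    # Raises CellExecutionError if any cell in the notebook fails
    ExecutePreprocessor(timeout=600, kernel_name="python3").preprocess(
        nb, {"metadata": {"path": notebook_dir}}
    )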
NumericMetricRangeMultiBatchParameterBuilder.__init__
( self, parameter_name: str, metric_name: str, metric_domain_kwargs: Optional[Union[str, dict]] = None, metric_value_kwargs: Optional[Union[str, dict]] = None, sampling_method: Optional[str] = "bootstrap", enforce_numeric_metric: Optional[Union[str, bool]] = True, replace_nan_with_zero: Optional[Union[str, bool]] = True, false_positive_rate: Optional[Union[str, float]] = 5.0e-2, num_bootstrap_samples: Optional[Union[str, int]] = None, round_decimals: Optional[Union[str, int]] = None, truncate_values: Optional[ Union[str, Dict[str, Union[Optional[int], Optional[float]]]] ] = None, data_context: Optional[DataContext] = None, batch_request: Optional[Union[str, dict]] = None, )
Args: parameter_name: the name of this parameter -- this is the user-specified parameter name (from configuration); it is not the fully-qualified parameter name; a fully-qualified parameter name must start with "$parameter." and may contain one or more subsequent parts (e.g., "$parameter.<my_param_from_config>.<metric_name>"). metric_name: the name of a metric used in MetricConfiguration (must be a supported and registered metric) metric_domain_kwargs: used in MetricConfiguration metric_value_kwargs: used in MetricConfiguration sampling_method: choice of the sampling algorithm: "oneshot" (one observation) or "bootstrap" (default) enforce_numeric_metric: used in MetricConfiguration to ensure that metric computations return numeric values replace_nan_with_zero: if False, then if the computed metric gives NaN, then an exception is raised; otherwise, if True (default), then if the computed metric gives NaN, then it is converted to 0.0 (float). false_positive_rate: user-configured fraction between 0 and 1 expressing the desired false positive rate for identifying unexpected values as judged by the upper- and lower-quantiles of the observed metric data. num_bootstrap_samples: Applicable only for the "bootstrap" sampling method -- if omitted (default), then 9999 is used (default in "https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.bootstrap.html"). round_decimals: user-configured non-negative integer indicating the number of decimals of the rounding precision of the computed parameter values (i.e., min_value, max_value) prior to packaging them on output. If omitted, then no rounding is performed, unless the computed value is already an integer. truncate_values: user-configured directive for whether or not to allow the computed parameter values (i.e., lower_bound, upper_bound) to take on values outside the specified bounds when packaged on output. data_context: DataContext batch_request: specified in ParameterBuilder configuration to get Batch objects for parameter computation.
Args: parameter_name: the name of this parameter -- this is the user-specified parameter name (from configuration); it is not the fully-qualified parameter name; a fully-qualified parameter name must start with "$parameter." and may contain one or more subsequent parts (e.g., "$parameter.<my_param_from_config>.<metric_name>"). metric_name: the name of a metric used in MetricConfiguration (must be a supported and registered metric) metric_domain_kwargs: used in MetricConfiguration metric_value_kwargs: used in MetricConfiguration sampling_method: choice of the sampling algorithm: "oneshot" (one observation) or "bootstrap" (default) enforce_numeric_metric: used in MetricConfiguration to ensure that metric computations return numeric values replace_nan_with_zero: if False, then if the computed metric gives NaN, then an exception is raised; otherwise, if True (default), then if the computed metric gives NaN, then it is converted to 0.0 (float). false_positive_rate: user-configured fraction between 0 and 1 expressing the desired false positive rate for identifying unexpected values as judged by the upper- and lower-quantiles of the observed metric data. num_bootstrap_samples: Applicable only for the "bootstrap" sampling method -- if omitted (default), then 9999 is used (default in "https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.bootstrap.html"). round_decimals: user-configured non-negative integer indicating the number of decimals of the rounding precision of the computed parameter values (i.e., min_value, max_value) prior to packaging them on output. If omitted, then no rounding is performed, unless the computed value is already an integer. truncate_values: user-configured directive for whether or not to allow the computed parameter values (i.e., lower_bound, upper_bound) to take on values outside the specified bounds when packaged on output. data_context: DataContext batch_request: specified in ParameterBuilder configuration to get Batch objects for parameter computation.
def __init__( self, parameter_name: str, metric_name: str, metric_domain_kwargs: Optional[Union[str, dict]] = None, metric_value_kwargs: Optional[Union[str, dict]] = None, sampling_method: Optional[str] = "bootstrap", enforce_numeric_metric: Optional[Union[str, bool]] = True, replace_nan_with_zero: Optional[Union[str, bool]] = True, false_positive_rate: Optional[Union[str, float]] = 5.0e-2, num_bootstrap_samples: Optional[Union[str, int]] = None, round_decimals: Optional[Union[str, int]] = None, truncate_values: Optional[ Union[str, Dict[str, Union[Optional[int], Optional[float]]]] ] = None, data_context: Optional[DataContext] = None, batch_request: Optional[Union[str, dict]] = None, ): """ Args: parameter_name: the name of this parameter -- this is the user-specified parameter name (from configuration); it is not the fully-qualified parameter name; a fully-qualified parameter name must start with "$parameter." and may contain one or more subsequent parts (e.g., "$parameter.<my_param_from_config>.<metric_name>"). metric_name: the name of a metric used in MetricConfiguration (must be a supported and registered metric) metric_domain_kwargs: used in MetricConfiguration metric_value_kwargs: used in MetricConfiguration sampling_method: choice of the sampling algorithm: "oneshot" (one observation) or "bootstrap" (default) enforce_numeric_metric: used in MetricConfiguration to ensure that metric computations return numeric values replace_nan_with_zero: if False, then if the computed metric gives NaN, then an exception is raised; otherwise, if True (default), then if the computed metric gives NaN, then it is converted to 0.0 (float). false_positive_rate: user-configured fraction between 0 and 1 expressing the desired false positive rate for identifying unexpected values as judged by the upper- and lower-quantiles of the observed metric data. num_bootstrap_samples: Applicable only for the "bootstrap" sampling method -- if omitted (default), then 9999 is used (default in "https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.bootstrap.html"). round_decimals: user-configured non-negative integer indicating the number of decimals of the rounding precision of the computed parameter values (i.e., min_value, max_value) prior to packaging them on output. If omitted, then no rounding is performed, unless the computed value is already an integer. truncate_values: user-configured directive for whether or not to allow the computed parameter values (i.e., lower_bound, upper_bound) to take on values outside the specified bounds when packaged on output. data_context: DataContext batch_request: specified in ParameterBuilder configuration to get Batch objects for parameter computation. 
""" super().__init__( parameter_name=parameter_name, data_context=data_context, batch_request=batch_request, ) self._metric_name = metric_name self._metric_domain_kwargs = metric_domain_kwargs self._metric_value_kwargs = metric_value_kwargs self._sampling_method = sampling_method self._enforce_numeric_metric = enforce_numeric_metric self._replace_nan_with_zero = replace_nan_with_zero self._false_positive_rate = false_positive_rate self._num_bootstrap_samples = num_bootstrap_samples self._round_decimals = round_decimals if not truncate_values: truncate_values = { "lower_bound": None, "upper_bound": None, } truncate_values_keys: set = set(truncate_values.keys()) if ( not truncate_values_keys <= NumericMetricRangeMultiBatchParameterBuilder.RECOGNIZED_TRUNCATE_DISTRIBUTION_KEYS ): raise ge_exceptions.ProfilerExecutionError( message=f"""Unrecognized truncate_values key(s) in {self.__class__.__name__}: "{str(truncate_values_keys - NumericMetricRangeMultiBatchParameterBuilder.RECOGNIZED_TRUNCATE_DISTRIBUTION_KEYS)}" \ detected. """ ) self._truncate_values = truncate_values
[ "def", "__init__", "(", "self", ",", "parameter_name", ":", "str", ",", "metric_name", ":", "str", ",", "metric_domain_kwargs", ":", "Optional", "[", "Union", "[", "str", ",", "dict", "]", "]", "=", "None", ",", "metric_value_kwargs", ":", "Optional", "[", "Union", "[", "str", ",", "dict", "]", "]", "=", "None", ",", "sampling_method", ":", "Optional", "[", "str", "]", "=", "\"bootstrap\"", ",", "enforce_numeric_metric", ":", "Optional", "[", "Union", "[", "str", ",", "bool", "]", "]", "=", "True", ",", "replace_nan_with_zero", ":", "Optional", "[", "Union", "[", "str", ",", "bool", "]", "]", "=", "True", ",", "false_positive_rate", ":", "Optional", "[", "Union", "[", "str", ",", "float", "]", "]", "=", "5.0e-2", ",", "num_bootstrap_samples", ":", "Optional", "[", "Union", "[", "str", ",", "int", "]", "]", "=", "None", ",", "round_decimals", ":", "Optional", "[", "Union", "[", "str", ",", "int", "]", "]", "=", "None", ",", "truncate_values", ":", "Optional", "[", "Union", "[", "str", ",", "Dict", "[", "str", ",", "Union", "[", "Optional", "[", "int", "]", ",", "Optional", "[", "float", "]", "]", "]", "]", "]", "=", "None", ",", "data_context", ":", "Optional", "[", "DataContext", "]", "=", "None", ",", "batch_request", ":", "Optional", "[", "Union", "[", "str", ",", "dict", "]", "]", "=", "None", ",", ")", ":", "super", "(", ")", ".", "__init__", "(", "parameter_name", "=", "parameter_name", ",", "data_context", "=", "data_context", ",", "batch_request", "=", "batch_request", ",", ")", "self", ".", "_metric_name", "=", "metric_name", "self", ".", "_metric_domain_kwargs", "=", "metric_domain_kwargs", "self", ".", "_metric_value_kwargs", "=", "metric_value_kwargs", "self", ".", "_sampling_method", "=", "sampling_method", "self", ".", "_enforce_numeric_metric", "=", "enforce_numeric_metric", "self", ".", "_replace_nan_with_zero", "=", "replace_nan_with_zero", "self", ".", "_false_positive_rate", "=", "false_positive_rate", "self", ".", "_num_bootstrap_samples", "=", "num_bootstrap_samples", "self", ".", "_round_decimals", "=", "round_decimals", "if", "not", "truncate_values", ":", "truncate_values", "=", "{", "\"lower_bound\"", ":", "None", ",", "\"upper_bound\"", ":", "None", ",", "}", "truncate_values_keys", ":", "set", "=", "set", "(", "truncate_values", ".", "keys", "(", ")", ")", "if", "(", "not", "truncate_values_keys", "<=", "NumericMetricRangeMultiBatchParameterBuilder", ".", "RECOGNIZED_TRUNCATE_DISTRIBUTION_KEYS", ")", ":", "raise", "ge_exceptions", ".", "ProfilerExecutionError", "(", "message", "=", "f\"\"\"Unrecognized truncate_values key(s) in {self.__class__.__name__}:\n\"{str(truncate_values_keys - NumericMetricRangeMultiBatchParameterBuilder.RECOGNIZED_TRUNCATE_DISTRIBUTION_KEYS)}\" \\\ndetected.\n\"\"\"", ")", "self", ".", "_truncate_values", "=", "truncate_values" ]
[ 52, 4 ]
[ 131, 47 ]
python
en
['en', 'error', 'th']
False
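The tail of the constructor normalizes and validates truncate_values. A standalone sketch of that check, assuming the class constant recognizes exactly the two bound names used in the default dict:

RECOGNIZED_TRUNCATE_DISTRIBUTION_KEYS = {"lower_bound", "upper_bound"}  # assumed constant

def normalize_truncate_values(truncate_values):
    if not truncate_values:
        truncate_values = {"lower_bound": None, "upper_bound": None}
    unknown_keys = set(truncate_values.keys()) - RECOGNIZED_TRUNCATE_DISTRIBUTION_KEYS
    if unknown_keys:  # equivalent to the "not keys <= recognized" subset test above
        raise ValueError(f"Unrecognized truncate_values key(s): {unknown_keys}")
    return truncate_values

print(normalize_truncate_values(None))                  # {'lower_bound': None, 'upper_bound': None}
print(normalize_truncate_values({"lower_bound": 0.0}))  # partial dicts are allowed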
NumericMetricRangeMultiBatchParameterBuilder._build_parameters
( self, parameter_container: ParameterContainer, domain: Domain, *, variables: Optional[ParameterContainer] = None, parameters: Optional[Dict[str, ParameterContainer]] = None, )
Builds ParameterContainer object that holds ParameterNode objects with attribute name-value pairs and optional details. :return: ParameterContainer object that holds ParameterNode objects with attribute name-value pairs and optional details The algorithm operates according to the following steps: 1. Obtain batch IDs of interest using DataContext and BatchRequest (unless passed explicitly as argument). Note that this specific BatchRequest was specified as part of configuration for the present ParameterBuilder class. 2. Set up metric_domain_kwargs and metric_value_kwargs (using configuration and/or variables and parameters). 3. Instantiate the Validator object corresponding to BatchRequest (with a temporary expectation_suite_name) in order to have access to all Batch objects, on each of which the specified metric_name will be computed. 4. Perform metric computations and obtain the result in the array-like form (one metric value per Batch). 5. Using the configured directives and heuristics, determine whether or not the ranges should be clipped. 6. Using the configured directives and heuristics, determine if return values should be rounded to an integer. 7. Convert the list of floating point metric computation results to a numpy array (for further computations). Steps 8 -- 10 are for the "oneshot" sampling method only (the "bootstrap" method achieves the same automatically): 8. Compute the mean and the standard deviation of the metric (aggregated over all the gathered Batch objects). 9. Compute the number of standard deviations (as floating point) needed (around the mean) to achieve the specified false_positive_rate (note that a false_positive_rate of 0.0 would result in an infinite number of standard deviations, hence it is "nudged" by a small quantity "epsilon" above 0.0 if a false_positive_rate of 0.0 appears as an argument). (Please refer to "https://en.wikipedia.org/wiki/Normal_distribution" and references therein for background.) 10. Compute the "band" around the mean as the min_value and max_value (to be used in ExpectationConfiguration). 11. Return [low, high] for the desired metric as estimated by the specified sampling method. 12. Set up the arguments and call build_parameter_container() to store the parameter as part of "rule state".
Builds ParameterContainer object that holds ParameterNode objects with attribute name-value pairs and optional details.
def _build_parameters( self, parameter_container: ParameterContainer, domain: Domain, *, variables: Optional[ParameterContainer] = None, parameters: Optional[Dict[str, ParameterContainer]] = None, ): """ Builds ParameterContainer object that holds ParameterNode objects with attribute name-value pairs and optional details. :return: ParameterContainer object that holds ParameterNode objects with attribute name-value pairs and optional details The algorithm operates according to the following steps: 1. Obtain batch IDs of interest using DataContext and BatchRequest (unless passed explicitly as argument). Note that this specific BatchRequest was specified as part of configuration for the present ParameterBuilder class. 2. Set up metric_domain_kwargs and metric_value_kwargs (using configuration and/or variables and parameters). 3. Instantiate the Validator object corresponding to BatchRequest (with a temporary expectation_suite_name) in order to have access to all Batch objects, on each of which the specified metric_name will be computed. 4. Perform metric computations and obtain the result in the array-like form (one metric value per each Batch). 5. Using the configured directives and heuristics, determine whether or not the ranges should be clipped. 6. Using the configured directives and heuristics, determine if return values should be rounded to an integer. 7. Convert the list of floating point metric computation results to a numpy array (for further computations). Steps 8 -- 10 are for the "oneshot" sampling method only (the "bootstrap" method achieves same automatically): 8. Compute the mean and the standard deviation of the metric (aggregated over all the gathered Batch objects). 9. Compute number of standard deviations (as floating point) needed (around the mean) to achieve the specified false_positive_rate (note that false_positive_rate of 0.0 would result in infinite number of standard deviations, hence it is "nudged" by small quantity "epsilon" above 0.0 if false_positive_rate of 0.0 appears as argument). (Please refer to "https://en.wikipedia.org/wiki/Normal_distribution" and references therein for background.) 10. Compute the "band" around the mean as the min_value and max_value (to be used in ExpectationConfiguration). 11. Return [low, high] for the desired metric as estimated by the specified sampling method. 12. Set up the arguments and call build_parameter_container() to store the parameter as part of "rule state". """ validator: Validator = self.get_validator( domain=domain, variables=variables, parameters=parameters, ) batch_ids: Optional[List[str]] = self.get_batch_ids( domain=domain, variables=variables, parameters=parameters, ) if not batch_ids: raise ge_exceptions.ProfilerExecutionError( message=f"Utilizing a {self.__class__.__name__} requires a non-empty list of batch identifiers." 
) metric_computation_result: Dict[ str, Union[Union[np.ndarray, List[Union[Any, Number]]], Dict[str, Any]] ] = self.get_metrics( batch_ids=batch_ids, validator=validator, metric_name=self._metric_name, metric_domain_kwargs=self._metric_domain_kwargs, metric_value_kwargs=self._metric_value_kwargs, enforce_numeric_metric=self._enforce_numeric_metric, replace_nan_with_zero=self._replace_nan_with_zero, domain=domain, variables=variables, parameters=parameters, ) metric_values: Union[ np.ndarray, List[Union[Any, Number]] ] = metric_computation_result["metric_values"] details: Dict[str, Any] = metric_computation_result["details"] # Obtain sampling_method directive from rule state (i.e., variables and parameters); from instance variable otherwise. sampling_method: str = get_parameter_value_and_validate_return_type( domain=domain, parameter_reference=self._sampling_method, expected_return_type=str, variables=variables, parameters=parameters, ) if not ( sampling_method in NumericMetricRangeMultiBatchParameterBuilder.RECOGNIZED_SAMPLING_METHOD_NAMES ): raise ge_exceptions.ProfilerExecutionError( message=f"""The directive "sampling_method" for {self.__class__.__name__} can be only one of {NumericMetricRangeMultiBatchParameterBuilder.RECOGNIZED_SAMPLING_METHOD_NAMES} ("{sampling_method}" was detected). """ ) # Obtain false_positive_rate from rule state (i.e., variables and parameters); from instance variable otherwise. false_positive_rate: Union[ Any, str ] = get_parameter_value_and_validate_return_type( domain=domain, parameter_reference=self._false_positive_rate, expected_return_type=float, variables=variables, parameters=parameters, ) if not (0.0 <= false_positive_rate <= 1.0): raise ge_exceptions.ProfilerExecutionError( message=f"The confidence level for {self.__class__.__name__} is outside of [0.0, 1.0] closed interval." ) truncate_values: Dict[str, Number] = self._get_truncate_values_using_heuristics( metric_values=metric_values, domain=domain, variables=variables, parameters=parameters, ) lower_bound: Optional[float] = truncate_values.get("lower_bound") upper_bound: Optional[float] = truncate_values.get("upper_bound") round_decimals: int = self._get_round_decimals_using_heuristics( metric_values=metric_values, domain=domain, variables=variables, parameters=parameters, ) metric_values = np.array(metric_values, dtype=np.float64) lower_quantile: Union[Number, float] upper_quantile: Union[Number, float] if np.all(np.isclose(metric_values, metric_values[0])): # Computation is unnecessary if distribution is degenerate. 
lower_quantile = upper_quantile = metric_values[0] elif sampling_method == "bootstrap": lower_quantile, upper_quantile = self._get_bootstrap_estimate( metric_values=metric_values, false_positive_rate=false_positive_rate, domain=domain, variables=variables, parameters=parameters, ) else: lower_quantile, upper_quantile = compute_quantiles( metric_values=metric_values, false_positive_rate=false_positive_rate, ) min_value: Union[Number, float] max_value: Union[Number, float] if round_decimals == 0: min_value = round(float(lower_quantile)) max_value = round(float(upper_quantile)) else: min_value = round(float(lower_quantile), round_decimals) max_value = round(float(upper_quantile), round_decimals) if lower_bound is not None: min_value = max(min_value, lower_bound) if upper_bound is not None: max_value = min(max_value, upper_bound) parameter_values: Dict[str, Any] = { f"$parameter.{self.parameter_name}": { "value": { "min_value": min_value, "max_value": max_value, }, "details": details, }, } build_parameter_container( parameter_container=parameter_container, parameter_values=parameter_values )
[ "def", "_build_parameters", "(", "self", ",", "parameter_container", ":", "ParameterContainer", ",", "domain", ":", "Domain", ",", "*", ",", "variables", ":", "Optional", "[", "ParameterContainer", "]", "=", "None", ",", "parameters", ":", "Optional", "[", "Dict", "[", "str", ",", "ParameterContainer", "]", "]", "=", "None", ",", ")", ":", "validator", ":", "Validator", "=", "self", ".", "get_validator", "(", "domain", "=", "domain", ",", "variables", "=", "variables", ",", "parameters", "=", "parameters", ",", ")", "batch_ids", ":", "Optional", "[", "List", "[", "str", "]", "]", "=", "self", ".", "get_batch_ids", "(", "domain", "=", "domain", ",", "variables", "=", "variables", ",", "parameters", "=", "parameters", ",", ")", "if", "not", "batch_ids", ":", "raise", "ge_exceptions", ".", "ProfilerExecutionError", "(", "message", "=", "f\"Utilizing a {self.__class__.__name__} requires a non-empty list of batch identifiers.\"", ")", "metric_computation_result", ":", "Dict", "[", "str", ",", "Union", "[", "Union", "[", "np", ".", "ndarray", ",", "List", "[", "Union", "[", "Any", ",", "Number", "]", "]", "]", ",", "Dict", "[", "str", ",", "Any", "]", "]", "]", "=", "self", ".", "get_metrics", "(", "batch_ids", "=", "batch_ids", ",", "validator", "=", "validator", ",", "metric_name", "=", "self", ".", "_metric_name", ",", "metric_domain_kwargs", "=", "self", ".", "_metric_domain_kwargs", ",", "metric_value_kwargs", "=", "self", ".", "_metric_value_kwargs", ",", "enforce_numeric_metric", "=", "self", ".", "_enforce_numeric_metric", ",", "replace_nan_with_zero", "=", "self", ".", "_replace_nan_with_zero", ",", "domain", "=", "domain", ",", "variables", "=", "variables", ",", "parameters", "=", "parameters", ",", ")", "metric_values", ":", "Union", "[", "np", ".", "ndarray", ",", "List", "[", "Union", "[", "Any", ",", "Number", "]", "]", "]", "=", "metric_computation_result", "[", "\"metric_values\"", "]", "details", ":", "Dict", "[", "str", ",", "Any", "]", "=", "metric_computation_result", "[", "\"details\"", "]", "# Obtain sampling_method directive from rule state (i.e., variables and parameters); from instance variable otherwise.", "sampling_method", ":", "str", "=", "get_parameter_value_and_validate_return_type", "(", "domain", "=", "domain", ",", "parameter_reference", "=", "self", ".", "_sampling_method", ",", "expected_return_type", "=", "str", ",", "variables", "=", "variables", ",", "parameters", "=", "parameters", ",", ")", "if", "not", "(", "sampling_method", "in", "NumericMetricRangeMultiBatchParameterBuilder", ".", "RECOGNIZED_SAMPLING_METHOD_NAMES", ")", ":", "raise", "ge_exceptions", ".", "ProfilerExecutionError", "(", "message", "=", "f\"\"\"The directive \"sampling_method\" for {self.__class__.__name__} can be only one of\n{NumericMetricRangeMultiBatchParameterBuilder.RECOGNIZED_SAMPLING_METHOD_NAMES} (\"{sampling_method}\" was detected).\n\"\"\"", ")", "# Obtain false_positive_rate from rule state (i.e., variables and parameters); from instance variable otherwise.", "false_positive_rate", ":", "Union", "[", "Any", ",", "str", "]", "=", "get_parameter_value_and_validate_return_type", "(", "domain", "=", "domain", ",", "parameter_reference", "=", "self", ".", "_false_positive_rate", ",", "expected_return_type", "=", "float", ",", "variables", "=", "variables", ",", "parameters", "=", "parameters", ",", ")", "if", "not", "(", "0.0", "<=", "false_positive_rate", "<=", "1.0", ")", ":", "raise", "ge_exceptions", ".", "ProfilerExecutionError", "(", "message", "=", "f\"The confidence 
level for {self.__class__.__name__} is outside of [0.0, 1.0] closed interval.\"", ")", "truncate_values", ":", "Dict", "[", "str", ",", "Number", "]", "=", "self", ".", "_get_truncate_values_using_heuristics", "(", "metric_values", "=", "metric_values", ",", "domain", "=", "domain", ",", "variables", "=", "variables", ",", "parameters", "=", "parameters", ",", ")", "lower_bound", ":", "Optional", "[", "float", "]", "=", "truncate_values", ".", "get", "(", "\"lower_bound\"", ")", "upper_bound", ":", "Optional", "[", "float", "]", "=", "truncate_values", ".", "get", "(", "\"upper_bound\"", ")", "round_decimals", ":", "int", "=", "self", ".", "_get_round_decimals_using_heuristics", "(", "metric_values", "=", "metric_values", ",", "domain", "=", "domain", ",", "variables", "=", "variables", ",", "parameters", "=", "parameters", ",", ")", "metric_values", "=", "np", ".", "array", "(", "metric_values", ",", "dtype", "=", "np", ".", "float64", ")", "lower_quantile", ":", "Union", "[", "Number", ",", "float", "]", "upper_quantile", ":", "Union", "[", "Number", ",", "float", "]", "if", "np", ".", "all", "(", "np", ".", "isclose", "(", "metric_values", ",", "metric_values", "[", "0", "]", ")", ")", ":", "# Computation is unnecessary if distribution is degenerate.", "lower_quantile", "=", "upper_quantile", "=", "metric_values", "[", "0", "]", "elif", "sampling_method", "==", "\"bootstrap\"", ":", "lower_quantile", ",", "upper_quantile", "=", "self", ".", "_get_bootstrap_estimate", "(", "metric_values", "=", "metric_values", ",", "false_positive_rate", "=", "false_positive_rate", ",", "domain", "=", "domain", ",", "variables", "=", "variables", ",", "parameters", "=", "parameters", ",", ")", "else", ":", "lower_quantile", ",", "upper_quantile", "=", "compute_quantiles", "(", "metric_values", "=", "metric_values", ",", "false_positive_rate", "=", "false_positive_rate", ",", ")", "min_value", ":", "Union", "[", "Number", ",", "float", "]", "max_value", ":", "Union", "[", "Number", ",", "float", "]", "if", "round_decimals", "==", "0", ":", "min_value", "=", "round", "(", "float", "(", "lower_quantile", ")", ")", "max_value", "=", "round", "(", "float", "(", "upper_quantile", ")", ")", "else", ":", "min_value", "=", "round", "(", "float", "(", "lower_quantile", ")", ",", "round_decimals", ")", "max_value", "=", "round", "(", "float", "(", "upper_quantile", ")", ",", "round_decimals", ")", "if", "lower_bound", "is", "not", "None", ":", "min_value", "=", "max", "(", "min_value", ",", "lower_bound", ")", "if", "upper_bound", "is", "not", "None", ":", "max_value", "=", "min", "(", "max_value", ",", "upper_bound", ")", "parameter_values", ":", "Dict", "[", "str", ",", "Any", "]", "=", "{", "f\"$parameter.{self.parameter_name}\"", ":", "{", "\"value\"", ":", "{", "\"min_value\"", ":", "min_value", ",", "\"max_value\"", ":", "max_value", ",", "}", ",", "\"details\"", ":", "details", ",", "}", ",", "}", "build_parameter_container", "(", "parameter_container", "=", "parameter_container", ",", "parameter_values", "=", "parameter_values", ")" ]
[ 133, 4 ]
[ 299, 9 ]
python
en
['en', 'error', 'th']
False
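The record above estimates a [min_value, max_value] range for a metric observed across many batches. As a minimal sketch of the two-sided quantile step used when sampling_method is not "bootstrap" (compute_range is an illustrative name, not the great_expectations helper):

import numpy as np

def compute_range(metric_values, false_positive_rate=0.05):
    # A degenerate distribution needs no computation: every batch agreed.
    values = np.asarray(metric_values, dtype=np.float64)
    if np.all(np.isclose(values, values[0])):
        return values[0], values[0]
    # Split the allowed false-positive mass evenly between the two tails.
    return (
        np.quantile(values, false_positive_rate / 2),
        np.quantile(values, 1.0 - false_positive_rate / 2),
    )

print(compute_range([10, 12, 11, 13, 10, 14], false_positive_rate=0.1))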
UpdateOwnProfile.has_object_permission
(self, request, view, obj)
Check whether the user is trying to edit their own profile.
Check whether the user is trying to edit their own profile.
def has_object_permission(self, request, view, obj): """Check whether the user is trying to edit their own profile.""" if request.method in permissions.SAFE_METHODS: return True return obj.id == request.user.id
[ "def", "has_object_permission", "(", "self", ",", "request", ",", "view", ",", "obj", ")", ":", "if", "request", ".", "method", "in", "permissions", ".", "SAFE_METHODS", ":", "return", "True", "return", "obj", ".", "id", "==", "request", ".", "user", ".", "id" ]
[ 6, 4 ]
[ 12, 40 ]
python
en
['en', 'en', 'en']
True
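For context, a sketch of how an object-level permission such as UpdateOwnProfile is typically wired into a Django REST framework viewset; ProfileViewSet, UserProfile, and UserProfileSerializer are hypothetical names, not taken from the record above:

from rest_framework import viewsets
from rest_framework.authentication import TokenAuthentication

class ProfileViewSet(viewsets.ModelViewSet):
    # Reads (GET/HEAD/OPTIONS) fall under SAFE_METHODS and pass for everyone;
    # writes only succeed when obj.id == request.user.id.
    authentication_classes = (TokenAuthentication,)
    permission_classes = (UpdateOwnProfile,)
    queryset = UserProfile.objects.all()        # hypothetical model
    serializer_class = UserProfileSerializer    # hypothetical serializer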
SqlAlchemyBatchData.__init__
( self, execution_engine, record_set_name: str = None, # Option 1 schema_name: str = None, table_name: str = None, # Option 2 query: str = None, # Option 3 selectable=None, create_temp_table: bool = True, temp_table_name: str = None, temp_table_schema_name: str = None, use_quoted_name: bool = False, source_table_name: str = None, source_schema_name: str = None, )
A constructor used to initialize a SqlAlchemy Batch, create an id for it, and verify that all necessary parameters have been provided. If a query is given, also builds a temporary table for this query Args: engine (SqlAlchemy Engine): \ A SqlAlchemy Engine or connection that will be used to access the data record_set_name (string or None): \ The name of the record set available as a domain kwarg for Great Expectations validations. record_set_name can usually be None, but is required when there are multiple record_sets in the same Batch. schema_name (string or None): \ The name of the schema in which the table lies table_name (string or None): \ The name of the table that will be accessed. Either this parameter or the query parameter must be specified. Default is 'None'. query (string or None): \ A query string representing a domain, which will be used to create a temporary table selectable (Sqlalchemy Selectable or None): \ A SqlAlchemy selectable representing a domain, which will be used to create a temporary table create_temp_table (bool): \ When building the batch data object from a query, this flag determines whether a temporary table should be created against which to validate data from the query. If False, a subselect statement will be used in each validation. temp_table_name (str or None): \ The name to use for a temporary table if one should be created. If None, a default name will be generated. temp_table_schema_name (str or None): \ The name of the schema in which a temporary table should be created. If None, the default schema will be used if a temporary table is requested. use_quoted_name (bool): \ If true, names should be quoted to preserve case sensitivity on databases that usually normalize them source_table_name (str): \ For SqlAlchemyBatchData based on selectables, source_table_name provides the name of the table on which the selectable is based. This is required for most kinds of table introspection (e.g. looking up column types) source_schema_name (str): \ For SqlAlchemyBatchData based on selectables, source_schema_name provides the name of the schema on which the selectable is based. This is required for most kinds of table introspection (e.g. looking up column types) The query that will be executed against the DB can be determined in any of three ways: 1. Specify a `schema_name` and `table_name`. This will query the whole table as a record_set. If schema_name is None, then the default schema will be used. 2. Specify a `query`, which will be executed as-is to fetch the record_set. NOTE Abe 20201118 : This functionality is currently untested. 3. Specify a `selectable`, which will be used to fetch the record_set. This is the primary path used by DataConnectors. In the case of (2) and (3) you have the option to execute the query either as a temporary table, or as a subselect statement. In general, temporary tables invite more optimization from the query engine itself. Subselect statements may sometimes be preferred, because they do not require write access on the database.
A constructor used to initialize a SqlAlchemy Batch, create an id for it, and verify that all necessary parameters have been provided. If a query is given, also builds a temporary table for this query
def __init__( self, execution_engine, record_set_name: str = None, # Option 1 schema_name: str = None, table_name: str = None, # Option 2 query: str = None, # Option 3 selectable=None, create_temp_table: bool = True, temp_table_name: str = None, temp_table_schema_name: str = None, use_quoted_name: bool = False, source_table_name: str = None, source_schema_name: str = None, ): """A constructor used to initialize a SqlAlchemy Batch, create an id for it, and verify that all necessary parameters have been provided. If a query is given, also builds a temporary table for this query Args: engine (SqlAlchemy Engine): \ A SqlAlchemy Engine or connection that will be used to access the data record_set_name (string or None): \ The name of the record set available as a domain kwarg for Great Expectations validations. record_set_name can usually be None, but is required when there are multiple record_sets in the same Batch. schema_name (string or None): \ The name of the schema in which the table lies table_name (string or None): \ The name of the table that will be accessed. Either this parameter or the query parameter must be specified. Default is 'None'. query (string or None): \ A query string representing a domain, which will be used to create a temporary table selectable (Sqlalchemy Selectable or None): \ A SqlAlchemy selectable representing a domain, which will be used to create a temporary table create_temp_table (bool): \ When building the batch data object from a query, this flag determines whether a temporary table should be created against which to validate data from the query. If False, a subselect statement will be used in each validation. temp_table_name (str or None): \ The name to use for a temporary table if one should be created. If None, a default name will be generated. temp_table_schema_name (str or None): \ The name of the schema in which a temporary table should be created. If None, the default schema will be used if a temporary table is requested. use_quoted_name (bool): \ If true, names should be quoted to preserve case sensitivity on databases that usually normalize them source_table_name (str): \ For SqlAlchemyBatchData based on selectables, source_table_name provides the name of the table on which the selectable is based. This is required for most kinds of table introspection (e.g. looking up column types) source_schema_name (str): \ For SqlAlchemyBatchData based on selectables, source_schema_name provides the name of the schema on which the selectable is based. This is required for most kinds of table introspection (e.g. looking up column types) The query that will be executed against the DB can be determined in any of three ways: 1. Specify a `schema_name` and `table_name`. This will query the whole table as a record_set. If schema_name is None, then the default schema will be used. 2. Specify a `query`, which will be executed as-is to fetch the record_set. NOTE Abe 20201118 : This functionality is currently untested. 3. Specify a `selectable`, which will be used to fetch the record_set. This is the primary path used by DataConnectors. In the case of (2) and (3) you have the option to execute the query either as a temporary table, or as a subselect statement. In general, temporary tables invite more optimization from the query engine itself. Subselect statements may sometimes be preferred, because they do not require write access on the database. """ super().__init__(execution_engine) engine = execution_engine.engine self._engine = engine self._record_set_name = record_set_name or "great_expectations_sub_selection" if not isinstance(self._record_set_name, str): raise TypeError( f"record_set_name should be of type str, not {type(record_set_name)}" ) self._schema_name = schema_name self._use_quoted_name = use_quoted_name self._source_table_name = source_table_name self._source_schema_name = source_schema_name if sum(bool(x) for x in [table_name, query, selectable is not None]) != 1: raise ValueError( "Exactly one of table_name, query, or selectable must be specified" ) elif (query and schema_name) or (selectable is not None and schema_name): raise ValueError( "schema_name can only be used with table_name. Use temp_table_schema_name to provide a target schema for creating a temporary table." ) if table_name: # Suggestion: pull this block out as its own _function if use_quoted_name: table_name = quoted_name(table_name, quote=True) if engine.dialect.name.lower() == "bigquery": if schema_name is not None: logger.warning( "schema_name should not be used when passing a table_name for bigquery. Instead, include the schema name in the table_name string." ) # In BigQuery the table name is already qualified with its schema name self._selectable = sa.Table( table_name, sa.MetaData(), schema=None, ) else: self._selectable = sa.Table( table_name, sa.MetaData(), schema=schema_name, ) elif create_temp_table: if temp_table_name: generated_table_name = temp_table_name else: # Suggestion: Pull this into a separate "_generate_temporary_table_name" method generated_table_name = f"ge_tmp_{str(uuid.uuid4())[:8]}" # mssql expects all temporary table names to have a prefix '#' if engine.dialect.name.lower() == "mssql": generated_table_name = f"#{generated_table_name}" if selectable is not None: if engine.dialect.name.lower() == "oracle": # oracle query was already passed as a string query = selectable else: # compile selectable to sql statement query = selectable.compile( dialect=self.sql_engine_dialect, compile_kwargs={"literal_binds": True}, ) self._create_temporary_table( generated_table_name, query, temp_table_schema_name=temp_table_schema_name, ) self._selectable = sa.Table( generated_table_name, sa.MetaData(), schema=temp_table_schema_name, ) else: if query: self._selectable = sa.text(query) else: self._selectable = selectable.alias(self._record_set_name)
[ "def", "__init__", "(", "self", ",", "execution_engine", ",", "record_set_name", ":", "str", "=", "None", ",", "# Option 1", "schema_name", ":", "str", "=", "None", ",", "table_name", ":", "str", "=", "None", ",", "# Option 2", "query", ":", "str", "=", "None", ",", "# Option 3", "selectable", "=", "None", ",", "create_temp_table", ":", "bool", "=", "True", ",", "temp_table_name", ":", "str", "=", "None", ",", "temp_table_schema_name", ":", "str", "=", "None", ",", "use_quoted_name", ":", "bool", "=", "False", ",", "source_table_name", ":", "str", "=", "None", ",", "source_schema_name", ":", "str", "=", "None", ",", ")", ":", "super", "(", ")", ".", "__init__", "(", "execution_engine", ")", "engine", "=", "execution_engine", ".", "engine", "self", ".", "_engine", "=", "engine", "self", ".", "_record_set_name", "=", "record_set_name", "or", "\"great_expectations_sub_selection\"", "if", "not", "isinstance", "(", "self", ".", "_record_set_name", ",", "str", ")", ":", "raise", "TypeError", "(", "f\"record_set_name should be of type str, not {type(record_set_name)}\"", ")", "self", ".", "_schema_name", "=", "schema_name", "self", ".", "_use_quoted_name", "=", "use_quoted_name", "self", ".", "_source_table_name", "=", "source_table_name", "self", ".", "_source_schema_name", "=", "source_schema_name", "if", "sum", "(", "bool", "(", "x", ")", "for", "x", "in", "[", "table_name", ",", "query", ",", "selectable", "is", "not", "None", "]", ")", "!=", "1", ":", "raise", "ValueError", "(", "\"Exactly one of table_name, query, or selectable must be specified\"", ")", "elif", "(", "query", "and", "schema_name", ")", "or", "(", "selectable", "is", "not", "None", "and", "schema_name", ")", ":", "raise", "ValueError", "(", "\"schema_name can only be used with table_name. Use temp_table_schema_name to provide a target schema for creating a temporary table.\"", ")", "if", "table_name", ":", "# Suggestion: pull this block out as its own _function", "if", "use_quoted_name", ":", "table_name", "=", "quoted_name", "(", "table_name", ",", "quote", "=", "True", ")", "if", "engine", ".", "dialect", ".", "name", ".", "lower", "(", ")", "==", "\"bigquery\"", ":", "if", "schema_name", "is", "not", "None", ":", "logger", ".", "warning", "(", "\"schema_name should not be used when passing a table_name for biquery. 
Instead, include the schema name in the table_name string.\"", ")", "# In BigQuery the table name is already qualified with its schema name", "self", ".", "_selectable", "=", "sa", ".", "Table", "(", "table_name", ",", "sa", ".", "MetaData", "(", ")", ",", "schema_name", "=", "None", ",", ")", "else", ":", "self", ".", "_selectable", "=", "sa", ".", "Table", "(", "table_name", ",", "sa", ".", "MetaData", "(", ")", ",", "schema_name", "=", "schema_name", ",", ")", "elif", "create_temp_table", ":", "if", "temp_table_name", ":", "generated_table_name", "=", "temp_table_name", "else", ":", "# Suggestion: Pull this into a separate \"_generate_temporary_table_name\" method", "generated_table_name", "=", "f\"ge_tmp_{str(uuid.uuid4())[:8]}\"", "# mssql expects all temporary table names to have a prefix '#'", "if", "engine", ".", "dialect", ".", "name", ".", "lower", "(", ")", "==", "\"mssql\"", ":", "generated_table_name", "=", "f\"#{generated_table_name}\"", "if", "selectable", "is", "not", "None", ":", "if", "engine", ".", "dialect", ".", "name", ".", "lower", "(", ")", "==", "\"oracle\"", ":", "# oracle query was already passed as a string", "query", "=", "selectable", "else", ":", "# compile selectable to sql statement", "query", "=", "selectable", ".", "compile", "(", "dialect", "=", "self", ".", "sql_engine_dialect", ",", "compile_kwargs", "=", "{", "\"literal_binds\"", ":", "True", "}", ",", ")", "self", ".", "_create_temporary_table", "(", "generated_table_name", ",", "query", ",", "temp_table_schema_name", "=", "temp_table_schema_name", ",", ")", "self", ".", "_selectable", "=", "sa", ".", "Table", "(", "generated_table_name", ",", "sa", ".", "MetaData", "(", ")", ",", "schema_name", "=", "temp_table_schema_name", ",", ")", "else", ":", "if", "query", ":", "self", ".", "_selectable", "=", "sa", ".", "text", "(", "query", ")", "else", ":", "self", ".", "_selectable", "=", "selectable", ".", "alias", "(", "self", ".", "_record_set_name", ")" ]
[ 22, 4 ]
[ 166, 74 ]
python
en
['en', 'en', 'en']
True
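The docstring describes three mutually exclusive construction paths; a hedged usage sketch, assuming execution_engine is an already-configured SqlAlchemyExecutionEngine and using illustrative table/column names with 1.x-style SQLAlchemy:

import sqlalchemy as sa

# Option 1: whole table, optionally schema-qualified.
batch = SqlAlchemyBatchData(execution_engine, table_name="events", schema_name="public")

# Option 2: raw query string, materialized as a temporary table by default.
batch = SqlAlchemyBatchData(execution_engine, query="SELECT * FROM events WHERE amount > 0")

# Option 3: a SQLAlchemy selectable, validated via a subselect instead of a temp table.
events = sa.table("events", sa.column("amount"))
batch = SqlAlchemyBatchData(
    execution_engine,
    selectable=sa.select([events]).where(events.c.amount > 0),
    create_temp_table=False,
)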
SqlAlchemyBatchData.sql_engine_dialect
(self)
Returns the Batch's current engine dialect
Returns the Batch's current engine dialect
def sql_engine_dialect(self) -> DefaultDialect: """Returns the Batch's current engine dialect""" return self._engine.dialect
[ "def", "sql_engine_dialect", "(", "self", ")", "->", "DefaultDialect", ":", "return", "self", ".", "_engine", ".", "dialect" ]
[ 169, 4 ]
[ 171, 35 ]
python
en
['en', 'en', 'en']
True
SqlAlchemyBatchData._create_temporary_table
( self, temp_table_name, query, temp_table_schema_name=None )
Create a temporary table based on a SQL query. This will be used as a basis for executing expectations. :param query:
Create a temporary table based on a SQL query. This will be used as a basis for executing expectations. :param query:
def _create_temporary_table( self, temp_table_name, query, temp_table_schema_name=None ): """ Create a temporary table based on a SQL query. This will be used as a basis for executing expectations. :param query: """ if self.sql_engine_dialect.name.lower() == "bigquery": stmt = "CREATE OR REPLACE TABLE `{temp_table_name}` AS {query}".format( temp_table_name=temp_table_name, query=query ) elif self.sql_engine_dialect.name.lower() == "snowflake": if temp_table_schema_name is not None: temp_table_name = temp_table_schema_name + "." + temp_table_name stmt = ( "CREATE OR REPLACE TEMPORARY TABLE {temp_table_name} AS {query}".format( temp_table_name=temp_table_name, query=query ) ) elif self.sql_engine_dialect.name == "mysql": # Note: We can keep the "MySQL" clause separate for clarity, even though it is the same as the # generic case. stmt = "CREATE TEMPORARY TABLE {temp_table_name} AS {query}".format( temp_table_name=temp_table_name, query=query ) elif self.sql_engine_dialect.name == "mssql": # Insert "into #{temp_table_name}" in the custom sql query right before the "from" clause # Split is case sensitive so detect case. # Note: transforming query to uppercase/lowercase has unintended consequences (i.e., # changing column names), so this is not an option! query = query.string # extracting string from MSSQLCompiler object if "from" in query: strsep = "from" else: strsep = "FROM" querymod = query.split(strsep, maxsplit=1) stmt = (querymod[0] + "into {temp_table_name} from" + querymod[1]).format( temp_table_name=temp_table_name ) elif self.sql_engine_dialect.name.lower() == "awsathena": stmt = "CREATE TABLE {temp_table_name} AS {query}".format( temp_table_name=temp_table_name, query=query ) elif self.sql_engine_dialect.name.lower() == "oracle": # oracle 18c introduced PRIVATE temp tables which are transient objects stmt_1 = "CREATE PRIVATE TEMPORARY TABLE {temp_table_name} ON COMMIT PRESERVE DEFINITION AS {query}".format( temp_table_name=temp_table_name, query=query ) # prior to oracle 18c only GLOBAL temp tables existed and only the data is transient # this means an empty table will persist after the db session stmt_2 = "CREATE GLOBAL TEMPORARY TABLE {temp_table_name} ON COMMIT PRESERVE ROWS AS {query}".format( temp_table_name=temp_table_name, query=query ) else: stmt = 'CREATE TEMPORARY TABLE "{temp_table_name}" AS {query}'.format( temp_table_name=temp_table_name, query=query ) if self.sql_engine_dialect.name.lower() == "oracle": try: self._engine.execute(stmt_1) except DatabaseError: self._engine.execute(stmt_2) else: self._engine.execute(stmt)
[ "def", "_create_temporary_table", "(", "self", ",", "temp_table_name", ",", "query", ",", "temp_table_schema_name", "=", "None", ")", ":", "if", "self", ".", "sql_engine_dialect", ".", "name", ".", "lower", "(", ")", "==", "\"bigquery\"", ":", "stmt", "=", "\"CREATE OR REPLACE TABLE `{temp_table_name}` AS {query}\"", ".", "format", "(", "temp_table_name", "=", "temp_table_name", ",", "query", "=", "query", ")", "elif", "self", ".", "sql_engine_dialect", ".", "name", ".", "lower", "(", ")", "==", "\"snowflake\"", ":", "if", "temp_table_schema_name", "is", "not", "None", ":", "temp_table_name", "=", "temp_table_schema_name", "+", "\".\"", "+", "temp_table_name", "stmt", "=", "(", "\"CREATE OR REPLACE TEMPORARY TABLE {temp_table_name} AS {query}\"", ".", "format", "(", "temp_table_name", "=", "temp_table_name", ",", "query", "=", "query", ")", ")", "elif", "self", ".", "sql_engine_dialect", ".", "name", "==", "\"mysql\"", ":", "# Note: We can keep the \"MySQL\" clause separate for clarity, even though it is the same as the", "# generic case.", "stmt", "=", "\"CREATE TEMPORARY TABLE {temp_table_name} AS {query}\"", ".", "format", "(", "temp_table_name", "=", "temp_table_name", ",", "query", "=", "query", ")", "elif", "self", ".", "sql_engine_dialect", ".", "name", "==", "\"mssql\"", ":", "# Insert \"into #{temp_table_name}\" in the custom sql query right before the \"from\" clause", "# Split is case sensitive so detect case.", "# Note: transforming query to uppercase/lowercase has unintended consequences (i.e.,", "# changing column names), so this is not an option!", "query", "=", "query", ".", "string", "# extracting string from MSSQLCompiler object", "if", "\"from\"", "in", "query", ":", "strsep", "=", "\"from\"", "else", ":", "strsep", "=", "\"FROM\"", "querymod", "=", "query", ".", "split", "(", "strsep", ",", "maxsplit", "=", "1", ")", "stmt", "=", "(", "querymod", "[", "0", "]", "+", "\"into {temp_table_name} from\"", "+", "querymod", "[", "1", "]", ")", ".", "format", "(", "temp_table_name", "=", "temp_table_name", ")", "elif", "self", ".", "sql_engine_dialect", ".", "name", ".", "lower", "(", ")", "==", "\"awsathena\"", ":", "stmt", "=", "\"CREATE TABLE {temp_table_name} AS {query}\"", ".", "format", "(", "temp_table_name", "=", "temp_table_name", ",", "query", "=", "query", ")", "elif", "self", ".", "sql_engine_dialect", ".", "name", ".", "lower", "(", ")", "==", "\"oracle\"", ":", "# oracle 18c introduced PRIVATE temp tables which are transient objects", "stmt_1", "=", "\"CREATE PRIVATE TEMPORARY TABLE {temp_table_name} ON COMMIT PRESERVE DEFINITION AS {query}\"", ".", "format", "(", "temp_table_name", "=", "temp_table_name", ",", "query", "=", "query", ")", "# prior to oracle 18c only GLOBAL temp tables existed and only the data is transient", "# this means an empty table will persist after the db session", "stmt_2", "=", "\"CREATE GLOBAL TEMPORARY TABLE {temp_table_name} ON COMMIT PRESERVE ROWS AS {query}\"", ".", "format", "(", "temp_table_name", "=", "temp_table_name", ",", "query", "=", "query", ")", "else", ":", "stmt", "=", "'CREATE TEMPORARY TABLE \"{temp_table_name}\" AS {query}'", ".", "format", "(", "temp_table_name", "=", "temp_table_name", ",", "query", "=", "query", ")", "if", "self", ".", "sql_engine_dialect", ".", "name", ".", "lower", "(", ")", "==", "\"oracle\"", ":", "try", ":", "self", ".", "_engine", ".", "execute", "(", "stmt_1", ")", "except", "DatabaseError", ":", "self", ".", "_engine", ".", "execute", "(", "stmt_2", ")", "else", ":", "self", ".", 
"_engine", ".", "execute", "(", "stmt", ")" ]
[ 193, 4 ]
[ 256, 38 ]
python
en
['en', 'error', 'th']
False
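Condensing the branches above, the per-dialect DDL reduces to a small template table (a reference sketch, not the method itself):

TEMP_TABLE_DDL = {
    "bigquery": "CREATE OR REPLACE TABLE `{temp_table_name}` AS {query}",
    "snowflake": "CREATE OR REPLACE TEMPORARY TABLE {temp_table_name} AS {query}",
    "mysql": "CREATE TEMPORARY TABLE {temp_table_name} AS {query}",
    "awsathena": "CREATE TABLE {temp_table_name} AS {query}",
    "default": 'CREATE TEMPORARY TABLE "{temp_table_name}" AS {query}',
}
# mssql instead rewrites the query itself ("SELECT ... INTO #name FROM ..."),
# and oracle tries a PRIVATE temporary table first, falling back to GLOBAL.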
load_xml_generator_configuration
(configuration, **defaults)
Loads CastXML or GCC-XML configuration. Args: configuration (string|configparser.ConfigParser): can be a string (file path to a configuration file) or instance of :class:`configparser.ConfigParser`. defaults: can be used to override single configuration values. Returns: :class:`.xml_generator_configuration_t`: a configuration object The file passed needs to be in a format that can be parsed by :class:`configparser.ConfigParser`. An example configuration file skeleton can be found `here <https://github.com/gccxml/pygccxml/blob/develop/ unittests/xml_generator.cfg>`_.
Loads CastXML or GCC-XML configuration.
def load_xml_generator_configuration(configuration, **defaults): """ Loads CastXML or GCC-XML configuration. Args: configuration (string|configparser.ConfigParser): can be a string (file path to a configuration file) or instance of :class:`configparser.ConfigParser`. defaults: can be used to override single configuration values. Returns: :class:`.xml_generator_configuration_t`: a configuration object The file passed needs to be in a format that can be parsed by :class:`configparser.ConfigParser`. An example configuration file skeleton can be found `here <https://github.com/gccxml/pygccxml/blob/develop/ unittests/xml_generator.cfg>`_. """ parser = configuration if utils.is_str(configuration): parser = ConfigParser() parser.read(configuration) # Create a new empty configuration cfg = xml_generator_configuration_t() values = defaults if not values: values = {} if parser.has_section('xml_generator'): for name, value in parser.items('xml_generator'): if value.strip(): values[name] = value for name, value in values.items(): if isinstance(value, str): value = value.strip() if name == 'gccxml_path': cfg.gccxml_path = value if name == 'xml_generator_path': cfg.xml_generator_path = value elif name == 'working_directory': cfg.working_directory = value elif name == 'include_paths': for p in value.split(';'): p = p.strip() if p: cfg.include_paths.append(os.path.normpath(p)) elif name == 'compiler': cfg.compiler = value elif name == 'xml_generator': cfg.xml_generator = value elif name == 'castxml_epic_version': cfg.castxml_epic_version = int(value) elif name == 'keep_xml': cfg.keep_xml = value elif name == 'cflags': cfg.cflags = value elif name == 'flags': cfg.flags = value elif name == 'compiler_path': cfg.compiler_path = value else: print('\n%s entry was ignored' % name) # If no compiler path was set and we are using castxml, set the path # Here we overwrite the default configuration done in the cfg because # the xml_generator was set through the setter after the creation of a new # empty configuration object. cfg.compiler_path = create_compiler_path( cfg.xml_generator, cfg.compiler_path) return cfg
[ "def", "load_xml_generator_configuration", "(", "configuration", ",", "*", "*", "defaults", ")", ":", "parser", "=", "configuration", "if", "utils", ".", "is_str", "(", "configuration", ")", ":", "parser", "=", "ConfigParser", "(", ")", "parser", ".", "read", "(", "configuration", ")", "# Create a new empty configuration", "cfg", "=", "xml_generator_configuration_t", "(", ")", "values", "=", "defaults", "if", "not", "values", ":", "values", "=", "{", "}", "if", "parser", ".", "has_section", "(", "'xml_generator'", ")", ":", "for", "name", ",", "value", "in", "parser", ".", "items", "(", "'xml_generator'", ")", ":", "if", "value", ".", "strip", "(", ")", ":", "values", "[", "name", "]", "=", "value", "for", "name", ",", "value", "in", "values", ".", "items", "(", ")", ":", "if", "isinstance", "(", "value", ",", "str", ")", ":", "value", "=", "value", ".", "strip", "(", ")", "if", "name", "==", "'gccxml_path'", ":", "cfg", ".", "gccxml_path", "=", "value", "if", "name", "==", "'xml_generator_path'", ":", "cfg", ".", "xml_generator_path", "=", "value", "elif", "name", "==", "'working_directory'", ":", "cfg", ".", "working_directory", "=", "value", "elif", "name", "==", "'include_paths'", ":", "for", "p", "in", "value", ".", "split", "(", "';'", ")", ":", "p", "=", "p", ".", "strip", "(", ")", "if", "p", ":", "cfg", ".", "include_paths", ".", "append", "(", "os", ".", "path", ".", "normpath", "(", "p", ")", ")", "elif", "name", "==", "'compiler'", ":", "cfg", ".", "compiler", "=", "value", "elif", "name", "==", "'xml_generator'", ":", "cfg", ".", "xml_generator", "=", "value", "elif", "name", "==", "'castxml_epic_version'", ":", "cfg", ".", "castxml_epic_version", "=", "int", "(", "value", ")", "elif", "name", "==", "'keep_xml'", ":", "cfg", ".", "keep_xml", "=", "value", "elif", "name", "==", "'cflags'", ":", "cfg", ".", "cflags", "=", "value", "elif", "name", "==", "'flags'", ":", "cfg", ".", "flags", "=", "value", "elif", "name", "==", "'compiler_path'", ":", "cfg", ".", "compiler_path", "=", "value", "else", ":", "print", "(", "'\\n%s entry was ignored'", "%", "name", ")", "# If no compiler path was set and we are using castxml, set the path", "# Here we overwrite the default configuration done in the cfg because", "# the xml_generator was set through the setter after the creation of a new", "# empty configuration object.", "cfg", ".", "compiler_path", "=", "create_compiler_path", "(", "cfg", ".", "xml_generator", ",", "cfg", ".", "compiler_path", ")", "return", "cfg" ]
[ 332, 0 ]
[ 409, 14 ]
python
en
['en', 'error', 'th']
False
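A minimal end-to-end sketch of the configuration flow above; the file name and paths are illustrative. Note that non-empty values read from the file take precedence over the keyword defaults:

# xml_generator.cfg, parsed with configparser.ConfigParser:
#
#   [xml_generator]
#   xml_generator = castxml
#   xml_generator_path = /usr/bin/castxml
#   include_paths = /usr/include;/opt/project/include
#
cfg = load_xml_generator_configuration("xml_generator.cfg", keep_xml=True)
cfg.raise_on_wrong_settings()   # validate directories and generator name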
create_compiler_path
(xml_generator, compiler_path)
Try to guess a path for the compiler. If you want to use a specific compiler, please provide the compiler path manually, as the guess may not be what you are expecting. Providing the path can be done by passing it as an argument (compiler_path) to the xml_generator_configuration_t() or by defining it in your pygccxml configuration file.
Try to guess a path for the compiler.
def create_compiler_path(xml_generator, compiler_path): """ Try to guess a path for the compiler. If you want to use a specific compiler, please provide the compiler path manually, as the guess may not be what you are expecting. Providing the path can be done by passing it as an argument (compiler_path) to the xml_generator_configuration_t() or by defining it in your pygccxml configuration file. """ if xml_generator == 'castxml' and compiler_path is None: if platform.system() == 'Windows': # Look for msvc p = subprocess.Popen( ['where', 'cl'], stdout=subprocess.PIPE, stderr=subprocess.PIPE) compiler_path = p.stdout.read().decode("utf-8").rstrip() p.wait() p.stdout.close() p.stderr.close() # No msvc found; look for mingw if compiler_path == '': p = subprocess.Popen( ['where', 'mingw'], stdout=subprocess.PIPE, stderr=subprocess.PIPE) compiler_path = p.stdout.read().decode("utf-8").rstrip() p.wait() p.stdout.close() p.stderr.close() else: # OS X or Linux # Look for clang first, then gcc p = subprocess.Popen( ['which', 'clang++'], stdout=subprocess.PIPE, stderr=subprocess.PIPE) compiler_path = p.stdout.read().decode("utf-8").rstrip() p.wait() p.stdout.close() p.stderr.close() # No clang found; use gcc if compiler_path == '': p = subprocess.Popen( ['which', 'c++'], stdout=subprocess.PIPE, stderr=subprocess.PIPE) compiler_path = p.stdout.read().decode("utf-8").rstrip() p.wait() p.stdout.close() p.stderr.close() if compiler_path == "": compiler_path = None return compiler_path
[ "def", "create_compiler_path", "(", "xml_generator", ",", "compiler_path", ")", ":", "if", "xml_generator", "==", "'castxml'", "and", "compiler_path", "is", "None", ":", "if", "platform", ".", "system", "(", ")", "==", "'Windows'", ":", "# Look for msvc", "p", "=", "subprocess", ".", "Popen", "(", "[", "'where'", ",", "'cl'", "]", ",", "stdout", "=", "subprocess", ".", "PIPE", ",", "stderr", "=", "subprocess", ".", "PIPE", ")", "compiler_path", "=", "p", ".", "stdout", ".", "read", "(", ")", ".", "decode", "(", "\"utf-8\"", ")", ".", "rstrip", "(", ")", "p", ".", "wait", "(", ")", "p", ".", "stdout", ".", "close", "(", ")", "p", ".", "stderr", ".", "close", "(", ")", "# No msvc found; look for mingw", "if", "compiler_path", "==", "''", ":", "p", "=", "subprocess", ".", "Popen", "(", "[", "'where'", ",", "'mingw'", "]", ",", "stdout", "=", "subprocess", ".", "PIPE", ",", "stderr", "=", "subprocess", ".", "PIPE", ")", "compiler_path", "=", "p", ".", "stdout", ".", "read", "(", ")", ".", "decode", "(", "\"utf-8\"", ")", ".", "rstrip", "(", ")", "p", ".", "wait", "(", ")", "p", ".", "stdout", ".", "close", "(", ")", "p", ".", "stderr", ".", "close", "(", ")", "else", ":", "# OS X or Linux", "# Look for clang first, then gcc", "p", "=", "subprocess", ".", "Popen", "(", "[", "'which'", ",", "'clang++'", "]", ",", "stdout", "=", "subprocess", ".", "PIPE", ",", "stderr", "=", "subprocess", ".", "PIPE", ")", "compiler_path", "=", "p", ".", "stdout", ".", "read", "(", ")", ".", "decode", "(", "\"utf-8\"", ")", ".", "rstrip", "(", ")", "p", ".", "wait", "(", ")", "p", ".", "stdout", ".", "close", "(", ")", "p", ".", "stderr", ".", "close", "(", ")", "# No clang found; use gcc", "if", "compiler_path", "==", "''", ":", "p", "=", "subprocess", ".", "Popen", "(", "[", "'which'", ",", "'c++'", "]", ",", "stdout", "=", "subprocess", ".", "PIPE", ",", "stderr", "=", "subprocess", ".", "PIPE", ")", "compiler_path", "=", "p", ".", "stdout", ".", "read", "(", ")", ".", "decode", "(", "\"utf-8\"", ")", ".", "rstrip", "(", ")", "p", ".", "wait", "(", ")", "p", ".", "stdout", ".", "close", "(", ")", "p", ".", "stderr", ".", "close", "(", ")", "if", "compiler_path", "==", "\"\"", ":", "compiler_path", "=", "None", "return", "compiler_path" ]
[ 412, 0 ]
[ 470, 24 ]
python
en
['en', 'error', 'th']
False
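Two properties that follow directly from the branches above, shown as a sketch with illustrative paths:

# Probing only happens for castxml when no explicit path is given:
assert create_compiler_path("gccxml", None) is None
# An explicitly provided path is returned untouched:
assert create_compiler_path("castxml", "/usr/bin/clang++") == "/usr/bin/clang++"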
parser_configuration_t.include_paths
(self)
list of include paths to look for header files
list of include paths to look for header files
def include_paths(self): """list of include paths to look for header files""" return self.__include_paths
[ "def", "include_paths", "(", "self", ")", ":", "return", "self", ".", "__include_paths" ]
[ 102, 4 ]
[ 104, 35 ]
python
en
['en', 'en', 'en']
True
parser_configuration_t.define_symbols
(self)
list of "define" directives
list of "define" directives
def define_symbols(self): """list of "define" directives """ return self.__define_symbols
[ "def", "define_symbols", "(", "self", ")", ":", "return", "self", ".", "__define_symbols" ]
[ 107, 4 ]
[ 109, 36 ]
python
en
['en', 'en', 'en']
True
parser_configuration_t.undefine_symbols
(self)
list of "undefine" directives
list of "undefine" directives
def undefine_symbols(self): """list of "undefine" directives """ return self.__undefine_symbols
[ "def", "undefine_symbols", "(", "self", ")", ":", "return", "self", ".", "__undefine_symbols" ]
[ 112, 4 ]
[ 114, 38 ]
python
de
['en', 'de', 'ur']
False
parser_configuration_t.compiler
(self)
get compiler name to simulate
get compiler name to simulate
def compiler(self): """get compiler name to simulate""" return self.__compiler
[ "def", "compiler", "(", "self", ")", ":", "return", "self", ".", "__compiler" ]
[ 117, 4 ]
[ 119, 30 ]
python
en
['en', 'en', 'en']
True
parser_configuration_t.compiler
(self, compiler)
set compiler name to simulate
set compiler name to simulate
def compiler(self, compiler): """set compiler name to simulate""" self.__compiler = compiler
[ "def", "compiler", "(", "self", ",", "compiler", ")", ":", "self", ".", "__compiler", "=", "compiler" ]
[ 122, 4 ]
[ 124, 34 ]
python
en
['en', 'en', 'en']
True
parser_configuration_t.xml_generator
(self)
get xml_generator (gccxml or castxml)
get xml_generator (gccxml or castxml)
def xml_generator(self): """get xml_generator (gccxml or castxml)""" return self.__xml_generator
[ "def", "xml_generator", "(", "self", ")", ":", "return", "self", ".", "__xml_generator" ]
[ 127, 4 ]
[ 129, 35 ]
python
en
['en', 'la', 'en']
True
parser_configuration_t.xml_generator
(self, xml_generator)
set xml_generator (gccxml or castxml)
set xml_generator (gccxml or castxml)
def xml_generator(self, xml_generator): """set xml_generator (gccxml or castxml)""" if "real" in xml_generator: # Support for gccxml.real from newer gccxml package # Can be removed once gccxml support is dropped. xml_generator = "gccxml" self.__xml_generator = xml_generator
[ "def", "xml_generator", "(", "self", ",", "xml_generator", ")", ":", "if", "\"real\"", "in", "xml_generator", ":", "# Support for gccxml.real from newer gccxml package", "# Can be removed once gccxml support is dropped.", "xml_generator", "=", "\"gccxml\"", "self", ".", "__xml_generator", "=", "xml_generator" ]
[ 132, 4 ]
[ 138, 44 ]
python
en
['en', 'la', 'en']
True
parser_configuration_t.castxml_epic_version
(self)
File format version used by castxml.
File format version used by castxml.
def castxml_epic_version(self): """ File format version used by castxml. """ return self.__castxml_epic_version
[ "def", "castxml_epic_version", "(", "self", ")", ":", "return", "self", ".", "__castxml_epic_version" ]
[ 141, 4 ]
[ 145, 42 ]
python
en
['en', 'error', 'th']
False
parser_configuration_t.castxml_epic_version
(self, castxml_epic_version)
File format version used by castxml.
File format version used by castxml.
def castxml_epic_version(self, castxml_epic_version): """ File format version used by castxml. """ self.__castxml_epic_version = castxml_epic_version
[ "def", "castxml_epic_version", "(", "self", ",", "castxml_epic_version", ")", ":", "self", ".", "__castxml_epic_version", "=", "castxml_epic_version" ]
[ 148, 4 ]
[ 152, 58 ]
python
en
['en', 'error', 'th']
False
parser_configuration_t.keep_xml
(self)
Are xml files kept after errors.
Are xml files kept after errors.
def keep_xml(self): """Are xml files kept after errors.""" return self.__keep_xml
[ "def", "keep_xml", "(", "self", ")", ":", "return", "self", ".", "__keep_xml" ]
[ 155, 4 ]
[ 157, 30 ]
python
en
['da', 'en', 'en']
True
parser_configuration_t.keep_xml
(self, keep_xml)
Set whether xml files are kept after errors.
Set whether xml files are kept after errors.
def keep_xml(self, keep_xml): """Set whether xml files are kept after errors.""" self.__keep_xml = keep_xml
[ "def", "keep_xml", "(", "self", ",", "keep_xml", ")", ":", "self", ".", "__keep_xml", "=", "keep_xml" ]
[ 160, 4 ]
[ 162, 34 ]
python
en
['da', 'en', 'en']
True
parser_configuration_t.flags
(self)
Optional flags for pygccxml.
Optional flags for pygccxml.
def flags(self): """Optional flags for pygccxml.""" return self.__flags
[ "def", "flags", "(", "self", ")", ":", "return", "self", ".", "__flags" ]
[ 165, 4 ]
[ 167, 27 ]
python
en
['en', 'en', 'en']
True
parser_configuration_t.flags
(self, flags)
Optional flags for pygccxml.
Optional flags for pygccxml.
def flags(self, flags): """Optional flags for pygccxml.""" if flags is None: flags = [] self.__flags = flags
[ "def", "flags", "(", "self", ",", "flags", ")", ":", "if", "flags", "is", "None", ":", "flags", "=", "[", "]", "self", ".", "__flags", "=", "flags" ]
[ 170, 4 ]
[ 174, 28 ]
python
en
['en', 'en', 'en']
True
parser_configuration_t.compiler_path
(self)
Get the path for the compiler.
Get the path for the compiler.
def compiler_path(self): """Get the path for the compiler.""" return self.__compiler_path
[ "def", "compiler_path", "(", "self", ")", ":", "return", "self", ".", "__compiler_path" ]
[ 177, 4 ]
[ 179, 35 ]
python
en
['en', 'en', 'en']
True
parser_configuration_t.compiler_path
(self, compiler_path)
Set the path for the compiler.
Set the path for the compiler.
def compiler_path(self, compiler_path): """Set the path for the compiler.""" self.__compiler_path = compiler_path
[ "def", "compiler_path", "(", "self", ",", "compiler_path", ")", ":", "self", ".", "__compiler_path", "=", "compiler_path" ]
[ 182, 4 ]
[ 184, 44 ]
python
en
['en', 'en', 'en']
True
parser_configuration_t.cflags
(self)
additional flags to pass to compiler
additional flags to pass to compiler
def cflags(self): """additional flags to pass to compiler""" return self.__cflags
[ "def", "cflags", "(", "self", ")", ":", "return", "self", ".", "__cflags" ]
[ 187, 4 ]
[ 189, 28 ]
python
en
['en', 'en', 'en']
True
parser_configuration_t.raise_on_wrong_settings
(self)
Validates the configuration settings and raises RuntimeError on error
Validates the configuration settings and raises RuntimeError on error
def raise_on_wrong_settings(self): """ Validates the configuration settings and raises RuntimeError on error """ self.__ensure_dir_exists(self.working_directory, 'working directory') for idir in self.include_paths: self.__ensure_dir_exists(idir, 'include directory') if self.__xml_generator not in ["castxml", "gccxml"]: msg = ('xml_generator("%s") should either be ' + '"castxml" or "gccxml".') % self.xml_generator raise RuntimeError(msg)
[ "def", "raise_on_wrong_settings", "(", "self", ")", ":", "self", ".", "__ensure_dir_exists", "(", "self", ".", "working_directory", ",", "'working directory'", ")", "for", "idir", "in", "self", ".", "include_paths", ":", "self", ".", "__ensure_dir_exists", "(", "idir", ",", "'include directory'", ")", "if", "self", ".", "__xml_generator", "not", "in", "[", "\"castxml\"", ",", "\"gccxml\"", "]", ":", "msg", "=", "(", "'xml_generator(\"%s\") should either be '", "+", "'\"castxml\" or \"gccxml\".'", ")", "%", "self", ".", "xml_generator", "raise", "RuntimeError", "(", "msg", ")" ]
[ 209, 4 ]
[ 219, 35 ]
python
en
['en', 'error', 'th']
False
xml_generator_configuration_t.xml_generator_path
(self)
XML generator binary location
XML generator binary location
def xml_generator_path(self): """ XML generator binary location """ return self.__xml_generator_path
[ "def", "xml_generator_path", "(", "self", ")", ":", "return", "self", ".", "__xml_generator_path" ]
[ 279, 4 ]
[ 285, 40 ]
python
en
['en', 'error', 'th']
False
xml_generator_configuration_t.xml_generator_from_xml_file
(self)
Configuration object containing information about the xml generator read from the xml file. Returns: utils.xml_generators: configuration object
Configuration object containing information about the xml generator read from the xml file.
def xml_generator_from_xml_file(self): """ Configuration object containing information about the xml generator read from the xml file. Returns: utils.xml_generators: configuration object """ return self.__xml_generator_from_xml_file
[ "def", "xml_generator_from_xml_file", "(", "self", ")", ":", "return", "self", ".", "__xml_generator_from_xml_file" ]
[ 292, 4 ]
[ 300, 49 ]
python
en
['en', 'error', 'th']
False
xml_generator_configuration_t.start_with_declarations
(self)
list of declarations gccxml should start with, when it dumps declaration tree
list of declarations gccxml should start with, when it dumps declaration tree
def start_with_declarations(self): """list of declarations gccxml should start with, when it dumps declaration tree""" return self.__start_with_declarations
[ "def", "start_with_declarations", "(", "self", ")", ":", "return", "self", ".", "__start_with_declarations" ]
[ 307, 4 ]
[ 310, 45 ]
python
en
['en', 'fr', 'en']
True
xml_generator_configuration_t.ignore_gccxml_output
(self)
set this property to True, if you want pygccxml to ignore any error or warning that comes from gccxml
set this property to True, if you want pygccxml to ignore any error or warning that comes from gccxml
def ignore_gccxml_output(self): """set this property to True, if you want pygccxml to ignore any error or warning that comes from gccxml""" return self.__ignore_gccxml_output
[ "def", "ignore_gccxml_output", "(", "self", ")", ":", "return", "self", ".", "__ignore_gccxml_output" ]
[ 313, 4 ]
[ 316, 42 ]
python
en
['en', 'en', 'en']
True
User.password
(self, value)
Property setter: user.password = "xxxxx" :param value: the data assigned to the property; value is "xxxxx", the original plaintext password :return:
Property setter: user.password = "xxxxx" :param value: the data assigned to the property; value is "xxxxx", the original plaintext password :return:
def password(self, value): """ Property setter: user.password = "xxxxx" :param value: the data assigned to the property; value is "xxxxx", the original plaintext password :return: """ self.password_hash = generate_password_hash(value)
[ "def", "password", "(", "self", ",", "value", ")", ":", "self", ".", "password_hash", "=", "generate_password_hash", "(", "value", ")" ]
[ 55, 4 ]
[ 61, 58 ]
python
en
['en', 'error', 'th']
False
User.check_password
(self, passwd)
Verify that the password is correct :param passwd: the original password entered by the user at login :return: True if correct, otherwise False
Verify that the password is correct :param passwd: the original password entered by the user at login :return: True if correct, otherwise False
def check_password(self, passwd): """ Verify that the password is correct :param passwd: the original password entered by the user at login :return: True if correct, otherwise False """ return check_password_hash(self.password_hash, passwd)
[ "def", "check_password", "(", "self", ",", "passwd", ")", ":", "return", "check_password_hash", "(", "self", ".", "password_hash", ",", "passwd", ")" ]
[ 67, 4 ]
[ 73, 62 ]
python
en
['en', 'error', 'th']
False
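These two methods wrap werkzeug's password helpers; a standalone sketch with an illustrative password:

from werkzeug.security import generate_password_hash, check_password_hash

pw_hash = generate_password_hash("s3cret")   # salted, one-way hash
assert check_password_hash(pw_hash, "s3cret")
assert not check_password_hash(pw_hash, "wrong")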
User.auth_to_dict
(self)
Convert the real-name information to a dictionary
Convert the real-name information to a dictionary
def auth_to_dict(self): """Convert the real-name information to a dictionary""" auth_dict = { "user_id": self.id, "real_name": self.real_name, } return auth_dict
[ "def", "auth_to_dict", "(", "self", ")", ":", "auth_dict", "=", "{", "\"user_id\"", ":", "self", ".", "id", ",", "\"real_name\"", ":", "self", ".", "real_name", ",", "}", "return", "auth_dict" ]
[ 85, 4 ]
[ 91, 24 ]
python
zh
['zh', 'zh', 'zh']
False