Dataset columns (name, type, observed value range):
    identifier                      string, 1 to 155 characters
    parameters                      string, 2 to 6.09k characters
    docstring                       string, 11 to 63.4k characters
    docstring_summary               string, 0 to 63.4k characters
    function                        string, 29 to 99.8k characters
    function_tokens                 sequence
    start_point                     sequence
    end_point                       sequence
    language                        string, 1 distinct value
    docstring_language              string, 2 to 7 characters
    docstring_language_predictions  string, 18 to 23 characters
    is_langid_reliable              string, 2 distinct values
variable_t._get__cmp__items
(self)
implementation details
implementation details
def _get__cmp__items(self):
    """implementation details"""
    return [self.decl_type, self.type_qualifiers, self.value]
[ "def", "_get__cmp__items", "(", "self", ")", ":", "return", "[", "self", ".", "decl_type", ",", "self", ".", "type_qualifiers", ",", "self", ".", "value", "]" ]
[ 34, 4 ]
[ 36, 65 ]
python
da
['eo', 'da', 'en']
False
variable_t.__eq__
(self, other)
implementation details
implementation details
def __eq__(self, other):
    """implementation details"""
    if not declaration.declaration_t.__eq__(self, other):
        return False
    return self.decl_type == other.decl_type \
        and self.type_qualifiers == other.type_qualifiers \
        and self.value == other.value \
        and self.bits == other.bits
[ "def", "__eq__", "(", "self", ",", "other", ")", ":", "if", "not", "declaration", ".", "declaration_t", ".", "__eq__", "(", "self", ",", "other", ")", ":", "return", "False", "return", "self", ".", "decl_type", "==", "other", ".", "decl_type", "and", "self", ".", "type_qualifiers", "==", "other", ".", "type_qualifiers", "and", "self", ".", "value", "==", "other", ".", "value", "and", "self", ".", "bits", "==", "other", ".", "bits" ]
[ 38, 4 ]
[ 45, 39 ]
python
da
['eo', 'da', 'en']
False
variable_t.decl_type
(self)
reference to the variable :class:`decl_type <type_t>`
reference to the variable :class:`decl_type <type_t>`
def decl_type(self):
    """reference to the variable :class:`decl_type <type_t>`"""
    return self._decl_type
[ "def", "decl_type", "(", "self", ")", ":", "return", "self", ".", "_decl_type" ]
[ 51, 4 ]
[ 53, 30 ]
python
en
['en', 'en', 'en']
True
variable_t.type_qualifiers
(self)
reference to the :class:`type_qualifiers_t` instance
reference to the :class:`type_qualifiers_t` instance
def type_qualifiers(self):
    """reference to the :class:`type_qualifiers_t` instance"""
    return self._type_qualifiers
[ "def", "type_qualifiers", "(", "self", ")", ":", "return", "self", ".", "_type_qualifiers" ]
[ 60, 4 ]
[ 62, 36 ]
python
en
['en', 'en', 'en']
True
variable_t.value
(self)
string, that contains the variable value
string, that contains the variable value
def value(self):
    """string, that contains the variable value"""
    return self._value
[ "def", "value", "(", "self", ")", ":", "return", "self", ".", "_value" ]
[ 69, 4 ]
[ 71, 26 ]
python
en
['en', 'en', 'en']
True
variable_t.bits
(self)
integer, that contains information about how many bit takes bit field
integer, that contains information about how many bit takes bit field
def bits(self):
    """integer, that contains information about how many bit takes bit field"""
    return self._bits
[ "def", "bits", "(", "self", ")", ":", "return", "self", ".", "_bits" ]
[ 78, 4 ]
[ 81, 25 ]
python
en
['en', 'en', 'en']
True
variable_t.byte_offset
(self)
integer, offset of the field from the beginning of class.
integer, offset of the field from the beginning of class.
def byte_offset(self):
    """integer, offset of the field from the beginning of class."""
    return self._byte_offset
[ "def", "byte_offset", "(", "self", ")", ":", "return", "self", ".", "_byte_offset" ]
[ 88, 4 ]
[ 90, 32 ]
python
en
['en', 'en', 'en']
True
variable_t.mangled
(self)
Unique declaration name generated by the compiler. :return: the mangled name :rtype: str
Unique declaration name generated by the compiler.
def mangled(self):
    """
    Unique declaration name generated by the compiler.

    :return: the mangled name
    :rtype: str
    """
    return self.get_mangled_name()
[ "def", "mangled", "(", "self", ")", ":", "return", "self", ".", "get_mangled_name", "(", ")" ]
[ 105, 4 ]
[ 114, 38 ]
python
en
['en', 'error', 'th']
False
parser_t.has_pattern
(self, decl_string)
Implementation detail
Implementation detail
def has_pattern(self, decl_string):
    """
    Implementation detail
    """
    if self.__begin == "<":
        # Cleanup parentheses blocks before checking for the pattern
        # See also the args() method (in this file) for more explanations.
        decl_string = re.sub("\\s\\(.*?\\)", "", decl_string).strip()

    last_part = decl_string.split('::')[-1]
    return (
        decl_string.find(self.__begin) != -1 and
        last_part.find(self.__end) != -1
    )
[ "def", "has_pattern", "(", "self", ",", "decl_string", ")", ":", "if", "self", ".", "__begin", "==", "\"<\"", ":", "# Cleanup parentheses blocks before checking for the pattern", "# See also the args() method (in this file) for more explanations.", "decl_string", "=", "re", ".", "sub", "(", "\"\\\\s\\\\(.*?\\\\)\"", ",", "\"\"", ",", "decl_string", ")", ".", "strip", "(", ")", "last_part", "=", "decl_string", ".", "split", "(", "'::'", ")", "[", "-", "1", "]", "return", "(", "decl_string", ".", "find", "(", "self", ".", "__begin", ")", "!=", "-", "1", "and", "last_part", ".", "find", "(", "self", ".", "__end", ")", "!=", "-", "1", ")" ]
[ 31, 4 ]
[ 45, 9 ]
python
en
['en', 'error', 'th']
False
parser_t.name
(self, decl_string)
implementation details
implementation details
def name(self, decl_string):
    """implementation details"""
    if not self.has_pattern(decl_string):
        return decl_string
    args_begin = decl_string.find(self.__begin)
    return decl_string[0: args_begin].strip()
[ "def", "name", "(", "self", ",", "decl_string", ")", ":", "if", "not", "self", ".", "has_pattern", "(", "decl_string", ")", ":", "return", "decl_string", "args_begin", "=", "decl_string", ".", "find", "(", "self", ".", "__begin", ")", "return", "decl_string", "[", "0", ":", "args_begin", "]", ".", "strip", "(", ")" ]
[ 47, 4 ]
[ 52, 49 ]
python
da
['eo', 'da', 'en']
False
parser_t.__find_args_separator
(self, decl_string, start_pos)
implementation details
implementation details
def __find_args_separator(self, decl_string, start_pos):
    """implementation details"""
    bracket_depth = 0
    for index, ch in enumerate(decl_string[start_pos:]):
        if ch not in (self.__begin, self.__end, self.__separator):
            continue  # I am interested only in < and >
        elif self.__separator == ch:
            if not bracket_depth:
                return index + start_pos
        elif self.__begin == ch:
            bracket_depth += 1
        elif not bracket_depth:
            return index + start_pos
        else:
            bracket_depth -= 1
    return -1
[ "def", "__find_args_separator", "(", "self", ",", "decl_string", ",", "start_pos", ")", ":", "bracket_depth", "=", "0", "for", "index", ",", "ch", "in", "enumerate", "(", "decl_string", "[", "start_pos", ":", "]", ")", ":", "if", "ch", "not", "in", "(", "self", ".", "__begin", ",", "self", ".", "__end", ",", "self", ".", "__separator", ")", ":", "continue", "# I am interested only in < and >", "elif", "self", ".", "__separator", "==", "ch", ":", "if", "not", "bracket_depth", ":", "return", "index", "+", "start_pos", "elif", "self", ".", "__begin", "==", "ch", ":", "bracket_depth", "+=", "1", "elif", "not", "bracket_depth", ":", "return", "index", "+", "start_pos", "else", ":", "bracket_depth", "-=", "1", "return", "-", "1" ]
[ 54, 4 ]
[ 69, 17 ]
python
da
['eo', 'da', 'en']
False
parser_t.args
(self, decl_string)
Extracts a list of arguments from the provided declaration string. Implementation detail. Example usages: Input: myClass<std::vector<int>, std::vector<double>> Output: [std::vector<int>, std::vector<double>] Args: decl_string (str): the full declaration string Returns: list: list of arguments as strings
Extracts a list of arguments from the provided declaration string.
def args(self, decl_string):
    """
    Extracts a list of arguments from the provided declaration string.

    Implementation detail. Example usages:
    Input: myClass<std::vector<int>, std::vector<double>>
    Output: [std::vector<int>, std::vector<double>]

    Args:
        decl_string (str): the full declaration string

    Returns:
        list: list of arguments as strings
    """
    args_begin = decl_string.find(self.__begin)
    args_end = decl_string.rfind(self.__end)
    if -1 in (args_begin, args_end) or args_begin == args_end:
        raise RuntimeError(
            "%s doesn't validate template instantiation string" %
            decl_string)

    args_only = decl_string[args_begin + 1: args_end].strip()

    # The list of arguments to be returned
    args = []

    parentheses_blocks = []
    prev_span = 0
    if self.__begin == "<":
        # In case where we are splitting template names, there
        # can be parentheses blocks (for arguments) that need to be taken
        # care of.

        # Build a regex matching a space (\s)
        # + something inside parentheses
        regex = re.compile("\\s\\(.*?\\)")
        for m in regex.finditer(args_only):
            # Store the position and the content
            parentheses_blocks.append([m.start() - prev_span, m.group()])
            prev_span = m.end() - m.start()
            # Cleanup the args_only string by removing the parentheses and
            # their content.
            args_only = args_only.replace(m.group(), "")

    # Now we are trying to split the args_only string in multiple arguments
    previous_found, found = 0, 0
    while True:
        found = self.__find_args_separator(args_only, previous_found)
        if found == -1:
            args.append(args_only[previous_found:].strip())
            # This is the last argument. Break out of the loop.
            break
        else:
            args.append(args_only[previous_found: found].strip())
            previous_found = found + 1  # skip found separator

    # Get the size and position for each argument
    absolute_pos_list = []
    absolute_pos = 0
    for arg in args:
        absolute_pos += len(arg)
        absolute_pos_list.append(absolute_pos)

    for item in parentheses_blocks:
        # In case where there are parentheses blocks we add them back
        # to the right argument
        parentheses_block_absolute_pos = item[0]
        parentheses_block_string = item[1]
        current_arg_absolute_pos = 0
        for arg_index, arg_absolute_pos in enumerate(absolute_pos_list):
            current_arg_absolute_pos += arg_absolute_pos
            if current_arg_absolute_pos >= parentheses_block_absolute_pos:
                # Add the parentheses block back and break out of the loop.
                args[arg_index] += parentheses_block_string
                break

    return args
[ "def", "args", "(", "self", ",", "decl_string", ")", ":", "args_begin", "=", "decl_string", ".", "find", "(", "self", ".", "__begin", ")", "args_end", "=", "decl_string", ".", "rfind", "(", "self", ".", "__end", ")", "if", "-", "1", "in", "(", "args_begin", ",", "args_end", ")", "or", "args_begin", "==", "args_end", ":", "raise", "RuntimeError", "(", "\"%s doesn't validate template instantiation string\"", "%", "decl_string", ")", "args_only", "=", "decl_string", "[", "args_begin", "+", "1", ":", "args_end", "]", ".", "strip", "(", ")", "# The list of arguments to be returned", "args", "=", "[", "]", "parentheses_blocks", "=", "[", "]", "prev_span", "=", "0", "if", "self", ".", "__begin", "==", "\"<\"", ":", "# In case where we are splitting template names, there", "# can be parentheses blocks (for arguments) that need to be taken", "# care of.", "# Build a regex matching a space (\\s)", "# + something inside parentheses", "regex", "=", "re", ".", "compile", "(", "\"\\\\s\\\\(.*?\\\\)\"", ")", "for", "m", "in", "regex", ".", "finditer", "(", "args_only", ")", ":", "# Store the position and the content", "parentheses_blocks", ".", "append", "(", "[", "m", ".", "start", "(", ")", "-", "prev_span", ",", "m", ".", "group", "(", ")", "]", ")", "prev_span", "=", "m", ".", "end", "(", ")", "-", "m", ".", "start", "(", ")", "# Cleanup the args_only string by removing the parentheses and", "# their content.", "args_only", "=", "args_only", ".", "replace", "(", "m", ".", "group", "(", ")", ",", "\"\"", ")", "# Now we are trying to split the args_only string in multiple arguments", "previous_found", ",", "found", "=", "0", ",", "0", "while", "True", ":", "found", "=", "self", ".", "__find_args_separator", "(", "args_only", ",", "previous_found", ")", "if", "found", "==", "-", "1", ":", "args", ".", "append", "(", "args_only", "[", "previous_found", ":", "]", ".", "strip", "(", ")", ")", "# This is the last argument. Break out of the loop.", "break", "else", ":", "args", ".", "append", "(", "args_only", "[", "previous_found", ":", "found", "]", ".", "strip", "(", ")", ")", "previous_found", "=", "found", "+", "1", "# skip found separator", "# Get the size and position for each argument", "absolute_pos_list", "=", "[", "]", "absolute_pos", "=", "0", "for", "arg", "in", "args", ":", "absolute_pos", "+=", "len", "(", "arg", ")", "absolute_pos_list", ".", "append", "(", "absolute_pos", ")", "for", "item", "in", "parentheses_blocks", ":", "# In case where there are parentheses blocks we add them back", "# to the right argument", "parentheses_block_absolute_pos", "=", "item", "[", "0", "]", "parentheses_block_string", "=", "item", "[", "1", "]", "current_arg_absolute_pos", "=", "0", "for", "arg_index", ",", "arg_absolute_pos", "in", "enumerate", "(", "absolute_pos_list", ")", ":", "current_arg_absolute_pos", "+=", "arg_absolute_pos", "if", "current_arg_absolute_pos", ">=", "parentheses_block_absolute_pos", ":", "# Add the parentheses block back and break out of the loop.", "args", "[", "arg_index", "]", "+=", "parentheses_block_string", "break", "return", "args" ]
[ 71, 4 ]
[ 149, 19 ]
python
en
['en', 'error', 'th']
False
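The bracket-depth splitting shared by __find_args_separator and args can be illustrated with a small standalone sketch. split_template_args below is a hypothetical helper written for this note, not part of pygccxml, and assumes the '<', '>', ',' delimiters used for template instantiation strings.

# Minimal sketch of bracket-aware template-argument splitting, assuming
# '<' / '>' / ',' delimiters as in the parser_t methods above.
# split_template_args is a hypothetical helper, not pygccxml's API.
def split_template_args(decl_string, begin="<", end=">", separator=","):
    args_begin = decl_string.find(begin)
    args_end = decl_string.rfind(end)
    if -1 in (args_begin, args_end) or args_begin == args_end:
        raise RuntimeError("%s doesn't look like a template instantiation" % decl_string)
    inner = decl_string[args_begin + 1:args_end]
    args, depth, start = [], 0, 0
    for index, ch in enumerate(inner):
        if ch == begin:
            depth += 1
        elif ch == end:
            depth -= 1
        elif ch == separator and depth == 0:
            # only split at separators that are not nested inside brackets
            args.append(inner[start:index].strip())
            start = index + 1
    args.append(inner[start:].strip())
    return decl_string[:args_begin].strip(), args


print(split_template_args("myClass<std::vector<int>, std::vector<double>>"))
# ('myClass', ['std::vector<int>', 'std::vector<double>'])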
parser_t.find_args
(self, text, start=None)
implementation details
implementation details
def find_args(self, text, start=None):
    """implementation details"""
    if start is None:
        start = 0
    first_occurance = text.find(self.__begin, start)
    if first_occurance == -1:
        return self.NOT_FOUND
    previous_found, found = first_occurance + 1, 0
    while True:
        found = self.__find_args_separator(text, previous_found)
        if found == -1:
            return self.NOT_FOUND
        elif text[found] == self.__end:
            return first_occurance, found
        else:
            previous_found = found + 1
[ "def", "find_args", "(", "self", ",", "text", ",", "start", "=", "None", ")", ":", "if", "start", "is", "None", ":", "start", "=", "0", "first_occurance", "=", "text", ".", "find", "(", "self", ".", "__begin", ",", "start", ")", "if", "first_occurance", "==", "-", "1", ":", "return", "self", ".", "NOT_FOUND", "previous_found", ",", "found", "=", "first_occurance", "+", "1", ",", "0", "while", "True", ":", "found", "=", "self", ".", "__find_args_separator", "(", "text", ",", "previous_found", ")", "if", "found", "==", "-", "1", ":", "return", "self", ".", "NOT_FOUND", "elif", "text", "[", "found", "]", "==", "self", ".", "__end", ":", "return", "first_occurance", ",", "found", "else", ":", "previous_found", "=", "found", "+", "1" ]
[ 154, 4 ]
[ 169, 42 ]
python
da
['eo', 'da', 'en']
False
parser_t.split
(self, decl_string)
implementation details
implementation details
def split(self, decl_string):
    """implementation details"""
    assert self.has_pattern(decl_string)
    return self.name(decl_string), self.args(decl_string)
[ "def", "split", "(", "self", ",", "decl_string", ")", ":", "assert", "self", ".", "has_pattern", "(", "decl_string", ")", "return", "self", ".", "name", "(", "decl_string", ")", ",", "self", ".", "args", "(", "decl_string", ")" ]
[ 171, 4 ]
[ 174, 61 ]
python
da
['eo', 'da', 'en']
False
parser_t.split_recursive
(self, decl_string)
implementation details
implementation details
def split_recursive(self, decl_string):
    """implementation details"""
    assert self.has_pattern(decl_string)
    to_go = [decl_string]
    while to_go:
        name, args = self.split(to_go.pop())
        for arg in args:
            if self.has_pattern(arg):
                to_go.append(arg)
        yield name, args
[ "def", "split_recursive", "(", "self", ",", "decl_string", ")", ":", "assert", "self", ".", "has_pattern", "(", "decl_string", ")", "to_go", "=", "[", "decl_string", "]", "while", "to_go", ":", "name", ",", "args", "=", "self", ".", "split", "(", "to_go", ".", "pop", "(", ")", ")", "for", "arg", "in", "args", ":", "if", "self", ".", "has_pattern", "(", "arg", ")", ":", "to_go", ".", "append", "(", "arg", ")", "yield", "name", ",", "args" ]
[ 176, 4 ]
[ 185, 28 ]
python
da
['eo', 'da', 'en']
False
parser_t.join
(self, name, args, arg_separator=None)
implementation details
implementation details
def join(self, name, args, arg_separator=None):
    """implementation details"""
    if None is arg_separator:
        arg_separator = ', '
    args = [_f for _f in args if _f]

    if not args:
        args_str = ' '
    elif len(args) == 1:
        args_str = ' ' + args[0] + ' '
    else:
        args_str = ' ' + arg_separator.join(args) + ' '

    return ''.join([name, self.__begin, args_str, self.__end])
[ "def", "join", "(", "self", ",", "name", ",", "args", ",", "arg_separator", "=", "None", ")", ":", "if", "None", "is", "arg_separator", ":", "arg_separator", "=", "', '", "args", "=", "[", "_f", "for", "_f", "in", "args", "if", "_f", "]", "if", "not", "args", ":", "args_str", "=", "' '", "elif", "len", "(", "args", ")", "==", "1", ":", "args_str", "=", "' '", "+", "args", "[", "0", "]", "+", "' '", "else", ":", "args_str", "=", "' '", "+", "arg_separator", ".", "join", "(", "args", ")", "+", "' '", "return", "''", ".", "join", "(", "[", "name", ",", "self", ".", "__begin", ",", "args_str", ",", "self", ".", "__end", "]", ")" ]
[ 187, 4 ]
[ 200, 66 ]
python
da
['eo', 'da', 'en']
False
parser_t.normalize
(self, decl_string, arg_separator=None)
implementation details
implementation details
def normalize(self, decl_string, arg_separator=None):
    """implementation details"""
    if not self.has_pattern(decl_string):
        return decl_string
    name, args = self.split(decl_string)
    for i, arg in enumerate(args):
        args[i] = self.normalize(arg)
    return self.join(name, args, arg_separator)
[ "def", "normalize", "(", "self", ",", "decl_string", ",", "arg_separator", "=", "None", ")", ":", "if", "not", "self", ".", "has_pattern", "(", "decl_string", ")", ":", "return", "decl_string", "name", ",", "args", "=", "self", ".", "split", "(", "decl_string", ")", "for", "i", ",", "arg", "in", "enumerate", "(", "args", ")", ":", "args", "[", "i", "]", "=", "self", ".", "normalize", "(", "arg", ")", "return", "self", ".", "join", "(", "name", ",", "args", ",", "arg_separator", ")" ]
[ 202, 4 ]
[ 209, 51 ]
python
da
['eo', 'da', 'en']
False
tutorial8_preprocessing
()
## Converters Haystack's converter classes are designed to help you turn files on your computer into the documents that can be processed by the Haystack pipeline. There are file converters for txt, pdf, docx files as well as a converter that is powered by Apache Tika.
## Converters Haystack's converter classes are designed to help you turn files on your computer into the documents that can be processed by the Haystack pipeline. There are file converters for txt, pdf, docx files as well as a converter that is powered by Apache Tika.
def tutorial8_preprocessing():
    # This fetches some sample files to work with
    doc_dir = "data/preprocessing_tutorial"
    s3_url = "https://s3.eu-central-1.amazonaws.com/deepset.ai-farm-qa/datasets/documents/preprocessing_tutorial.zip"
    fetch_archive_from_http(url=s3_url, output_dir=doc_dir)

    """
    ## Converters

    Haystack's converter classes are designed to help you turn files on your computer into the documents
    that can be processed by the Haystack pipeline.
    There are file converters for txt, pdf, docx files as well as a converter that is powered by Apache Tika.
    """

    # Here are some examples of how you would use file converters
    converter = TextConverter(remove_numeric_tables=True, valid_languages=["en"])
    doc_txt = converter.convert(file_path="data/preprocessing_tutorial/classics.txt", meta=None)

    converter = PDFToTextConverter(remove_numeric_tables=True, valid_languages=["en"])
    doc_pdf = converter.convert(file_path="data/preprocessing_tutorial/bert.pdf", meta=None)

    converter = DocxToTextConverter(remove_numeric_tables=True, valid_languages=["en"])
    doc_docx = converter.convert(file_path="data/preprocessing_tutorial/heavy_metal.docx", meta=None)

    # Haystack also has a convenience function that will automatically apply the right converter to each file in a directory.
    all_docs = convert_files_to_dicts(dir_path="data/preprocessing_tutorial")

    """
    ## PreProcessor

    The PreProcessor class is designed to help you clean text and split text into sensible units.
    File splitting can have a very significant impact on the system's performance.
    Have a look at the [Preprocessing](https://haystack.deepset.ai/docs/latest/preprocessingmd)
    and [Optimization](https://haystack.deepset.ai/docs/latest/optimizationmd) pages on our website for more details.
    """

    # This is a default usage of the PreProcessor.
    # Here, it performs cleaning of consecutive whitespaces
    # and splits a single large document into smaller documents.
    # Each document is up to 1000 words long and document breaks cannot fall in the middle of sentences
    # Note how the single document passed into the document gets split into 5 smaller documents
    preprocessor = PreProcessor(
        clean_empty_lines=True,
        clean_whitespace=True,
        clean_header_footer=False,
        split_by="word",
        split_length=1000,
        split_respect_sentence_boundary=True
    )
    docs_default = preprocessor.process(doc_txt)
    print(f"n_docs_input: 1\nn_docs_output: {len(docs_default)}")

    """
    ## Cleaning

    - `clean_empty_lines` will normalize 3 or more consecutive empty lines to be just a two empty lines
    - `clean_whitespace` will remove any whitespace at the beginning or end of each line in the text
    - `clean_header_footer` will remove any long header or footer texts that are repeated on each page

    ## Splitting
    By default, the PreProcessor will respect sentence boundaries, meaning that documents will not start or end
    midway through a sentence.
    This will help reduce the possibility of answer phrases being split between two documents.
    This feature can be turned off by setting `split_respect_sentence_boundary=False`.
    """

    # Not respecting sentence boundary vs respecting sentence boundary
    preprocessor_nrsb = PreProcessor(split_respect_sentence_boundary=False)
    docs_nrsb = preprocessor_nrsb.process(doc_txt)

    print("RESPECTING SENTENCE BOUNDARY")
    end_text = docs_default[0]["text"][-50:]
    print("End of document: \"..." + end_text + "\"")
    print()
    print("NOT RESPECTING SENTENCE BOUNDARY")
    end_text_nrsb = docs_nrsb[0]["text"][-50:]
    print("End of document: \"..." + end_text_nrsb + "\"")

    """
    A commonly used strategy to split long documents, especially in the field of Question Answering,
    is the sliding window approach. If `split_length=10` and `split_overlap=3`, your documents will look like this:

    - doc1 = words[0:10]
    - doc2 = words[7:17]
    - doc3 = words[14:24]
    - ...

    You can use this strategy by following the code below.
    """

    # Sliding window approach
    preprocessor_sliding_window = PreProcessor(
        split_overlap=3,
        split_length=10,
        split_respect_sentence_boundary=False
    )
    docs_sliding_window = preprocessor_sliding_window.process(doc_txt)

    doc1 = docs_sliding_window[0]["text"][:200]
    doc2 = docs_sliding_window[1]["text"][:100]
    doc3 = docs_sliding_window[2]["text"][:100]

    print("Document 1: \"" + doc1 + "...\"")
    print("Document 2: \"" + doc2 + "...\"")
    print("Document 3: \"" + doc3 + "...\"")
[ "def", "tutorial8_preprocessing", "(", ")", ":", "# This fetches some sample files to work with", "doc_dir", "=", "\"data/preprocessing_tutorial\"", "s3_url", "=", "\"https://s3.eu-central-1.amazonaws.com/deepset.ai-farm-qa/datasets/documents/preprocessing_tutorial.zip\"", "fetch_archive_from_http", "(", "url", "=", "s3_url", ",", "output_dir", "=", "doc_dir", ")", "# Here are some examples of how you would use file converters", "converter", "=", "TextConverter", "(", "remove_numeric_tables", "=", "True", ",", "valid_languages", "=", "[", "\"en\"", "]", ")", "doc_txt", "=", "converter", ".", "convert", "(", "file_path", "=", "\"data/preprocessing_tutorial/classics.txt\"", ",", "meta", "=", "None", ")", "converter", "=", "PDFToTextConverter", "(", "remove_numeric_tables", "=", "True", ",", "valid_languages", "=", "[", "\"en\"", "]", ")", "doc_pdf", "=", "converter", ".", "convert", "(", "file_path", "=", "\"data/preprocessing_tutorial/bert.pdf\"", ",", "meta", "=", "None", ")", "converter", "=", "DocxToTextConverter", "(", "remove_numeric_tables", "=", "True", ",", "valid_languages", "=", "[", "\"en\"", "]", ")", "doc_docx", "=", "converter", ".", "convert", "(", "file_path", "=", "\"data/preprocessing_tutorial/heavy_metal.docx\"", ",", "meta", "=", "None", ")", "# Haystack also has a convenience function that will automatically apply the right converter to each file in a directory.", "all_docs", "=", "convert_files_to_dicts", "(", "dir_path", "=", "\"data/preprocessing_tutorial\"", ")", "\"\"\"\n \n ## PreProcessor\n \n The PreProcessor class is designed to help you clean text and split text into sensible units.\n File splitting can have a very significant impact on the system's performance.\n Have a look at the [Preprocessing](https://haystack.deepset.ai/docs/latest/preprocessingmd)\n and [Optimization](https://haystack.deepset.ai/docs/latest/optimizationmd) pages on our website for more details.\n \"\"\"", "# This is a default usage of the PreProcessor.", "# Here, it performs cleaning of consecutive whitespaces", "# and splits a single large document into smaller documents.", "# Each document is up to 1000 words long and document breaks cannot fall in the middle of sentences", "# Note how the single document passed into the document gets split into 5 smaller documents", "preprocessor", "=", "PreProcessor", "(", "clean_empty_lines", "=", "True", ",", "clean_whitespace", "=", "True", ",", "clean_header_footer", "=", "False", ",", "split_by", "=", "\"word\"", ",", "split_length", "=", "1000", ",", "split_respect_sentence_boundary", "=", "True", ")", "docs_default", "=", "preprocessor", ".", "process", "(", "doc_txt", ")", "print", "(", "f\"n_docs_input: 1\\nn_docs_output: {len(docs_default)}\"", ")", "\"\"\"\n ## Cleaning\n \n - `clean_empty_lines` will normalize 3 or more consecutive empty lines to be just a two empty lines\n - `clean_whitespace` will remove any whitespace at the beginning or end of each line in the text\n - `clean_header_footer` will remove any long header or footer texts that are repeated on each page\n \n ## Splitting\n By default, the PreProcessor will respect sentence boundaries, meaning that documents will not start or end\n midway through a sentence.\n This will help reduce the possibility of answer phrases being split between two documents.\n This feature can be turned off by setting `split_respect_sentence_boundary=False`.\n \"\"\"", "# Not respecting sentence boundary vs respecting sentence boundary", "preprocessor_nrsb", "=", "PreProcessor", "(", 
"split_respect_sentence_boundary", "=", "False", ")", "docs_nrsb", "=", "preprocessor_nrsb", ".", "process", "(", "doc_txt", ")", "print", "(", "\"RESPECTING SENTENCE BOUNDARY\"", ")", "end_text", "=", "docs_default", "[", "0", "]", "[", "\"text\"", "]", "[", "-", "50", ":", "]", "print", "(", "\"End of document: \\\"...\"", "+", "end_text", "+", "\"\\\"\"", ")", "print", "(", ")", "print", "(", "\"NOT RESPECTING SENTENCE BOUNDARY\"", ")", "end_text_nrsb", "=", "docs_nrsb", "[", "0", "]", "[", "\"text\"", "]", "[", "-", "50", ":", "]", "print", "(", "\"End of document: \\\"...\"", "+", "end_text_nrsb", "+", "\"\\\"\"", ")", "\"\"\"\n A commonly used strategy to split long documents, especially in the field of Question Answering,\n is the sliding window approach. If `split_length=10` and `split_overlap=3`, your documents will look like this:\n \n - doc1 = words[0:10]\n - doc2 = words[7:17]\n - doc3 = words[14:24]\n - ...\n \n You can use this strategy by following the code below.\n \"\"\"", "# Sliding window approach", "preprocessor_sliding_window", "=", "PreProcessor", "(", "split_overlap", "=", "3", ",", "split_length", "=", "10", ",", "split_respect_sentence_boundary", "=", "False", ")", "docs_sliding_window", "=", "preprocessor_sliding_window", ".", "process", "(", "doc_txt", ")", "doc1", "=", "docs_sliding_window", "[", "0", "]", "[", "\"text\"", "]", "[", ":", "200", "]", "doc2", "=", "docs_sliding_window", "[", "1", "]", "[", "\"text\"", "]", "[", ":", "100", "]", "doc3", "=", "docs_sliding_window", "[", "2", "]", "[", "\"text\"", "]", "[", ":", "100", "]", "print", "(", "\"Document 1: \\\"\"", "+", "doc1", "+", "\"...\\\"\"", ")", "print", "(", "\"Document 2: \\\"\"", "+", "doc2", "+", "\"...\\\"\"", ")", "print", "(", "\"Document 3: \\\"\"", "+", "doc3", "+", "\"...\\\"\"", ")" ]
[ 29, 0 ]
[ 141, 44 ]
python
en
['en', 'error', 'th']
False
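The sliding-window strategy described in the tutorial text above (split_length=10, split_overlap=3 giving words[0:10], words[7:17], words[14:24], ...) can be sketched with plain list slicing. This is only an illustration of the indexing, not Haystack's PreProcessor implementation, and sliding_window_split is a made-up helper.

# Standalone sketch of the sliding-window indexing from the tutorial text above.
def sliding_window_split(words, split_length=10, split_overlap=3):
    step = split_length - split_overlap          # 7 words between window starts
    return [words[i:i + split_length] for i in range(0, len(words), step)]


words = [f"w{i}" for i in range(24)]
chunks = sliding_window_split(words)
print([(chunk[0], chunk[-1]) for chunk in chunks])
# [('w0', 'w9'), ('w7', 'w16'), ('w14', 'w23'), ('w21', 'w23')]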
reverse_array_dict
(dictionary)
Returns a reversed version a dictionary of keys to list-like objects. Each value in each list-like becomes a key in the returned dictionary mapping to its key in the provided dictionary.
Returns a reversed version a dictionary of keys to list-like objects. Each value in each list-like becomes a key in the returned dictionary mapping to its key in the provided dictionary.
def reverse_array_dict(dictionary):
    """
    Returns a reversed version a dictionary of keys to list-like objects. Each value
    in each list-like becomes a key in the returned dictionary mapping to its key
    in the provided dictionary.
    """
    return_dict = {}
    for label, values in dictionary.items():
        for value in values:
            return_dict[value] = label
    return return_dict
[ "def", "reverse_array_dict", "(", "dictionary", ")", ":", "return_dict", "=", "{", "}", "for", "label", ",", "values", "in", "dictionary", ".", "items", "(", ")", ":", "for", "value", "in", "values", ":", "return_dict", "[", "value", "]", "=", "label", "return", "return_dict" ]
[ 35, 0 ]
[ 44, 22 ]
python
en
['en', 'ja', 'th']
False
list_prod
(lst)
Takes the product of elements in a list.
Takes the product of elements in a list.
def list_prod(lst):
    """Takes the product of elements in a list."""
    return functools.reduce(operator.mul, lst)
[ "def", "list_prod", "(", "lst", ")", ":", "return", "functools", ".", "reduce", "(", "operator", ".", "mul", ",", "lst", ")" ]
[ 46, 0 ]
[ 48, 46 ]
python
en
['en', 'en', 'en']
True
check_for_float
(array)
Check if a NumPy array-like contains floats. Parameters ---------- array : numpy.ndarray or convertible The array to check.
Check if a NumPy array-like contains floats. Parameters ---------- array : numpy.ndarray or convertible The array to check.
def check_for_float(array):
    """
    Check if a NumPy array-like contains floats.

    Parameters
    ----------
    array : numpy.ndarray or convertible
        The array to check.
    """
    try:
        return array.dtype.kind == 'f'
    except AttributeError:
        # in case it's not a numpy array it will probably have no dtype.
        return np.asarray(array).dtype.kind in numerical_dtype_kinds
[ "def", "check_for_float", "(", "array", ")", ":", "try", ":", "return", "array", ".", "dtype", ".", "kind", "==", "'f'", "except", "AttributeError", ":", "# in case it's not a numpy array it will probably have no dtype.\r", "return", "np", ".", "asarray", "(", "array", ")", ".", "dtype", ".", "kind", "in", "numerical_dtype_kinds" ]
[ 50, 0 ]
[ 63, 68 ]
python
en
['en', 'ja', 'th']
False
create_cfmask_clean_mask
(cfmask)
Description: Create a clean mask for clear land/water pixels, i.e. mask out shadow, snow, cloud, and no data ----- Input: cfmask (xarray) - cf_mask from the ledaps products Output: clean_mask (boolean numpy array) - clear land/water mask
Description: Create a clean mask for clear land/water pixels, i.e. mask out shadow, snow, cloud, and no data ----- Input: cfmask (xarray) - cf_mask from the ledaps products Output: clean_mask (boolean numpy array) - clear land/water mask
def create_cfmask_clean_mask(cfmask):
    """
    Description:
      Create a clean mask for clear land/water pixels,
      i.e. mask out shadow, snow, cloud, and no data
    -----
    Input:
      cfmask (xarray) - cf_mask from the ledaps products
    Output:
      clean_mask (boolean numpy array) - clear land/water mask
    """

    #########################
    # cfmask values:        #
    #   0 - clear           #
    #   1 - water           #
    #   2 - cloud shadow    #
    #   3 - snow            #
    #   4 - cloud           #
    #   255 - fill          #
    #########################

    clean_mask = (cfmask == 0) | (cfmask == 1)
    return clean_mask.values
[ "def", "create_cfmask_clean_mask", "(", "cfmask", ")", ":", "#########################\r", "# cfmask values: #\r", "# 0 - clear #\r", "# 1 - water #\r", "# 2 - cloud shadow #\r", "# 3 - snow #\r", "# 4 - cloud #\r", "# 255 - fill #\r", "#########################\r", "clean_mask", "=", "(", "cfmask", "==", "0", ")", "|", "(", "cfmask", "==", "1", ")", "return", "clean_mask", ".", "values" ]
[ 65, 0 ]
[ 88, 28 ]
python
en
['en', 'ja', 'th']
False
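A quick check of the mask expression above, using the cfmask value table from the comment block (0 = clear, 1 = water); the sample pixel values are invented for illustration.

import numpy as np
import xarray as xr

# Only clear (0) and water (1) pixels survive; shadow, snow, cloud and fill are masked out.
cfmask = xr.DataArray(np.array([0, 1, 2, 3, 4, 255]))
print(((cfmask == 0) | (cfmask == 1)).values)
# [ True  True False False False False]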
create_default_clean_mask
(dataset_in)
Description: Creates a data mask that masks nothing. ----- Inputs: dataset_in (xarray.Dataset) - dataset retrieved from the Data Cube. Throws: ValueError - if dataset_in is an empty xarray.Dataset.
Description: Creates a data mask that masks nothing. ----- Inputs: dataset_in (xarray.Dataset) - dataset retrieved from the Data Cube. Throws: ValueError - if dataset_in is an empty xarray.Dataset.
def create_default_clean_mask(dataset_in):
    """
    Description:
        Creates a data mask that masks nothing.
    -----
    Inputs:
        dataset_in (xarray.Dataset) - dataset retrieved from the Data Cube.
    Throws:
        ValueError - if dataset_in is an empty xarray.Dataset.
    """
    data_vars = dataset_in.data_vars
    if len(data_vars) != 0:
        first_data_var = next(iter(data_vars))
        clean_mask = np.ones(dataset_in[first_data_var].shape).astype(np.bool)
        return clean_mask
    else:
        raise ValueError('`dataset_in` has no data!')
[ "def", "create_default_clean_mask", "(", "dataset_in", ")", ":", "data_vars", "=", "dataset_in", ".", "data_vars", "if", "len", "(", "data_vars", ")", "!=", "0", ":", "first_data_var", "=", "next", "(", "iter", "(", "data_vars", ")", ")", "clean_mask", "=", "np", ".", "ones", "(", "dataset_in", "[", "first_data_var", "]", ".", "shape", ")", ".", "astype", "(", "np", ".", "bool", ")", "return", "clean_mask", "else", ":", "raise", "ValueError", "(", "'`dataset_in` has no data!'", ")" ]
[ 90, 0 ]
[ 106, 53 ]
python
en
['en', 'ja', 'th']
False
get_spatial_ref
(crs)
Description: Get the spatial reference of a given crs ----- Input: crs (datacube.model.CRS) - Example: CRS('EPSG:4326') Output: ref (str) - spatial reference of given crs
Description: Get the spatial reference of a given crs ----- Input: crs (datacube.model.CRS) - Example: CRS('EPSG:4326') Output: ref (str) - spatial reference of given crs
def get_spatial_ref(crs):
    """
    Description:
      Get the spatial reference of a given crs
    -----
    Input:
      crs (datacube.model.CRS) - Example: CRS('EPSG:4326')
    Output:
      ref (str) - spatial reference of given crs
    """
    crs_str = str(crs)
    epsg_code = int(crs_str.split(':')[1])
    ref = osr.SpatialReference()
    ref.ImportFromEPSG(epsg_code)
    return str(ref)
[ "def", "get_spatial_ref", "(", "crs", ")", ":", "crs_str", "=", "str", "(", "crs", ")", "epsg_code", "=", "int", "(", "crs_str", ".", "split", "(", "':'", ")", "[", "1", "]", ")", "ref", "=", "osr", ".", "SpatialReference", "(", ")", "ref", ".", "ImportFromEPSG", "(", "epsg_code", ")", "return", "str", "(", "ref", ")" ]
[ 108, 0 ]
[ 123, 19 ]
python
en
['en', 'ja', 'th']
False
perform_timeseries_analysis
(dataset_in, band_name, intermediate_product=None, no_data=-9999, operation="mean")
Description: ----- Input: dataset_in (xarray.DataSet) - dataset with one variable to perform timeseries on band_name: name of the band to create stats for. intermediate_product: result of this function for previous data, to be combined here Output: dataset_out (xarray.DataSet) - dataset containing variables: normalized_data, total_data, total_clean
Description: ----- Input: dataset_in (xarray.DataSet) - dataset with one variable to perform timeseries on band_name: name of the band to create stats for. intermediate_product: result of this function for previous data, to be combined here Output: dataset_out (xarray.DataSet) - dataset containing variables: normalized_data, total_data, total_clean
def perform_timeseries_analysis(dataset_in, band_name, intermediate_product=None, no_data=-9999, operation="mean"):
    """
    Description:
    -----
    Input:
      dataset_in (xarray.DataSet) - dataset with one variable to perform timeseries on
      band_name: name of the band to create stats for.
      intermediate_product: result of this function for previous data, to be combined here
    Output:
      dataset_out (xarray.DataSet) - dataset containing
        variables: normalized_data, total_data, total_clean
    """
    assert operation in ['mean', 'max', 'min'], "Please enter a valid operation."

    data = dataset_in[band_name]
    data = data.where(data != no_data)

    processed_data_sum = data.sum('time')

    clean_data = data.notnull()
    clean_data_sum = clean_data.astype('bool').sum('time')

    dataset_out = None
    if intermediate_product is None:
        processed_data_normalized = processed_data_sum / clean_data_sum
        dataset_out = xr.Dataset(
            {
                'normalized_data': processed_data_normalized,
                'min': data.min(dim='time'),
                'max': data.max(dim='time'),
                'total_data': processed_data_sum,
                'total_clean': clean_data_sum
            },
            coords={'latitude': dataset_in.latitude,
                    'longitude': dataset_in.longitude})
    else:
        dataset_out = intermediate_product
        dataset_out['total_data'] += processed_data_sum
        dataset_out['total_clean'] += clean_data_sum
        dataset_out['normalized_data'] = dataset_out['total_data'] / dataset_out['total_clean']
        dataset_out['min'] = xr.concat([dataset_out['min'], data.min(dim='time')], dim='time').min(dim='time')
        dataset_out['max'] = xr.concat([dataset_out['max'], data.max(dim='time')], dim='time').max(dim='time')

    nan_to_num(dataset_out, 0)

    return dataset_out
[ "def", "perform_timeseries_analysis", "(", "dataset_in", ",", "band_name", ",", "intermediate_product", "=", "None", ",", "no_data", "=", "-", "9999", ",", "operation", "=", "\"mean\"", ")", ":", "assert", "operation", "in", "[", "'mean'", ",", "'max'", ",", "'min'", "]", ",", "\"Please enter a valid operation.\"", "data", "=", "dataset_in", "[", "band_name", "]", "data", "=", "data", ".", "where", "(", "data", "!=", "no_data", ")", "processed_data_sum", "=", "data", ".", "sum", "(", "'time'", ")", "clean_data", "=", "data", ".", "notnull", "(", ")", "clean_data_sum", "=", "clean_data", ".", "astype", "(", "'bool'", ")", ".", "sum", "(", "'time'", ")", "dataset_out", "=", "None", "if", "intermediate_product", "is", "None", ":", "processed_data_normalized", "=", "processed_data_sum", "/", "clean_data_sum", "dataset_out", "=", "xr", ".", "Dataset", "(", "{", "'normalized_data'", ":", "processed_data_normalized", ",", "'min'", ":", "data", ".", "min", "(", "dim", "=", "'time'", ")", ",", "'max'", ":", "data", ".", "max", "(", "dim", "=", "'time'", ")", ",", "'total_data'", ":", "processed_data_sum", ",", "'total_clean'", ":", "clean_data_sum", "}", ",", "coords", "=", "{", "'latitude'", ":", "dataset_in", ".", "latitude", ",", "'longitude'", ":", "dataset_in", ".", "longitude", "}", ")", "else", ":", "dataset_out", "=", "intermediate_product", "dataset_out", "[", "'total_data'", "]", "+=", "processed_data_sum", "dataset_out", "[", "'total_clean'", "]", "+=", "clean_data_sum", "dataset_out", "[", "'normalized_data'", "]", "=", "dataset_out", "[", "'total_data'", "]", "/", "dataset_out", "[", "'total_clean'", "]", "dataset_out", "[", "'min'", "]", "=", "xr", ".", "concat", "(", "[", "dataset_out", "[", "'min'", "]", ",", "data", ".", "min", "(", "dim", "=", "'time'", ")", "]", ",", "dim", "=", "'time'", ")", ".", "min", "(", "dim", "=", "'time'", ")", "dataset_out", "[", "'max'", "]", "=", "xr", ".", "concat", "(", "[", "dataset_out", "[", "'max'", "]", ",", "data", ".", "max", "(", "dim", "=", "'time'", ")", "]", ",", "dim", "=", "'time'", ")", ".", "max", "(", "dim", "=", "'time'", ")", "nan_to_num", "(", "dataset_out", ",", "0", ")", "return", "dataset_out" ]
[ 125, 0 ]
[ 173, 22 ]
python
en
['en', 'ja', 'th']
False
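The intermediate_product branch above keeps running totals (total_data, total_clean) and recomputes normalized_data from them. A minimal NumPy sketch of that bookkeeping, with made-up chunk values and a hypothetical "time" axis, looks like this.

import numpy as np

# The mean over all chunks can be rebuilt from per-chunk sums and per-chunk
# counts of valid (non-nodata) pixels, which is what total_data / total_clean store.
no_data = -9999
chunk_1 = np.array([[1.0, no_data], [3.0, 5.0]])
chunk_2 = np.array([[2.0, 4.0], [no_data, 7.0]])

total_data = total_clean = 0
for chunk in (chunk_1, chunk_2):
    valid = chunk != no_data
    total_data = total_data + np.where(valid, chunk, 0).sum(axis=0)  # sum over the "time" axis
    total_clean = total_clean + valid.sum(axis=0)                    # count of valid acquisitions

print(total_data / total_clean)  # per-pixel mean over both chunks -> [2.0, 5.333...]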
nan_to_num
(data, number)
Converts all nan values in `data` to `number`. Parameters ---------- data: xarray.Dataset or xarray.DataArray
Converts all nan values in `data` to `number`. Parameters ---------- data: xarray.Dataset or xarray.DataArray
def nan_to_num(data, number):
    """
    Converts all nan values in `data` to `number`.

    Parameters
    ----------
    data: xarray.Dataset or xarray.DataArray
    """
    if isinstance(data, xr.Dataset):
        for key in list(data.data_vars):
            data[key].values[np.isnan(data[key].values)] = number
    elif isinstance(data, xr.DataArray):
        data.values[np.isnan(data.values)] = number
[ "def", "nan_to_num", "(", "data", ",", "number", ")", ":", "if", "isinstance", "(", "data", ",", "xr", ".", "Dataset", ")", ":", "for", "key", "in", "list", "(", "data", ".", "data_vars", ")", ":", "data", "[", "key", "]", ".", "values", "[", "np", ".", "isnan", "(", "data", "[", "key", "]", ".", "values", ")", "]", "=", "number", "elif", "isinstance", "(", "data", ",", "xr", ".", "DataArray", ")", ":", "data", ".", "values", "[", "np", ".", "isnan", "(", "data", ".", "values", ")", "]", "=", "number" ]
[ 176, 0 ]
[ 188, 51 ]
python
en
['en', 'ja', 'th']
False
clear_attrs
(dataset)
Clear out all attributes on an xarray dataset to write to disk.
Clear out all attributes on an xarray dataset to write to disk.
def clear_attrs(dataset):
    """Clear out all attributes on an xarray dataset to write to disk."""
    dataset.attrs = collections.OrderedDict()
    for band in dataset.data_vars:
        dataset[band].attrs = collections.OrderedDict()
[ "def", "clear_attrs", "(", "dataset", ")", ":", "dataset", ".", "attrs", "=", "collections", ".", "OrderedDict", "(", ")", "for", "band", "in", "dataset", ".", "data_vars", ":", "dataset", "[", "band", "]", ".", "attrs", "=", "collections", ".", "OrderedDict", "(", ")" ]
[ 191, 0 ]
[ 195, 55 ]
python
en
['en', 'en', 'en']
True
create_bit_mask
(data_array, valid_bits, no_data=-9999)
Create a boolean bit mask from a list of valid bits Args: data_array: xarray data array to extract bit information for. valid_bits: array of ints representing what bits should be considered valid. no_data: no_data value for the data array. Returns: Boolean mask signifying valid data.
Create a boolean bit mask from a list of valid bits Args: data_array: xarray data array to extract bit information for. valid_bits: array of ints representing what bits should be considered valid. no_data: no_data value for the data array. Returns: Boolean mask signifying valid data.
def create_bit_mask(data_array, valid_bits, no_data=-9999):
    """Create a boolean bit mask from a list of valid bits

    Args:
        data_array: xarray data array to extract bit information for.
        valid_bits: array of ints representing what bits should be considered valid.
        no_data: no_data value for the data array.

    Returns:
        Boolean mask signifying valid data.
    """
    assert isinstance(valid_bits, list) and isinstance(valid_bits[0], int), "Valid bits must be a list of integer bits"
    # do bitwise and on valid mask - all zeros means no intersection e.g. invalid else return a truthy value?
    valid_mask = sum([1 << valid_bit for valid_bit in valid_bits])
    clean_mask = (data_array & valid_mask).astype('bool')
    return clean_mask.values
[ "def", "create_bit_mask", "(", "data_array", ",", "valid_bits", ",", "no_data", "=", "-", "9999", ")", ":", "assert", "isinstance", "(", "valid_bits", ",", "list", ")", "and", "isinstance", "(", "valid_bits", "[", "0", "]", ",", "int", ")", ",", "\"Valid bits must be a list of integer bits\"", "#do bitwise and on valid mask - all zeros means no intersection e.g. invalid else return a truthy value?\r", "valid_mask", "=", "sum", "(", "[", "1", "<<", "valid_bit", "for", "valid_bit", "in", "valid_bits", "]", ")", "clean_mask", "=", "(", "data_array", "&", "valid_mask", ")", ".", "astype", "(", "'bool'", ")", "return", "clean_mask", ".", "values" ]
[ 198, 0 ]
[ 214, 28 ]
python
en
['en', 'en', 'en']
True
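A small worked example of the bit arithmetic above, with hypothetical valid bits [1, 5]: valid_mask becomes 0b100010 = 34, and a QA value passes if any of those bits is set; the QA values are invented for illustration.

import numpy as np
import xarray as xr

valid_bits = [1, 5]
valid_mask = sum(1 << bit for bit in valid_bits)        # 2 + 32 = 34
pixel_qa = xr.DataArray(np.array([0, 2, 32, 34, 4]))    # made-up QA values
print((pixel_qa & valid_mask).astype('bool').values)
# [False  True  True  True False]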
add_timestamp_data_to_xr
(dataset)
Add timestamp data to an xarray dataset using the time dimension. Adds both a timestamp and a human readable date int to a dataset - int32 format required. modifies the dataset in place.
Add timestamp data to an xarray dataset using the time dimension. Adds both a timestamp and a human readable date int to a dataset - int32 format required. modifies the dataset in place.
def add_timestamp_data_to_xr(dataset):
    """Add timestamp data to an xarray dataset using the time dimension.

    Adds both a timestamp and a human readable date int to a dataset - int32 format required.
    modifies the dataset in place.
    """
    dims_data_var = list(dataset.data_vars)[0]

    timestamp_data = np.full(dataset[dims_data_var].values.shape, 0, dtype="int32")
    date_data = np.full(dataset[dims_data_var].values.shape, 0, dtype="int32")

    for index, acq_date in enumerate(dataset.time.values.astype('M8[ms]').tolist()):
        timestamp_data[index::] = acq_date.timestamp()
        date_data[index::] = int(acq_date.strftime("%Y%m%d"))

    dataset['timestamp'] = xr.DataArray(
        timestamp_data,
        dims=('time', 'latitude', 'longitude'),
        coords={'latitude': dataset.latitude,
                'longitude': dataset.longitude,
                'time': dataset.time})
    dataset['date'] = xr.DataArray(
        date_data,
        dims=('time', 'latitude', 'longitude'),
        coords={'latitude': dataset.latitude,
                'longitude': dataset.longitude,
                'time': dataset.time})
[ "def", "add_timestamp_data_to_xr", "(", "dataset", ")", ":", "dims_data_var", "=", "list", "(", "dataset", ".", "data_vars", ")", "[", "0", "]", "timestamp_data", "=", "np", ".", "full", "(", "dataset", "[", "dims_data_var", "]", ".", "values", ".", "shape", ",", "0", ",", "dtype", "=", "\"int32\"", ")", "date_data", "=", "np", ".", "full", "(", "dataset", "[", "dims_data_var", "]", ".", "values", ".", "shape", ",", "0", ",", "dtype", "=", "\"int32\"", ")", "for", "index", ",", "acq_date", "in", "enumerate", "(", "dataset", ".", "time", ".", "values", ".", "astype", "(", "'M8[ms]'", ")", ".", "tolist", "(", ")", ")", ":", "timestamp_data", "[", "index", ":", ":", "]", "=", "acq_date", ".", "timestamp", "(", ")", "date_data", "[", "index", ":", ":", "]", "=", "int", "(", "acq_date", ".", "strftime", "(", "\"%Y%m%d\"", ")", ")", "dataset", "[", "'timestamp'", "]", "=", "xr", ".", "DataArray", "(", "timestamp_data", ",", "dims", "=", "(", "'time'", ",", "'latitude'", ",", "'longitude'", ")", ",", "coords", "=", "{", "'latitude'", ":", "dataset", ".", "latitude", ",", "'longitude'", ":", "dataset", ".", "longitude", ",", "'time'", ":", "dataset", ".", "time", "}", ")", "dataset", "[", "'date'", "]", "=", "xr", ".", "DataArray", "(", "date_data", ",", "dims", "=", "(", "'time'", ",", "'latitude'", ",", "'longitude'", ")", ",", "coords", "=", "{", "'latitude'", ":", "dataset", ".", "latitude", ",", "'longitude'", ":", "dataset", ".", "longitude", ",", "'time'", ":", "dataset", ".", "time", "}", ")" ]
[ 217, 0 ]
[ 242, 38 ]
python
en
['en', 'en', 'en']
True
write_geotiff_from_xr
(tif_path, data, bands=None, no_data=-9999, crs="EPSG:4326", x_coord='longitude', y_coord='latitude')
NOTE: Instead of this function, please use `import_export.export_xarray_to_geotiff()`. Export a GeoTIFF from an `xarray.Dataset`. Parameters ---------- tif_path: string The path to write the GeoTIFF file to. You should include the file extension. data: xarray.Dataset or xarray.DataArray bands: list of string The bands to write - in the order they should be written. Ignored if `data` is an `xarray.DataArray`. no_data: int The nodata value. crs: string The CRS of the output. x_coord, y_coord: string The string names of the x and y dimensions.
NOTE: Instead of this function, please use `import_export.export_xarray_to_geotiff()`. Export a GeoTIFF from an `xarray.Dataset`. Parameters ---------- tif_path: string The path to write the GeoTIFF file to. You should include the file extension. data: xarray.Dataset or xarray.DataArray bands: list of string The bands to write - in the order they should be written. Ignored if `data` is an `xarray.DataArray`. no_data: int The nodata value. crs: string The CRS of the output. x_coord, y_coord: string The string names of the x and y dimensions.
def write_geotiff_from_xr(tif_path, data, bands=None, no_data=-9999, crs="EPSG:4326",
                          x_coord='longitude', y_coord='latitude'):
    """
    NOTE: Instead of this function, please use `import_export.export_xarray_to_geotiff()`.

    Export a GeoTIFF from an `xarray.Dataset`.

    Parameters
    ----------
    tif_path: string
        The path to write the GeoTIFF file to. You should include the file extension.
    data: xarray.Dataset or xarray.DataArray
    bands: list of string
        The bands to write - in the order they should be written.
        Ignored if `data` is an `xarray.DataArray`.
    no_data: int
        The nodata value.
    crs: string
        The CRS of the output.
    x_coord, y_coord: string
        The string names of the x and y dimensions.
    """
    if isinstance(data, xr.DataArray):
        height, width = data.sizes[y_coord], data.sizes[x_coord]
        count, dtype = 1, data.dtype
    else:
        if bands is None:
            bands = list(data.data_vars.keys())
        else:
            assrt_msg_begin = "The `data` parameter is an `xarray.Dataset`. "
            assert isinstance(bands, list), assrt_msg_begin + "Bands must be a list of strings."
            assert len(bands) > 0 and isinstance(bands[0], str), assrt_msg_begin + "You must supply at least one band."
        height, width = data.dims[y_coord], data.dims[x_coord]
        count, dtype = len(bands), data[bands[0]].dtype
    with rasterio.open(
            tif_path,
            'w',
            driver='GTiff',
            height=height,
            width=width,
            count=count,
            dtype=dtype,
            crs=crs,
            transform=_get_transform_from_xr(data, x_coord=x_coord, y_coord=y_coord),
            nodata=no_data) as dst:
        if isinstance(data, xr.DataArray):
            dst.write(data.values, 1)
        else:
            for index, band in enumerate(bands):
                dst.write(data[band].values, index + 1)
    dst.close()
[ "def", "write_geotiff_from_xr", "(", "tif_path", ",", "data", ",", "bands", "=", "None", ",", "no_data", "=", "-", "9999", ",", "crs", "=", "\"EPSG:4326\"", ",", "x_coord", "=", "'longitude'", ",", "y_coord", "=", "'latitude'", ")", ":", "if", "isinstance", "(", "data", ",", "xr", ".", "DataArray", ")", ":", "height", ",", "width", "=", "data", ".", "sizes", "[", "y_coord", "]", ",", "data", ".", "sizes", "[", "x_coord", "]", "count", ",", "dtype", "=", "1", ",", "data", ".", "dtype", "else", ":", "if", "bands", "is", "None", ":", "bands", "=", "list", "(", "data", ".", "data_vars", ".", "keys", "(", ")", ")", "else", ":", "assrt_msg_begin", "=", "\"The `data` parameter is an `xarray.Dataset`. \"", "assert", "isinstance", "(", "bands", ",", "list", ")", ",", "assrt_msg_begin", "+", "\"Bands must be a list of strings.\"", "assert", "len", "(", "bands", ")", ">", "0", "and", "isinstance", "(", "bands", "[", "0", "]", ",", "str", ")", ",", "assrt_msg_begin", "+", "\"You must supply at least one band.\"", "height", ",", "width", "=", "data", ".", "dims", "[", "y_coord", "]", ",", "data", ".", "dims", "[", "x_coord", "]", "count", ",", "dtype", "=", "len", "(", "bands", ")", ",", "data", "[", "bands", "[", "0", "]", "]", ".", "dtype", "with", "rasterio", ".", "open", "(", "tif_path", ",", "'w'", ",", "driver", "=", "'GTiff'", ",", "height", "=", "height", ",", "width", "=", "width", ",", "count", "=", "count", ",", "dtype", "=", "dtype", ",", "crs", "=", "crs", ",", "transform", "=", "_get_transform_from_xr", "(", "data", ",", "x_coord", "=", "x_coord", ",", "y_coord", "=", "y_coord", ")", ",", "nodata", "=", "no_data", ")", "as", "dst", ":", "if", "isinstance", "(", "data", ",", "xr", ".", "DataArray", ")", ":", "dst", ".", "write", "(", "data", ".", "values", ",", "1", ")", "else", ":", "for", "index", ",", "band", "in", "enumerate", "(", "bands", ")", ":", "dst", ".", "write", "(", "data", "[", "band", "]", ".", "values", ",", "index", "+", "1", ")", "dst", ".", "close", "(", ")" ]
[ 245, 0 ]
[ 295, 15 ]
python
en
['en', 'ja', 'th']
False
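A hedged usage sketch for write_geotiff_from_xr, assuming the function (and its rasterio and _get_transform_from_xr dependencies) from the module above is already in scope; the coordinates, band names, and output path are invented for illustration.

import numpy as np
import xarray as xr

# Build a tiny two-band dataset with latitude/longitude coords and write it out.
lat = np.linspace(0.5, 0.0, 50)
lon = np.linspace(30.0, 30.5, 50)
ds = xr.Dataset(
    {
        "red": (("latitude", "longitude"), np.random.randint(0, 4000, (50, 50)).astype("int16")),
        "nir": (("latitude", "longitude"), np.random.randint(0, 4000, (50, 50)).astype("int16")),
    },
    coords={"latitude": lat, "longitude": lon},
)
# write_geotiff_from_xr is the function shown above; the path is made up.
write_geotiff_from_xr("/tmp/example.tif", ds, bands=["red", "nir"], no_data=-9999, crs="EPSG:4326")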
write_png_from_xr
(png_path, dataset, bands, png_filled_path=None, fill_color='red', scale=None, low_res=False, no_data=-9999, crs="EPSG:4326")
Write a rgb png from an xarray dataset. Note that using `low_res==True` currently causes the file(s) for `png_path` and `png_filled_path` to not be created. Args: png_path: path for the png to be written to. dataset: dataset to use for the png creation. bands: a list of three strings representing the bands and their order png_filled_path: optional png with no_data values filled fill_color: color to use as the no_data fill scale: desired scale - tuple like (0, 4000) for the upper and lower bounds
Write a rgb png from an xarray dataset. Note that using `low_res==True` currently causes the file(s) for `png_path` and `png_filled_path` to not be created. Args: png_path: path for the png to be written to. dataset: dataset to use for the png creation. bands: a list of three strings representing the bands and their order png_filled_path: optional png with no_data values filled fill_color: color to use as the no_data fill scale: desired scale - tuple like (0, 4000) for the upper and lower bounds
def write_png_from_xr(png_path, dataset, bands, png_filled_path=None, fill_color='red',
                      scale=None, low_res=False, no_data=-9999, crs="EPSG:4326"):
    """Write a rgb png from an xarray dataset.
    Note that using `low_res==True` currently causes the file(s)
    for `png_path` and `png_filled_path` to not be created.

    Args:
        png_path: path for the png to be written to.
        dataset: dataset to use for the png creation.
        bands: a list of three strings representing the bands and their order
        png_filled_path: optional png with no_data values filled
        fill_color: color to use as the no_data fill
        scale: desired scale - tuple like (0, 4000) for the upper and lower bounds
    """
    assert isinstance(bands, list), "Bands must a list of strings"
    assert len(bands) == 3 and isinstance(bands[0], str), "You must supply three string bands for a PNG."

    tif_path = os.path.join(os.path.dirname(png_path), str(uuid.uuid4()) + ".png")
    write_geotiff_from_xr(tif_path, dataset, bands, no_data=no_data, crs=crs)

    scale_string = ""
    if scale is not None and len(scale) == 2:
        scale_string = "-scale {} {} 0 255".format(scale[0], scale[1])
    elif scale is not None and len(scale) == 3:
        for index, scale_member in enumerate(scale):
            scale_string += " -scale_{} {} {} 0 255".format(index + 1, scale_member[0], scale_member[1])

    outsize_string = "-outsize 25% 25%" if low_res else ""

    cmd = "gdal_translate -ot Byte " + outsize_string + " " + scale_string + " -of PNG -b 1 -b 2 -b 3 " + tif_path + ' ' + png_path
    os.system(cmd)

    if png_filled_path is not None and fill_color is not None:
        cmd = "convert -transparent \"#000000\" " + png_path + " " + png_path
        os.system(cmd)
        cmd = "convert " + png_path + " -background " + \
            fill_color + " -alpha remove " + png_filled_path
        os.system(cmd)

    os.remove(tif_path)
[ "def", "write_png_from_xr", "(", "png_path", ",", "dataset", ",", "bands", ",", "png_filled_path", "=", "None", ",", "fill_color", "=", "'red'", ",", "scale", "=", "None", ",", "low_res", "=", "False", ",", "no_data", "=", "-", "9999", ",", "crs", "=", "\"EPSG:4326\"", ")", ":", "assert", "isinstance", "(", "bands", ",", "list", ")", ",", "\"Bands must a list of strings\"", "assert", "len", "(", "bands", ")", "==", "3", "and", "isinstance", "(", "bands", "[", "0", "]", ",", "str", ")", ",", "\"You must supply three string bands for a PNG.\"", "tif_path", "=", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "dirname", "(", "png_path", ")", ",", "str", "(", "uuid", ".", "uuid4", "(", ")", ")", "+", "\".png\"", ")", "write_geotiff_from_xr", "(", "tif_path", ",", "dataset", ",", "bands", ",", "no_data", "=", "no_data", ",", "crs", "=", "crs", ")", "scale_string", "=", "\"\"", "if", "scale", "is", "not", "None", "and", "len", "(", "scale", ")", "==", "2", ":", "scale_string", "=", "\"-scale {} {} 0 255\"", ".", "format", "(", "scale", "[", "0", "]", ",", "scale", "[", "1", "]", ")", "elif", "scale", "is", "not", "None", "and", "len", "(", "scale", ")", "==", "3", ":", "for", "index", ",", "scale_member", "in", "enumerate", "(", "scale", ")", ":", "scale_string", "+=", "\" -scale_{} {} {} 0 255\"", ".", "format", "(", "index", "+", "1", ",", "scale_member", "[", "0", "]", ",", "scale_member", "[", "1", "]", ")", "outsize_string", "=", "\"-outsize 25% 25%\"", "if", "low_res", "else", "\"\"", "cmd", "=", "\"gdal_translate -ot Byte \"", "+", "outsize_string", "+", "\" \"", "+", "scale_string", "+", "\" -of PNG -b 1 -b 2 -b 3 \"", "+", "tif_path", "+", "' '", "+", "png_path", "os", ".", "system", "(", "cmd", ")", "if", "png_filled_path", "is", "not", "None", "and", "fill_color", "is", "not", "None", ":", "cmd", "=", "\"convert -transparent \\\"#000000\\\" \"", "+", "png_path", "+", "\" \"", "+", "png_path", "os", ".", "system", "(", "cmd", ")", "cmd", "=", "\"convert \"", "+", "png_path", "+", "\" -background \"", "+", "fill_color", "+", "\" -alpha remove \"", "+", "png_filled_path", "os", ".", "system", "(", "cmd", ")", "os", ".", "remove", "(", "tif_path", ")" ]
[ 298, 0 ]
[ 337, 23 ]
python
en
['en', 'en', 'en']
True
write_single_band_png_from_xr
(png_path, dataset, band, color_scale=None, fill_color=None, interpolate=True, no_data=-9999, crs="EPSG:4326")
Write a pseudocolor png from an xarray dataset. Args: png_path: path for the png to be written to. dataset: dataset to use for the png creation. band: The band to write to a png. color_scale: path to a color scale compatible with gdal. fill_color: color to use as the no_data fill.
Write a pseudocolor png from an xarray dataset. Args: png_path: path for the png to be written to. dataset: dataset to use for the png creation. band: The band to write to a png. color_scale: path to a color scale compatible with gdal. fill_color: color to use as the no_data fill.
def write_single_band_png_from_xr(png_path, dataset, band, color_scale=None, fill_color=None, interpolate=True, no_data=-9999, crs="EPSG:4326"): """Write a pseudocolor png from an xarray dataset. Args: png_path: path for the png to be written to. dataset: dataset to use for the png creation. band: The band to write to a png png_filled_path: optional png with no_data values filled fill_color: color to use as the no_data fill color_scale: path to a color scale compatible with gdal. """ assert os.path.exists(color_scale), "Color scale must be a path to a text file containing a gdal compatible scale." assert isinstance(band, str), "Band must be a string." tif_path = os.path.join(os.path.dirname(png_path), str(uuid.uuid4()) + ".png") write_geotiff_from_xr(tif_path, dataset, [band], no_data=no_data, crs=crs) cmd = "gdaldem color-relief -of PNG -b 1 " + tif_path + " " + \ color_scale + " " + png_path os.system(cmd) if fill_color is not None: cmd = "convert -transparent \"#FFFFFF\" " + \ png_path + " " + png_path os.system(cmd) if fill_color is not None and fill_color != "transparent": cmd = "convert " + png_path + " -background " + \ fill_color + " -alpha remove " + png_path os.system(cmd) os.remove(tif_path)
[ "def", "write_single_band_png_from_xr", "(", "png_path", ",", "dataset", ",", "band", ",", "color_scale", "=", "None", ",", "fill_color", "=", "None", ",", "interpolate", "=", "True", ",", "no_data", "=", "-", "9999", ",", "crs", "=", "\"EPSG:4326\"", ")", ":", "assert", "os", ".", "path", ".", "exists", "(", "color_scale", ")", ",", "\"Color scale must be a path to a text file containing a gdal compatible scale.\"", "assert", "isinstance", "(", "band", ",", "str", ")", ",", "\"Band must be a string.\"", "tif_path", "=", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "dirname", "(", "png_path", ")", ",", "str", "(", "uuid", ".", "uuid4", "(", ")", ")", "+", "\".png\"", ")", "write_geotiff_from_xr", "(", "tif_path", ",", "dataset", ",", "[", "band", "]", ",", "no_data", "=", "no_data", ",", "crs", "=", "crs", ")", "cmd", "=", "\"gdaldem color-relief -of PNG -b 1 \"", "+", "tif_path", "+", "\" \"", "+", "color_scale", "+", "\" \"", "+", "png_path", "os", ".", "system", "(", "cmd", ")", "if", "fill_color", "is", "not", "None", ":", "cmd", "=", "\"convert -transparent \\\"#FFFFFF\\\" \"", "+", "png_path", "+", "\" \"", "+", "png_path", "os", ".", "system", "(", "cmd", ")", "if", "fill_color", "is", "not", "None", "and", "fill_color", "!=", "\"transparent\"", ":", "cmd", "=", "\"convert \"", "+", "png_path", "+", "\" -background \"", "+", "fill_color", "+", "\" -alpha remove \"", "+", "png_path", "os", ".", "system", "(", "cmd", ")", "os", ".", "remove", "(", "tif_path", ")" ]
[ 340, 0 ]
[ 372, 23 ]
python
en
['en', 'en', 'en']
True
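A similar sketch for the single-band variant above; the colour-scale file uses the plain-text ramp format accepted by gdaldem color-relief (one "value R G B" entry per line), and the module path is again an assumption:

import numpy as np
import xarray as xr

from dc_utilities import write_single_band_png_from_xr  # assumed module path

# gdaldem-style colour ramp: "<pixel value> <R> <G> <B>" per line.
with open("ndvi_ramp.txt", "w") as ramp:
    ramp.write("-1.0 120 69 25\n0.2 255 255 255\n1.0 12 128 0\n")

lats = np.linspace(0.5, 0.0, 50)
lons = np.linspace(35.0, 35.5, 50)
ndvi = xr.Dataset(
    {"ndvi": (("latitude", "longitude"), np.random.uniform(-1, 1, (50, 50)))},
    coords={"latitude": lats, "longitude": lons})

write_single_band_png_from_xr("ndvi.png", ndvi, band="ndvi",
                              color_scale="ndvi_ramp.txt", fill_color="black")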
_get_transform_from_xr
(data, x_coord='longitude', y_coord='latitude')
Create a geotransform from an xarray.Dataset or xarray.DataArray.
Create a geotransform from an xarray.Dataset or xarray.DataArray.
def _get_transform_from_xr(data, x_coord='longitude', y_coord='latitude'): """Create a geotransform from an xarray.Dataset or xarray.DataArray. """ from rasterio.transform import from_bounds geotransform = from_bounds(data[x_coord][0], data[y_coord][-1], data[x_coord][-1], data[y_coord][0], len(data[x_coord]), len(data[y_coord])) return geotransform
[ "def", "_get_transform_from_xr", "(", "data", ",", "x_coord", "=", "'longitude'", ",", "y_coord", "=", "'latitude'", ")", ":", "from", "rasterio", ".", "transform", "import", "from_bounds", "geotransform", "=", "from_bounds", "(", "data", "[", "x_coord", "]", "[", "0", "]", ",", "data", "[", "y_coord", "]", "[", "-", "1", "]", ",", "data", "[", "x_coord", "]", "[", "-", "1", "]", ",", "data", "[", "y_coord", "]", "[", "0", "]", ",", "len", "(", "data", "[", "x_coord", "]", ")", ",", "len", "(", "data", "[", "y_coord", "]", ")", ")", "return", "geotransform" ]
[ 374, 0 ]
[ 382, 23 ]
python
en
['en', 'ga', 'en']
True
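The helper above just forwards the first and last coordinate values as a bounding box to rasterio's from_bounds, so the equivalent direct call looks like this (coordinate values are made up):

import numpy as np
import xarray as xr
from rasterio.transform import from_bounds

lats = np.linspace(1.0, 0.0, 100)   # descending latitudes, as in a typical raster
lons = np.linspace(35.0, 36.0, 200)
da = xr.DataArray(np.zeros((100, 200)),
                  coords={"latitude": lats, "longitude": lons},
                  dims=("latitude", "longitude"))

# west, south, east, north, width, height -- exactly what _get_transform_from_xr passes
transform = from_bounds(float(da.longitude[0]), float(da.latitude[-1]),
                        float(da.longitude[-1]), float(da.latitude[0]),
                        len(da.longitude), len(da.latitude))
print(transform)   # an affine transform mapping pixel (col, row) to (lon, lat)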
ignore_warnings
(func, *args, **kwargs)
Runs a function while ignoring warnings
Runs a function while ignoring warnings
def ignore_warnings(func, *args, **kwargs): """Runs a function while ignoring warnings""" with warnings.catch_warnings(): warnings.simplefilter("ignore") ret = func(*args, **kwargs) return ret
[ "def", "ignore_warnings", "(", "func", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "with", "warnings", ".", "catch_warnings", "(", ")", ":", "warnings", ".", "simplefilter", "(", "\"ignore\"", ")", "ret", "=", "func", "(", "*", "args", ",", "*", "*", "kwargs", ")", "return", "ret" ]
[ 390, 0 ]
[ 395, 14 ]
python
en
['en', 'en', 'en']
True
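A quick illustration of the wrapper above (assumed to be in scope), silustrated on the RuntimeWarning that NumPy emits for the mean of an empty array:

import numpy as np

def mean_or_nan(values):
    # np.mean of an empty array warns ("Mean of empty slice") and returns nan
    return np.mean(values)

result = ignore_warnings(mean_or_nan, np.array([]))   # no warning is printed
print(result)   # nan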
initialized_sqlite_project
( mock_webbrowser, caplog, tmp_path_factory, titanic_sqlite_db_file, sa )
This is an initialized project through the CLI.
This is an initialized project through the CLI.
def initialized_sqlite_project( mock_webbrowser, caplog, tmp_path_factory, titanic_sqlite_db_file, sa ): """This is an initialized project through the CLI.""" project_dir = str(tmp_path_factory.mktemp("my_rad_project")) engine = sa.create_engine( "sqlite:///{}".format(titanic_sqlite_db_file), pool_recycle=3600 ) inspector = sa.inspect(engine) # get the default schema and table for testing schemas = inspector.get_schema_names() default_schema = schemas[0] tables = [ table_name for table_name in inspector.get_table_names(schema=default_schema) ] default_table = tables[0] runner = CliRunner(mix_stderr=False) result = runner.invoke( cli, ["init", "-d", project_dir], input="\n\n2\n6\ntitanic\n{url}\n\n\n1\n{schema}\n{table}\nwarning\n\n\n\n".format( url=engine.url, schema=default_schema, table=default_table ), catch_exceptions=False, ) assert result.exit_code == 0 assert mock_webbrowser.call_count == 1 assert ( "{}/great_expectations/uncommitted/data_docs/local_site/validations/warning/".format( project_dir ) in mock_webbrowser.call_args[0][0] ) assert_no_logging_messages_or_tracebacks(caplog, result) context = DataContext(os.path.join(project_dir, DataContext.GE_DIR)) assert isinstance(context, DataContext) assert len(context.list_datasources()) == 1 assert context.list_datasources() == [ { "class_name": "SqlAlchemyDatasource", "name": "titanic", "module_name": "great_expectations.datasource", "credentials": {"url": str(engine.url)}, "data_asset_type": { "class_name": "SqlAlchemyDataset", "module_name": "great_expectations.dataset", }, } ] return project_dir
[ "def", "initialized_sqlite_project", "(", "mock_webbrowser", ",", "caplog", ",", "tmp_path_factory", ",", "titanic_sqlite_db_file", ",", "sa", ")", ":", "project_dir", "=", "str", "(", "tmp_path_factory", ".", "mktemp", "(", "\"my_rad_project\"", ")", ")", "engine", "=", "sa", ".", "create_engine", "(", "\"sqlite:///{}\"", ".", "format", "(", "titanic_sqlite_db_file", ")", ",", "pool_recycle", "=", "3600", ")", "inspector", "=", "sa", ".", "inspect", "(", "engine", ")", "# get the default schema and table for testing", "schemas", "=", "inspector", ".", "get_schema_names", "(", ")", "default_schema", "=", "schemas", "[", "0", "]", "tables", "=", "[", "table_name", "for", "table_name", "in", "inspector", ".", "get_table_names", "(", "schema", "=", "default_schema", ")", "]", "default_table", "=", "tables", "[", "0", "]", "runner", "=", "CliRunner", "(", "mix_stderr", "=", "False", ")", "result", "=", "runner", ".", "invoke", "(", "cli", ",", "[", "\"init\"", ",", "\"-d\"", ",", "project_dir", "]", ",", "input", "=", "\"\\n\\n2\\n6\\ntitanic\\n{url}\\n\\n\\n1\\n{schema}\\n{table}\\nwarning\\n\\n\\n\\n\"", ".", "format", "(", "url", "=", "engine", ".", "url", ",", "schema", "=", "default_schema", ",", "table", "=", "default_table", ")", ",", "catch_exceptions", "=", "False", ",", ")", "assert", "result", ".", "exit_code", "==", "0", "assert", "mock_webbrowser", ".", "call_count", "==", "1", "assert", "(", "\"{}/great_expectations/uncommitted/data_docs/local_site/validations/warning/\"", ".", "format", "(", "project_dir", ")", "in", "mock_webbrowser", ".", "call_args", "[", "0", "]", "[", "0", "]", ")", "assert_no_logging_messages_or_tracebacks", "(", "caplog", ",", "result", ")", "context", "=", "DataContext", "(", "os", ".", "path", ".", "join", "(", "project_dir", ",", "DataContext", ".", "GE_DIR", ")", ")", "assert", "isinstance", "(", "context", ",", "DataContext", ")", "assert", "len", "(", "context", ".", "list_datasources", "(", ")", ")", "==", "1", "assert", "context", ".", "list_datasources", "(", ")", "==", "[", "{", "\"class_name\"", ":", "\"SqlAlchemyDatasource\"", ",", "\"name\"", ":", "\"titanic\"", ",", "\"module_name\"", ":", "\"great_expectations.datasource\"", ",", "\"credentials\"", ":", "{", "\"url\"", ":", "str", "(", "engine", ".", "url", ")", "}", ",", "\"data_asset_type\"", ":", "{", "\"class_name\"", ":", "\"SqlAlchemyDataset\"", ",", "\"module_name\"", ":", "\"great_expectations.dataset\"", ",", "}", ",", "}", "]", "return", "project_dir" ]
[ 411, 0 ]
[ 467, 22 ]
python
en
['en', 'en', 'en']
True
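Since the fixture above returns the project directory, a consuming test would typically reopen the context from it; a hypothetical example (test name and assertion are illustrative):

import os

from great_expectations.data_context import DataContext


def test_titanic_datasource_is_registered(initialized_sqlite_project):
    context = DataContext(os.path.join(initialized_sqlite_project, DataContext.GE_DIR))
    assert [ds["name"] for ds in context.list_datasources()] == ["titanic"]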
_compute_critic_score
(critics, smooth_window)
Compute an array of anomaly scores. Args: critics (ndarray): Critic values. smooth_window (int): Smooth window that will be applied to compute smooth errors. Returns: ndarray: Array of anomaly scores.
Compute an array of anomaly scores.
def _compute_critic_score(critics, smooth_window): """Compute an array of anomaly scores. Args: critics (ndarray): Critic values. smooth_window (int): Smooth window that will be applied to compute smooth errors. Returns: ndarray: Array of anomaly scores. """ critics = np.asarray(critics) l_quantile = np.quantile(critics, 0.25) u_quantile = np.quantile(critics, 0.75) in_range = np.logical_and(critics >= l_quantile, critics <= u_quantile) critic_mean = np.mean(critics[in_range]) critic_std = np.std(critics) z_scores = np.absolute((np.asarray(critics) - critic_mean) / critic_std) + 1 z_scores = pd.Series(z_scores).rolling( smooth_window, center=True, min_periods=smooth_window // 2).mean().values return z_scores
[ "def", "_compute_critic_score", "(", "critics", ",", "smooth_window", ")", ":", "critics", "=", "np", ".", "asarray", "(", "critics", ")", "l_quantile", "=", "np", ".", "quantile", "(", "critics", ",", "0.25", ")", "u_quantile", "=", "np", ".", "quantile", "(", "critics", ",", "0.75", ")", "in_range", "=", "np", ".", "logical_and", "(", "critics", ">=", "l_quantile", ",", "critics", "<=", "u_quantile", ")", "critic_mean", "=", "np", ".", "mean", "(", "critics", "[", "in_range", "]", ")", "critic_std", "=", "np", ".", "std", "(", "critics", ")", "z_scores", "=", "np", ".", "absolute", "(", "(", "np", ".", "asarray", "(", "critics", ")", "-", "critic_mean", ")", "/", "critic_std", ")", "+", "1", "z_scores", "=", "pd", ".", "Series", "(", "z_scores", ")", ".", "rolling", "(", "smooth_window", ",", "center", "=", "True", ",", "min_periods", "=", "smooth_window", "//", "2", ")", ".", "mean", "(", ")", ".", "values", "return", "z_scores" ]
[ 275, 0 ]
[ 299, 19 ]
python
en
['en', 'en', 'en']
True
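A small sketch exercising _compute_critic_score above on synthetic critic values (the data and window size are made up; numpy and pandas are assumed to be imported in the defining module, as the body requires):

import numpy as np

np.random.seed(0)
critics = np.random.normal(size=500)
critics[200:205] += 8.0                      # inject a few outlying critic values

scores = _compute_critic_score(critics, smooth_window=20)
print(scores.shape)                                        # (500,)
print(np.nanmean(scores[200:205]) > np.nanmean(scores))    # outliers score higher -> True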
score_anomalies
(y, y_hat, critic, index, score_window=10, critic_smooth_window=None, error_smooth_window=None, smooth=True, rec_error_type="point", comb="mult", lambda_rec=0.5)
Compute an array of anomaly scores. Anomaly scores are calculated using a combination of reconstruction error and critic score. Args: y (ndarray): Ground truth. y_hat (ndarray): Predicted values. Each timestamp has multiple predictions. index (ndarray): time index for each y (start position of the window) critic (ndarray): Critic score. Each timestamp has multiple critic scores. score_window (int): Optional. Size of the window over which the scores are calculated. If not given, 10 is used. critic_smooth_window (int): Optional. Size of window over which smoothing is applied to critic. If not given, 200 is used. error_smooth_window (int): Optional. Size of window over which smoothing is applied to error. If not given, 200 is used. smooth (bool): Optional. Indicates whether errors should be smoothed. If not given, `True` is used. rec_error_type (str): Optional. The method to compute reconstruction error. Can be one of `["point", "area", "dtw"]`. If not given, 'point' is used. comb (str): Optional. How to combine critic and reconstruction error. Can be one of `["mult", "sum", "rec"]`. If not given, 'mult' is used. lambda_rec (float): Optional. Used if `comb="sum"` as a lambda weighted sum to combine scores. If not given, 0.5 is used. Returns: ndarray: Array of anomaly scores.
Compute an array of anomaly scores.
def score_anomalies(y, y_hat, critic, index, score_window=10, critic_smooth_window=None, error_smooth_window=None, smooth=True, rec_error_type="point", comb="mult", lambda_rec=0.5): """Compute an array of anomaly scores. Anomaly scores are calculated using a combination of reconstruction error and critic score. Args: y (ndarray): Ground truth. y_hat (ndarray): Predicted values. Each timestamp has multiple predictions. index (ndarray): time index for each y (start position of the window) critic (ndarray): Critic score. Each timestamp has multiple critic scores. score_window (int): Optional. Size of the window over which the scores are calculated. If not given, 10 is used. critic_smooth_window (int): Optional. Size of window over which smoothing is applied to critic. If not given, 200 is used. error_smooth_window (int): Optional. Size of window over which smoothing is applied to error. If not given, 200 is used. smooth (bool): Optional. Indicates whether errors should be smoothed. If not given, `True` is used. rec_error_type (str): Optional. The method to compute reconstruction error. Can be one of `["point", "area", "dtw"]`. If not given, 'point' is used. comb (str): Optional. How to combine critic and reconstruction error. Can be one of `["mult", "sum", "rec"]`. If not given, 'mult' is used. lambda_rec (float): Optional. Used if `comb="sum"` as a lambda weighted sum to combine scores. If not given, 0.5 is used. Returns: ndarray: Array of anomaly scores. """ critic_smooth_window = critic_smooth_window or math.trunc(y.shape[0] * 0.01) error_smooth_window = error_smooth_window or math.trunc(y.shape[0] * 0.01) step_size = 1 # expected to be 1 true_index = index # no offset true = [item[0] for item in y.reshape((y.shape[0], -1))] for item in y[-1][1:]: true.extend(item) critic_extended = list() for c in critic: critic_extended.extend(np.repeat(c, y_hat.shape[1]).tolist()) critic_extended = np.asarray(critic_extended).reshape((-1, y_hat.shape[1])) critic_kde_max = [] pred_length = y_hat.shape[1] num_errors = y_hat.shape[1] + step_size * (y_hat.shape[0] - 1) for i in range(num_errors): critic_intermediate = [] for j in range(max(0, i - num_errors + pred_length), min(i + 1, pred_length)): critic_intermediate.append(critic_extended[i - j, j]) if len(critic_intermediate) > 1: discr_intermediate = np.asarray(critic_intermediate) try: critic_kde_max.append(discr_intermediate[np.argmax( stats.gaussian_kde(discr_intermediate)(critic_intermediate))]) except np.linalg.LinAlgError: critic_kde_max.append(np.median(discr_intermediate)) else: critic_kde_max.append(np.median(np.asarray(critic_intermediate))) # Compute critic scores critic_scores = _compute_critic_score(critic_kde_max, critic_smooth_window) # Compute reconstruction scores rec_scores, predictions = reconstruction_errors( y, y_hat, step_size, score_window, error_smooth_window, smooth, rec_error_type) rec_scores = stats.zscore(rec_scores) rec_scores = np.clip(rec_scores, a_min=0, a_max=None) + 1 # Combine the two scores if comb == "mult": final_scores = np.multiply(critic_scores, rec_scores) elif comb == "sum": final_scores = (1 - lambda_rec) * (critic_scores - 1) + lambda_rec * (rec_scores - 1) elif comb == "rec": final_scores = rec_scores else: raise ValueError( 'Unknown combination specified {}, use "mult", "sum", or "rec" instead.'.format(comb)) true = [[t] for t in true] return final_scores, true_index, true, predictions
[ "def", "score_anomalies", "(", "y", ",", "y_hat", ",", "critic", ",", "index", ",", "score_window", "=", "10", ",", "critic_smooth_window", "=", "None", ",", "error_smooth_window", "=", "None", ",", "smooth", "=", "True", ",", "rec_error_type", "=", "\"point\"", ",", "comb", "=", "\"mult\"", ",", "lambda_rec", "=", "0.5", ")", ":", "critic_smooth_window", "=", "critic_smooth_window", "or", "math", ".", "trunc", "(", "y", ".", "shape", "[", "0", "]", "*", "0.01", ")", "error_smooth_window", "=", "error_smooth_window", "or", "math", ".", "trunc", "(", "y", ".", "shape", "[", "0", "]", "*", "0.01", ")", "step_size", "=", "1", "# expected to be 1", "true_index", "=", "index", "# no offset", "true", "=", "[", "item", "[", "0", "]", "for", "item", "in", "y", ".", "reshape", "(", "(", "y", ".", "shape", "[", "0", "]", ",", "-", "1", ")", ")", "]", "for", "item", "in", "y", "[", "-", "1", "]", "[", "1", ":", "]", ":", "true", ".", "extend", "(", "item", ")", "critic_extended", "=", "list", "(", ")", "for", "c", "in", "critic", ":", "critic_extended", ".", "extend", "(", "np", ".", "repeat", "(", "c", ",", "y_hat", ".", "shape", "[", "1", "]", ")", ".", "tolist", "(", ")", ")", "critic_extended", "=", "np", ".", "asarray", "(", "critic_extended", ")", ".", "reshape", "(", "(", "-", "1", ",", "y_hat", ".", "shape", "[", "1", "]", ")", ")", "critic_kde_max", "=", "[", "]", "pred_length", "=", "y_hat", ".", "shape", "[", "1", "]", "num_errors", "=", "y_hat", ".", "shape", "[", "1", "]", "+", "step_size", "*", "(", "y_hat", ".", "shape", "[", "0", "]", "-", "1", ")", "for", "i", "in", "range", "(", "num_errors", ")", ":", "critic_intermediate", "=", "[", "]", "for", "j", "in", "range", "(", "max", "(", "0", ",", "i", "-", "num_errors", "+", "pred_length", ")", ",", "min", "(", "i", "+", "1", ",", "pred_length", ")", ")", ":", "critic_intermediate", ".", "append", "(", "critic_extended", "[", "i", "-", "j", ",", "j", "]", ")", "if", "len", "(", "critic_intermediate", ")", ">", "1", ":", "discr_intermediate", "=", "np", ".", "asarray", "(", "critic_intermediate", ")", "try", ":", "critic_kde_max", ".", "append", "(", "discr_intermediate", "[", "np", ".", "argmax", "(", "stats", ".", "gaussian_kde", "(", "discr_intermediate", ")", "(", "critic_intermediate", ")", ")", "]", ")", "except", "np", ".", "linalg", ".", "LinAlgError", ":", "critic_kde_max", ".", "append", "(", "np", ".", "median", "(", "discr_intermediate", ")", ")", "else", ":", "critic_kde_max", ".", "append", "(", "np", ".", "median", "(", "np", ".", "asarray", "(", "critic_intermediate", ")", ")", ")", "# Compute critic scores", "critic_scores", "=", "_compute_critic_score", "(", "critic_kde_max", ",", "critic_smooth_window", ")", "# Compute reconstruction scores", "rec_scores", ",", "predictions", "=", "reconstruction_errors", "(", "y", ",", "y_hat", ",", "step_size", ",", "score_window", ",", "error_smooth_window", ",", "smooth", ",", "rec_error_type", ")", "rec_scores", "=", "stats", ".", "zscore", "(", "rec_scores", ")", "rec_scores", "=", "np", ".", "clip", "(", "rec_scores", ",", "a_min", "=", "0", ",", "a_max", "=", "None", ")", "+", "1", "# Combine the two scores", "if", "comb", "==", "\"mult\"", ":", "final_scores", "=", "np", ".", "multiply", "(", "critic_scores", ",", "rec_scores", ")", "elif", "comb", "==", "\"sum\"", ":", "final_scores", "=", "(", "1", "-", "lambda_rec", ")", "*", "(", "critic_scores", "-", "1", ")", "+", "lambda_rec", "*", "(", "rec_scores", "-", "1", ")", "elif", "comb", "==", "\"rec\"", ":", "final_scores", 
"=", "rec_scores", "else", ":", "raise", "ValueError", "(", "'Unknown combination specified {}, use \"mult\", \"sum\", or \"rec\" instead.'", ".", "format", "(", "comb", ")", ")", "true", "=", "[", "[", "t", "]", "for", "t", "in", "true", "]", "return", "final_scores", ",", "true_index", ",", "true", ",", "predictions" ]
[ 302, 0 ]
[ 408, 54 ]
python
en
['en', 'en', 'en']
True
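A sketch of how the scorer above is typically wired to a fitted TadGAN (see the fit/predict rows below): X holds the rolling windows, index the start timestamp of each window, and y_hat/critic come straight from predict. Everything here is illustrative and assumes score_anomalies and its reconstruction_errors dependency are importable:

def score_windows(tgan, X, index):
    """Score windowed data with a fitted TadGAN instance (illustrative wrapper)."""
    y_hat, critic = tgan.predict(X)          # reconstructions + critic scores
    scores, true_index, y_true, y_pred = score_anomalies(
        X, y_hat, critic, index,
        rec_error_type="dtw",                # or "point" / "area"
        comb="mult",                         # multiply critic and reconstruction scores
    )
    return scores, true_index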
RandomWeightedAverage._merge_function
(self, inputs)
Args: inputs[0]: x, the original input. inputs[1]: x_, the predicted input.
Args: inputs[0]: x, the original input. inputs[1]: x_, the predicted input.
def _merge_function(self, inputs): """ Args: inputs[0] x original input inputs[1] x_ predicted input """ alpha = K.random_uniform((64, 1, 1)) return (alpha * inputs[0]) + ((1 - alpha) * inputs[1])
[ "def", "_merge_function", "(", "self", ",", "inputs", ")", ":", "alpha", "=", "K", ".", "random_uniform", "(", "(", "64", ",", "1", ",", "1", ")", ")", "return", "(", "alpha", "*", "inputs", "[", "0", "]", ")", "+", "(", "(", "1", "-", "alpha", ")", "*", "inputs", "[", "1", "]", ")" ]
[ 24, 4 ]
[ 31, 62 ]
python
en
['en', 'error', 'th']
False
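The merge above is the random interpolation used for a WGAN-GP style gradient penalty: each penalty sample lies on the line between a real window and its reconstruction, with the batch size (64) hard-coded into the random tensor. The same arithmetic in plain NumPy, for intuition only:

import numpy as np

batch, window = 64, 100
x = np.random.normal(size=(batch, window, 1))        # "real" windows
x_hat = np.random.normal(size=(batch, window, 1))    # reconstructed windows

alpha = np.random.uniform(size=(batch, 1, 1))        # one weight per sample
interpolated = alpha * x + (1 - alpha) * x_hat       # same formula as _merge_function
print(interpolated.shape)                            # (64, 100, 1)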
TadGAN.fit
(self, X, **kwargs)
Fit the TadGAN. Args: X (ndarray): N-dimensional array containing the input training sequences for the model.
Fit the TadGAN.
def fit(self, X, **kwargs): """Fit the TadGAN. Args: X (ndarray): N-dimensional array containing the input training sequences for the model. """ self._build_tadgan(**kwargs) X = X.reshape((-1, self.shape[0], 1)) self._fit(X)
[ "def", "fit", "(", "self", ",", "X", ",", "*", "*", "kwargs", ")", ":", "self", ".", "_build_tadgan", "(", "*", "*", "kwargs", ")", "X", "=", "X", ".", "reshape", "(", "(", "-", "1", ",", "self", ".", "shape", "[", "0", "]", ",", "1", ")", ")", "self", ".", "_fit", "(", "X", ")" ]
[ 242, 4 ]
[ 251, 20 ]
python
en
['en', 'mt', 'en']
True
TadGAN.predict
(self, X)
Predict values using the initialized object. Args: X (ndarray): N-dimensional array containing the input sequences for the model. Returns: ndarray: N-dimensional array containing the reconstructions for each input sequence. ndarray: N-dimensional array containing the critic scores for each input sequence.
Predict values using the initialized object.
def predict(self, X): """Predict values using the initialized object. Args: X (ndarray): N-dimensional array containing the input sequences for the model. Returns: ndarray: N-dimensional array containing the reconstructions for each input sequence. ndarray: N-dimensional array containing the critic scores for each input sequence. """ X = X.reshape((-1, self.shape[0], 1)) z_ = self.encoder.predict(X) y_hat = self.generator.predict(z_) critic = self.critic_x.predict(X) return y_hat, critic
[ "def", "predict", "(", "self", ",", "X", ")", ":", "X", "=", "X", ".", "reshape", "(", "(", "-", "1", ",", "self", ".", "shape", "[", "0", "]", ",", "1", ")", ")", "z_", "=", "self", ".", "encoder", ".", "predict", "(", "X", ")", "y_hat", "=", "self", ".", "generator", ".", "predict", "(", "z_", ")", "critic", "=", "self", ".", "critic_x", ".", "predict", "(", "X", ")", "return", "y_hat", ",", "critic" ]
[ 253, 4 ]
[ 272, 28 ]
python
en
['en', 'en', 'en']
True
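Putting fit and predict together: the signal is cut into overlapping windows, the model is trained on them, and the same windows are then reconstructed. The window slicing below is a generic sketch; the TadGAN instance is assumed to be constructed elsewhere with a matching window shape:

import numpy as np

def rolling_windows(signal, window_size=100, step=1):
    """Slice a 1-D signal into overlapping windows of shape (n_windows, window_size)."""
    starts = np.arange(0, len(signal) - window_size + 1, step)
    return np.stack([signal[s:s + window_size] for s in starts]), starts

def fit_and_reconstruct(tgan, signal, window_size=100):
    """Fit an already-constructed TadGAN on a 1-D signal and reconstruct its windows."""
    X, index = rolling_windows(signal, window_size)
    tgan.fit(X)                       # fit() reshapes X to (-1, window_size, 1) internally
    y_hat, critic = tgan.predict(X)   # reconstructions and per-window critic scores
    return X, index, y_hat, critic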
find_docusaurus_refs
(dir: str)
Finds any Docusaurus links within a target directory (e.g. ```python file=...#L10-20)
Finds any Docusaurus links within a target directory (e.g. ```python file=...#L10-20)
def find_docusaurus_refs(dir: str) -> List[str]: """ Finds any Docusaurus links within a target directory (i.e. ```python file=...#L10-20) """ linked_files: Set[str] = set() pattern: str = ( r"\`\`\`[a-zA-Z]+ file" # Format of internal links used by Docusaurus ) for doc in glob.glob(f"{dir}/**/*.md", recursive=True): for line in open(doc): if re.search(pattern, line): file: str = _parse_file_from_docusaurus_link(line) path: str = os.path.join(os.path.dirname(doc), file) linked_files.add(path) return [file for file in linked_files]
[ "def", "find_docusaurus_refs", "(", "dir", ":", "str", ")", "->", "List", "[", "str", "]", ":", "linked_files", ":", "Set", "[", "str", "]", "=", "set", "(", ")", "pattern", ":", "str", "=", "(", "r\"\\`\\`\\`[a-zA-Z]+ file\"", "# Format of internal links used by Docusaurus", ")", "for", "doc", "in", "glob", ".", "glob", "(", "f\"{dir}/**/*.md\"", ",", "recursive", "=", "True", ")", ":", "for", "line", "in", "open", "(", "doc", ")", ":", "if", "re", ".", "search", "(", "pattern", ",", "line", ")", ":", "file", ":", "str", "=", "_parse_file_from_docusaurus_link", "(", "line", ")", "path", ":", "str", "=", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "dirname", "(", "doc", ")", ",", "file", ")", "linked_files", ".", "add", "(", "path", ")", "return", "[", "file", "for", "file", "in", "linked_files", "]" ]
[ 36, 0 ]
[ 50, 42 ]
python
en
['en', 'en', 'en']
True
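Invoking the scanner above on a documentation tree might look as follows (the directory name is an assumption; _parse_file_from_docusaurus_link is defined alongside it in the same module):

linked = find_docusaurus_refs("docs")
for path in sorted(linked):
    print(path)    # every file referenced by a ```python file=... fence in the docs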
get_local_imports
(files: List[str])
Parses a list of files to determine local imports; external dependencies are discarded
Parses a list of files to determine local imports; external dependencies are discarded
def get_local_imports(files: List[str]) -> List[str]: """ Parses a list of files to determine local imports; external dependencies are discarded """ imports: Set[str] = set() for file in files: with open(file) as f: root: ast.Module = ast.parse(f.read(), file) for node in ast.walk(root): # ast.Import is only used for external deps if not isinstance(node, ast.ImportFrom): continue # Only consider imports relevant to GE (note that "import great_expectations as ge" is discarded) if ( isinstance(node.module, str) and "great_expectations" in node.module and node.module.count(".") > 0 ): imports.add(node.module) return [imp for imp in imports]
[ "def", "get_local_imports", "(", "files", ":", "List", "[", "str", "]", ")", "->", "List", "[", "str", "]", ":", "imports", ":", "Set", "[", "str", "]", "=", "set", "(", ")", "for", "file", "in", "files", ":", "with", "open", "(", "file", ")", "as", "f", ":", "root", ":", "ast", ".", "Module", "=", "ast", ".", "parse", "(", "f", ".", "read", "(", ")", ",", "file", ")", "for", "node", "in", "ast", ".", "walk", "(", "root", ")", ":", "# ast.Import is only used for external deps", "if", "not", "isinstance", "(", "node", ",", "ast", ".", "ImportFrom", ")", ":", "continue", "# Only consider imports relevant to GE (note that \"import great_expectations as ge\" is discarded)", "if", "(", "isinstance", "(", "node", ".", "module", ",", "str", ")", "and", "\"great_expectations\"", "in", "node", ".", "module", "and", "node", ".", "module", ".", "count", "(", "\".\"", ")", ">", "0", ")", ":", "imports", ".", "add", "(", "node", ".", "module", ")", "return", "[", "imp", "for", "imp", "in", "imports", "]" ]
[ 59, 0 ]
[ 80, 35 ]
python
en
['en', 'en', 'en']
True
get_import_paths
(imports: List[str])
Takes a list of imports and determines the relative path to each source file or module
Takes a list of imports and determines the relative path to each source file or module
def get_import_paths(imports: List[str]) -> List[str]: """ Takes a list of imports and determines the relative path to each source file or module """ paths: List[str] = [] for imp in imports: path: str = imp.replace( ".", "/" ) # AST nodes are formatted as great_expectations.module.file _update_paths(paths, path) return paths
[ "def", "get_import_paths", "(", "imports", ":", "List", "[", "str", "]", ")", "->", "List", "[", "str", "]", ":", "paths", ":", "List", "[", "str", "]", "=", "[", "]", "for", "imp", "in", "imports", ":", "path", ":", "str", "=", "imp", ".", "replace", "(", "\".\"", ",", "\"/\"", ")", "# AST nodes are formatted as great_expectations.module.file", "_update_paths", "(", "paths", ",", "path", ")", "return", "paths" ]
[ 83, 0 ]
[ 93, 16 ]
python
en
['en', 'en', 'en']
True
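The three helpers above form a small pipeline: find the files the docs embed, collect their intra-package imports, and map those imports to source paths. A hypothetical driver, assuming all three functions (and _update_paths) live in the same script:

from typing import List

def docs_dependency_paths(docs_dir: str = "docs") -> List[str]:
    files = find_docusaurus_refs(docs_dir)   # files embedded via ```python file=... fences
    imports = get_local_imports(files)       # great_expectations.* imports inside them
    return get_import_paths(imports)         # relative source paths for those imports

if __name__ == "__main__":
    for path in docs_dependency_paths():
        print(path)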
test_snapshot_render_section_page_with_fixture_data
(validation_operator_result)
Make sure the appropriate markdown rendering is done for the applied fixture. Args: validation_operator_result: test fixture Returns: None
Make sure the appropriate markdown rendering is done for the applied fixture. Args: validation_operator_result: test fixture
def test_snapshot_render_section_page_with_fixture_data(validation_operator_result): """ Make sure the appropriate markdown rendering is done for the applied fixture. Args: validation_operator_result: test fixture Returns: None """ validation_operator_result = ValidationOperatorResult(**validation_operator_result) validation_results_page_renderer = ValidationResultsPageRenderer( run_info_at_end=True ) rendered_document_content_list = ( validation_results_page_renderer.render_validation_operator_result( validation_operator_result=validation_operator_result ) ) md_str_list = DefaultMarkdownPageView().render(rendered_document_content_list) md_str = " ".join(md_str_list) md_str = md_str.replace(" ", "").replace("\t", "").replace("\n", "") assert ( md_str == """ # Validation Results ## Overview ### **Expectation Suite:** **basic.warning** **Data asset:** **None** **Status:** **Failed** ### Statistics | | | | ------------ | ------------ | Evaluated Expectations | 11 Successful Expectations | 9 Unsuccessful Expectations | 2 Success Percent | ≈81.82% ## Table-Level Expectations | Status | Expectation | Observed Value | | ------------ | ------------ | ------------ | ❌ | Must have greater than or equal to **27000** and less than or equal to **33000** rows. | 30 ✅ | Must have exactly **3** columns. | 3 ✅ | Must have these columns in this order: **Team**, ** "Payroll (millions)"**, ** "Wins"** | ['Team', ' "Payroll (millions)"', ' "Wins"'] ## "Payroll (millions)" | Status | Expectation | Observed Value | | ------------ | ------------ | ------------ | ✅ | values must never be null. | 100% not null ✅ | minimum value must be greater than or equal to **54.24** and less than or equal to **56.24**. | 55.24 ✅ | maximum value must be greater than or equal to **196.96** and less than or equal to **198.96**. | 197.96 ✅ | mean must be greater than or equal to **97.01899999999998** and less than or equal to **99.01899999999998**. | ≈98.019 ❌ | median must be greater than or equal to **84000.75** and less than or equal to **86000.75**. | 85.75 ✅ | quantiles must be within the following value ranges. | Quantile | Min Value | Max Value | | ------------ | ------------ | ------------ | 0.05 | 54.37 | 56.37 Q1 | 74.48 | 76.48 Median | 82.31 | 84.31 Q3 | 116.62 | 118.62 0.95 | 173.54 | 175.54 | | Quantile | Value | | ------------ | ------------ | 0.05 | 55.37 Q1 | 75.48 Median | 83.31 Q3 | 117.62 0.95 | 174.54 ## Team | Status | Expectation | Observed Value | | ------------ | ------------ | ------------ | ✅ | values must never be null. | 100% not null ✅ | values must always be greater than or equal to **1** characters long. 
| 0% unexpected ### Info | | | | ------------ | ------------ | Great Expectations Version | 0.11.8+4.g4ab34df3.dirty Run Name | getest run Run Time | 2020-07-27T17:19:32.959193+00:00 ### Batch Markers | | | | ------------ | ------------ | **ge_load_time** | **20200727T171932.954810Z** **pandas_data_fingerprint** | **8c46fdaf0bd356fd58b7bcd9b2e6012d** ### Batch Kwargs | | | | ------------ | ------------ | **PandasInMemoryDF** | **True** **datasource** | **getest** **ge_batch_id** | **56615f40-d02d-11ea-b6ea-acde48001122** ----------------------------------------------------------- Powered by [Great Expectations](https://greatexpectations.io/) # Validation Results ## Overview ### **Expectation Suite:** **basic.warning** **Data asset:** **None** **Status:** **Failed** ### Statistics | | | | ------------ | ------------ | Evaluated Expectations | 11 Successful Expectations | 9 Unsuccessful Expectations | 2 Success Percent | ≈81.82% ## Table-Level Expectations | Status | Expectation | Observed Value | | ------------ | ------------ | ------------ | ❌ | Must have greater than or equal to **27000** and less than or equal to **33000** rows. | 30 ✅ | Must have exactly **3** columns. | 3 ✅ | Must have these columns in this order: **Team**, ** "Payroll (millions)"**, ** "Wins"** | ['Team', ' "Payroll (millions)"', ' "Wins"'] ## "Payroll (millions)" | Status | Expectation | Observed Value | | ------------ | ------------ | ------------ | ✅ | values must never be null. | 100% not null ✅ | minimum value must be greater than or equal to **54.24** and less than or equal to **56.24**. | 55.24 ✅ | maximum value must be greater than or equal to **196.96** and less than or equal to **198.96**. | 197.96 ✅ | mean must be greater than or equal to **97.01899999999998** and less than or equal to **99.01899999999998**. | ≈98.019 ❌ | median must be greater than or equal to **84000.75** and less than or equal to **86000.75**. | 85.75 ✅ | quantiles must be within the following value ranges. | Quantile | Min Value | Max Value | | ------------ | ------------ | ------------ | 0.05 | 54.37 | 56.37 Q1 | 74.48 | 76.48 Median | 82.31 | 84.31 Q3 | 116.62 | 118.62 0.95 | 173.54 | 175.54 | | Quantile | Value | | ------------ | ------------ | 0.05 | 55.37 Q1 | 75.48 Median | 83.31 Q3 | 117.62 0.95 | 174.54 ## Team | Status | Expectation | Observed Value | | ------------ | ------------ | ------------ | ✅ | values must never be null. | 100% not null ✅ | values must always be greater than or equal to **1** characters long. | 0% unexpected ### Info | | | | ------------ | ------------ | Great Expectations Version | 0.11.8+4.g4ab34df3.dirty Run Name | getest run Run Time | 2020-07-27T17:19:32.959193+00:00 ### Batch Markers | | | | ------------ | ------------ | **ge_load_time** | **20200727T171932.954810Z** **pandas_data_fingerprint** | **8c46fdaf0bd356fd58b7bcd9b2e6012d** ### Batch Kwargs | | | | ------------ | ------------ | **PandasInMemoryDF** | **True** **datasource** | **getest** **ge_batch_id** | **56615f40-d02d-11ea-b6ea-acde48001122** ----------------------------------------------------------- Powered by [Great Expectations](https://greatexpectations.io/) """.replace( " ", "" ) .replace("\t", "") .replace("\n", "") )
[ "def", "test_snapshot_render_section_page_with_fixture_data", "(", "validation_operator_result", ")", ":", "validation_operator_result", "=", "ValidationOperatorResult", "(", "*", "*", "validation_operator_result", ")", "validation_results_page_renderer", "=", "ValidationResultsPageRenderer", "(", "run_info_at_end", "=", "True", ")", "rendered_document_content_list", "=", "(", "validation_results_page_renderer", ".", "render_validation_operator_result", "(", "validation_operator_result", "=", "validation_operator_result", ")", ")", "md_str_list", "=", "DefaultMarkdownPageView", "(", ")", ".", "render", "(", "rendered_document_content_list", ")", "md_str", "=", "\" \"", ".", "join", "(", "md_str_list", ")", "md_str", "=", "md_str", ".", "replace", "(", "\" \"", ",", "\"\"", ")", ".", "replace", "(", "\"\\t\"", ",", "\"\"", ")", ".", "replace", "(", "\"\\n\"", ",", "\"\"", ")", "assert", "(", "md_str", "==", "\"\"\"\n# Validation Results\n## Overview\n### **Expectation Suite:** **basic.warning**\n**Data asset:** **None**\n**Status:** **Failed**\n### Statistics\n | | |\n | ------------ | ------------ |\nEvaluated Expectations | 11\nSuccessful Expectations | 9\nUnsuccessful Expectations | 2\nSuccess Percent | ≈81.82%\n## Table-Level Expectations\n | Status | Expectation | Observed Value |\n | ------------ | ------------ | ------------ |\n❌ | Must have greater than or equal to **27000** and less than or equal to **33000** rows. | 30\n✅ | Must have exactly **3** columns. | 3\n✅ | Must have these columns in this order: **Team**, ** \"Payroll (millions)\"**, ** \"Wins\"** | ['Team', ' \"Payroll (millions)\"', ' \"Wins\"']\n## \"Payroll (millions)\"\n | Status | Expectation | Observed Value |\n | ------------ | ------------ | ------------ |\n✅ | values must never be null. | 100% not null\n✅ | minimum value must be greater than or equal to **54.24** and less than or equal to **56.24**. | 55.24\n✅ | maximum value must be greater than or equal to **196.96** and less than or equal to **198.96**. | 197.96\n✅ | mean must be greater than or equal to **97.01899999999998** and less than or equal to **99.01899999999998**. | ≈98.019\n❌ | median must be greater than or equal to **84000.75** and less than or equal to **86000.75**. | 85.75\n✅ | quantiles must be within the following value ranges.\n | Quantile | Min Value | Max Value |\n | ------------ | ------------ | ------------ |\n0.05 | 54.37 | 56.37\nQ1 | 74.48 | 76.48\nMedian | 82.31 | 84.31\nQ3 | 116.62 | 118.62\n0.95 | 173.54 | 175.54\n |\n | Quantile | Value |\n | ------------ | ------------ |\n0.05 | 55.37\nQ1 | 75.48\nMedian | 83.31\nQ3 | 117.62\n0.95 | 174.54\n## Team\n | Status | Expectation | Observed Value |\n | ------------ | ------------ | ------------ |\n✅ | values must never be null. | 100% not null\n✅ | values must always be greater than or equal to **1** characters long. 
| 0% unexpected\n### Info\n | | |\n | ------------ | ------------ |\nGreat Expectations Version | 0.11.8+4.g4ab34df3.dirty\nRun Name | getest run\nRun Time | 2020-07-27T17:19:32.959193+00:00\n### Batch Markers\n | | |\n | ------------ | ------------ |\n**ge_load_time** | **20200727T171932.954810Z**\n**pandas_data_fingerprint** | **8c46fdaf0bd356fd58b7bcd9b2e6012d**\n### Batch Kwargs\n | | |\n | ------------ | ------------ |\n**PandasInMemoryDF** | **True**\n**datasource** | **getest**\n**ge_batch_id** | **56615f40-d02d-11ea-b6ea-acde48001122**\n-----------------------------------------------------------\nPowered by [Great Expectations](https://greatexpectations.io/)\n# Validation Results\n## Overview\n### **Expectation Suite:** **basic.warning**\n**Data asset:** **None**\n**Status:** **Failed**\n### Statistics\n | | |\n | ------------ | ------------ |\nEvaluated Expectations | 11\nSuccessful Expectations | 9\nUnsuccessful Expectations | 2\nSuccess Percent | ≈81.82%\n## Table-Level Expectations\n | Status | Expectation | Observed Value |\n | ------------ | ------------ | ------------ |\n❌ | Must have greater than or equal to **27000** and less than or equal to **33000** rows. | 30\n✅ | Must have exactly **3** columns. | 3\n✅ | Must have these columns in this order: **Team**, ** \"Payroll (millions)\"**, ** \"Wins\"** | ['Team', ' \"Payroll (millions)\"', ' \"Wins\"']\n## \"Payroll (millions)\"\n | Status | Expectation | Observed Value |\n | ------------ | ------------ | ------------ |\n✅ | values must never be null. | 100% not null\n✅ | minimum value must be greater than or equal to **54.24** and less than or equal to **56.24**. | 55.24\n✅ | maximum value must be greater than or equal to **196.96** and less than or equal to **198.96**. | 197.96\n✅ | mean must be greater than or equal to **97.01899999999998** and less than or equal to **99.01899999999998**. | ≈98.019\n❌ | median must be greater than or equal to **84000.75** and less than or equal to **86000.75**. | 85.75\n✅ | quantiles must be within the following value ranges.\n | Quantile | Min Value | Max Value |\n | ------------ | ------------ | ------------ |\n0.05 | 54.37 | 56.37\nQ1 | 74.48 | 76.48\nMedian | 82.31 | 84.31\nQ3 | 116.62 | 118.62\n0.95 | 173.54 | 175.54\n |\n | Quantile | Value |\n | ------------ | ------------ |\n0.05 | 55.37\nQ1 | 75.48\nMedian | 83.31\nQ3 | 117.62\n0.95 | 174.54\n## Team\n | Status | Expectation | Observed Value |\n | ------------ | ------------ | ------------ |\n✅ | values must never be null. | 100% not null\n✅ | values must always be greater than or equal to **1** characters long. | 0% unexpected\n### Info\n | | |\n | ------------ | ------------ |\nGreat Expectations Version | 0.11.8+4.g4ab34df3.dirty\nRun Name | getest run\nRun Time | 2020-07-27T17:19:32.959193+00:00\n### Batch Markers\n | | |\n | ------------ | ------------ |\n**ge_load_time** | **20200727T171932.954810Z**\n**pandas_data_fingerprint** | **8c46fdaf0bd356fd58b7bcd9b2e6012d**\n### Batch Kwargs\n | | |\n | ------------ | ------------ |\n**PandasInMemoryDF** | **True**\n**datasource** | **getest**\n**ge_batch_id** | **56615f40-d02d-11ea-b6ea-acde48001122**\n-----------------------------------------------------------\nPowered by [Great Expectations](https://greatexpectations.io/)\n\"\"\"", ".", "replace", "(", "\" \"", ",", "\"\"", ")", ".", "replace", "(", "\"\\t\"", ",", "\"\"", ")", ".", "replace", "(", "\"\\n\"", ",", "\"\"", ")", ")" ]
[ 137, 0 ]
[ 304, 5 ]
python
en
['en', 'error', 'th']
False
test_render_section_page_with_fixture_data_multiple_validations
( validation_operator_result, )
Make sure the appropriate markdown rendering is done for the applied fixture. :param validation_operator_result: test fixture :return: None
Make sure the appropriate markdown rendering is done for the applied fixture. :param validation_operator_result: test fixture :return: None
def test_render_section_page_with_fixture_data_multiple_validations( validation_operator_result, ): """ Make sure the appropriate markdown rendering is done for the applied fixture. :param validation_operator_result: test fixture :return: None """ validation_operator_result = ValidationOperatorResult(**validation_operator_result) validation_results_page_renderer = ValidationResultsPageRenderer( run_info_at_end=True ) rendered_document_content_list = ( validation_results_page_renderer.render_validation_operator_result( validation_operator_result=validation_operator_result ) ) md_str_list = DefaultMarkdownPageView().render(rendered_document_content_list) md_str = " ".join(md_str_list) md_str = md_str.replace(" ", "").replace("\t", "").replace("\n", "") # print(md_str) assert ( md_str == """ # Validation Results ## Overview ### **Expectation Suite:** **basic.warning** **Dataasset:** **None** **Status:** **Failed** ### Statistics | | | | ------------ | ------------ | Evaluated Expectations | 11 Successful Expectations | 9 Unsuccessful Expectations | 2 Success Percent | ≈81.82% ## Table-Level Expectations | Status | Expectation | Observed Value | | ------------ | ------------ | ------------ | ❌ | Must have greater than or equal to **27000** and less than or equal to **33000** rows. | 30 ✅ | Must have exactly **3** columns. | 3 ✅ | Must have these columns in this order: **Team**, ** "Payroll (millions)"**, ** "Wins"** | ['Team', ' "Payroll (millions)"', ' "Wins"'] ## "Payroll (millions)" | Status | Expectation | Observed Value | | ------------ | ------------ | ------------ | ✅ | values must never be null. | 100% not null ✅ | minimum value must be greater than or equal to **54.24** and less than or equal to **56.24**. | 55.24 ✅ | maximum value must be greater than or equal to **196.96** and less than or equal to **198.96**. | 197.96 ✅ | mean must be greater than or equal to **97.01899999999998** and less than or equal to **99.01899999999998**. | ≈98.019 ❌ | median must be greater than or equal to **84000.75** and less than or equal to **86000.75**. | 85.75 ✅ | quantiles must be within the following value ranges. | Quantile | Min Value | Max Value | | ------------ | ------------ | ------------ | 0.05 | 54.37 | 56.37 Q1 | 74.48 | 76.48 Median | 82.31 | 84.31 Q3 | 116.62 | 118.62 0.95 | 173.54 | 175.54 | | Quantile | Value | | ------------ | ------------ | 0.05 | 55.37 Q1 | 75.48 Median | 83.31 Q3 | 117.62 0.95 | 174.54 ## Team | Status | Expectation | Observed Value | | ------------ | ------------ | ------------ | ✅ | values must never be null. | 100% not null ✅ | values must always be greater than or equal to **1** characters long. 
| 0% unexpected ### Info | | | | ------------ | ------------ | Great Expectations Version | 0.11.8+4.g4ab34df3.dirty Run Name | getest run Run Time | 2020-07-27T17:19:32.959193+00:00 ### Batch Markers | | | | ------------ | ------------ | **ge_load_time** | **20200727T171932.954810Z** **pandas_data_fingerprint** | **8c46fdaf0bd356fd58b7bcd9b2e6012d** ### Batch Kwargs | | | | ------------ | ------------ | **PandasInMemoryDF** | **True** **datasource** | **getest** **ge_batch_id** | **56615f40-d02d-11ea-b6ea-acde48001122** ----------------------------------------------------------- Powered by [Great Expectations](https://greatexpectations.io/) # Validation Results ## Overview ### **Expectation Suite:** **basic.warning** **Dataasset:** **None** **Status:** **Failed** ### Statistics | | | | ------------ | ------------ | Evaluated Expectations | 11 Successful Expectations | 9 Unsuccessful Expectations | 2 Success Percent | ≈81.82% ## Table-Level Expectations | Status | Expectation | Observed Value | | ------------ | ------------ | ------------ | ❌ | Must have greater than or equal to **27000** and less than or equal to **33000** rows. | 30 ✅ | Must have exactly **3** columns. | 3 ✅ | Must have these columns in this order: **Team**, ** "Payroll (millions)"**, ** "Wins"** | ['Team', ' "Payroll (millions)"', ' "Wins"'] ## "Payroll (millions)" | Status | Expectation | Observed Value | | ------------ | ------------ | ------------ | ✅ | values must never be null. | 100% not null ✅ | minimum value must be greater than or equal to **54.24** and less than or equal to **56.24**. | 55.24 ✅ | maximum value must be greater than or equal to **196.96** and less than or equal to **198.96**. | 197.96 ✅ | mean must be greater than or equal to **97.01899999999998** and less than or equal to **99.01899999999998**. | ≈98.019 ❌ | median must be greater than or equal to **84000.75** and less than or equal to **86000.75**. | 85.75 ✅ | quantiles must be within the following value ranges. | Quantile | Min Value | Max Value | | ------------ | ------------ | ------------ | 0.05 | 54.37 | 56.37 Q1 | 74.48 | 76.48 Median | 82.31 | 84.31 Q3 | 116.62 | 118.62 0.95 | 173.54 | 175.54 | | Quantile | Value | | ------------ | ------------ | 0.05 | 55.37 Q1 | 75.48 Median | 83.31 Q3 | 117.62 0.95 | 174.54 ## Team | Status | Expectation | Observed Value | | ------------ | ------------ | ------------ | ✅ | values must never be null. | 100% not null ✅ | values must always be greater than or equal to **1** characters long. | 0% unexpected ### Info | | | | ------------ | ------------ | Great Expectations Version | 0.11.8+4.g4ab34df3.dirty Run Name | getest run Run Time | 2020-07-27T17:19:32.959193+00:00 ### Batch Markers | | | | ------------ | ------------ | **ge_load_time** | **20200727T171932.954810Z** **pandas_data_fingerprint** | **8c46fdaf0bd356fd58b7bcd9b2e6012d** ### Batch Kwargs | | | | ------------ | ------------ | **PandasInMemoryDF** | **True** **datasource** | **getest** **ge_batch_id** | **56615f40-d02d-11ea-b6ea-acde48001122** ----------------------------------------------------------- Powered by [Great Expectations](https://greatexpectations.io/) """.replace( " ", "" ) .replace("\t", "") .replace("\n", "") )
[ "def", "test_render_section_page_with_fixture_data_multiple_validations", "(", "validation_operator_result", ",", ")", ":", "validation_operator_result", "=", "ValidationOperatorResult", "(", "*", "*", "validation_operator_result", ")", "validation_results_page_renderer", "=", "ValidationResultsPageRenderer", "(", "run_info_at_end", "=", "True", ")", "rendered_document_content_list", "=", "(", "validation_results_page_renderer", ".", "render_validation_operator_result", "(", "validation_operator_result", "=", "validation_operator_result", ")", ")", "md_str_list", "=", "DefaultMarkdownPageView", "(", ")", ".", "render", "(", "rendered_document_content_list", ")", "md_str", "=", "\" \"", ".", "join", "(", "md_str_list", ")", "md_str", "=", "md_str", ".", "replace", "(", "\" \"", ",", "\"\"", ")", ".", "replace", "(", "\"\\t\"", ",", "\"\"", ")", ".", "replace", "(", "\"\\n\"", ",", "\"\"", ")", "# print(md_str)", "assert", "(", "md_str", "==", "\"\"\"\n# Validation Results\n## Overview\n### **Expectation Suite:** **basic.warning**\n**Dataasset:** **None**\n**Status:** **Failed**\n### Statistics\n | | |\n | ------------ | ------------ |\nEvaluated Expectations | 11\nSuccessful Expectations | 9\nUnsuccessful Expectations | 2\nSuccess Percent | ≈81.82%\n## Table-Level Expectations\n | Status | Expectation | Observed Value |\n | ------------ | ------------ | ------------ |\n❌ | Must have greater than or equal to **27000** and less than or equal to **33000** rows. | 30\n✅ | Must have exactly **3** columns. | 3\n✅ | Must have these columns in this order: **Team**, ** \"Payroll (millions)\"**, ** \"Wins\"** | ['Team', ' \"Payroll (millions)\"', ' \"Wins\"']\n## \"Payroll (millions)\"\n | Status | Expectation | Observed Value |\n | ------------ | ------------ | ------------ |\n✅ | values must never be null. | 100% not null\n✅ | minimum value must be greater than or equal to **54.24** and less than or equal to **56.24**. | 55.24\n✅ | maximum value must be greater than or equal to **196.96** and less than or equal to **198.96**. | 197.96\n✅ | mean must be greater than or equal to **97.01899999999998** and less than or equal to **99.01899999999998**. | ≈98.019\n❌ | median must be greater than or equal to **84000.75** and less than or equal to **86000.75**. | 85.75\n✅ | quantiles must be within the following value ranges.\n | Quantile | Min Value | Max Value |\n | ------------ | ------------ | ------------ |\n0.05 | 54.37 | 56.37\nQ1 | 74.48 | 76.48\nMedian | 82.31 | 84.31\nQ3 | 116.62 | 118.62\n0.95 | 173.54 | 175.54\n |\n | Quantile | Value |\n | ------------ | ------------ |\n0.05 | 55.37\nQ1 | 75.48\nMedian | 83.31\nQ3 | 117.62\n0.95 | 174.54\n## Team\n | Status | Expectation | Observed Value |\n | ------------ | ------------ | ------------ |\n✅ | values must never be null. | 100% not null\n✅ | values must always be greater than or equal to **1** characters long. 
| 0% unexpected\n### Info\n | | |\n | ------------ | ------------ |\nGreat Expectations Version | 0.11.8+4.g4ab34df3.dirty\nRun Name | getest run\nRun Time | 2020-07-27T17:19:32.959193+00:00\n### Batch Markers\n | | |\n | ------------ | ------------ |\n**ge_load_time** | **20200727T171932.954810Z**\n**pandas_data_fingerprint** | **8c46fdaf0bd356fd58b7bcd9b2e6012d**\n### Batch Kwargs\n | | |\n | ------------ | ------------ |\n**PandasInMemoryDF** | **True**\n**datasource** | **getest**\n**ge_batch_id** | **56615f40-d02d-11ea-b6ea-acde48001122**\n-----------------------------------------------------------\nPowered by [Great Expectations](https://greatexpectations.io/)\n# Validation Results\n## Overview\n### **Expectation Suite:** **basic.warning**\n**Dataasset:** **None**\n**Status:** **Failed**\n### Statistics\n | | |\n | ------------ | ------------ |\nEvaluated Expectations | 11\nSuccessful Expectations | 9\nUnsuccessful Expectations | 2\nSuccess Percent | ≈81.82%\n## Table-Level Expectations\n | Status | Expectation | Observed Value |\n | ------------ | ------------ | ------------ |\n❌ | Must have greater than or equal to **27000** and less than or equal to **33000** rows. | 30\n✅ | Must have exactly **3** columns. | 3\n✅ | Must have these columns in this order: **Team**, ** \"Payroll (millions)\"**, ** \"Wins\"** | ['Team', ' \"Payroll (millions)\"', ' \"Wins\"']\n## \"Payroll (millions)\"\n | Status | Expectation | Observed Value |\n | ------------ | ------------ | ------------ |\n✅ | values must never be null. | 100% not null\n✅ | minimum value must be greater than or equal to **54.24** and less than or equal to **56.24**. | 55.24\n✅ | maximum value must be greater than or equal to **196.96** and less than or equal to **198.96**. | 197.96\n✅ | mean must be greater than or equal to **97.01899999999998** and less than or equal to **99.01899999999998**. | ≈98.019\n❌ | median must be greater than or equal to **84000.75** and less than or equal to **86000.75**. | 85.75\n✅ | quantiles must be within the following value ranges.\n | Quantile | Min Value | Max Value |\n | ------------ | ------------ | ------------ |\n0.05 | 54.37 | 56.37\nQ1 | 74.48 | 76.48\nMedian | 82.31 | 84.31\nQ3 | 116.62 | 118.62\n0.95 | 173.54 | 175.54\n |\n | Quantile | Value |\n | ------------ | ------------ |\n0.05 | 55.37\nQ1 | 75.48\nMedian | 83.31\nQ3 | 117.62\n0.95 | 174.54\n## Team\n | Status | Expectation | Observed Value |\n | ------------ | ------------ | ------------ |\n✅ | values must never be null. | 100% not null\n✅ | values must always be greater than or equal to **1** characters long. | 0% unexpected\n### Info\n | | |\n | ------------ | ------------ |\nGreat Expectations Version | 0.11.8+4.g4ab34df3.dirty\nRun Name | getest run\nRun Time | 2020-07-27T17:19:32.959193+00:00\n### Batch Markers\n | | |\n | ------------ | ------------ |\n**ge_load_time** | **20200727T171932.954810Z**\n**pandas_data_fingerprint** | **8c46fdaf0bd356fd58b7bcd9b2e6012d**\n### Batch Kwargs\n | | |\n | ------------ | ------------ |\n**PandasInMemoryDF** | **True**\n**datasource** | **getest**\n**ge_batch_id** | **56615f40-d02d-11ea-b6ea-acde48001122**\n-----------------------------------------------------------\nPowered by [Great Expectations](https://greatexpectations.io/)\n\"\"\"", ".", "replace", "(", "\" \"", ",", "\"\"", ")", ".", "replace", "(", "\"\\t\"", ",", "\"\"", ")", ".", "replace", "(", "\"\\n\"", ",", "\"\"", ")", ")" ]
[ 307, 0 ]
[ 476, 5 ]
python
en
['en', 'error', 'th']
False
_GetLargePdbShimCcPath
()
Returns the path of the large_pdb_shim.cc file.
Returns the path of the large_pdb_shim.cc file.
def _GetLargePdbShimCcPath(): """Returns the path of the large_pdb_shim.cc file.""" this_dir = os.path.abspath(os.path.dirname(__file__)) src_dir = os.path.abspath(os.path.join(this_dir, '..', '..')) win_data_dir = os.path.join(src_dir, 'data', 'win') large_pdb_shim_cc = os.path.join(win_data_dir, 'large-pdb-shim.cc') return large_pdb_shim_cc
[ "def", "_GetLargePdbShimCcPath", "(", ")", ":", "this_dir", "=", "os", ".", "path", ".", "abspath", "(", "os", ".", "path", ".", "dirname", "(", "__file__", ")", ")", "src_dir", "=", "os", ".", "path", ".", "abspath", "(", "os", ".", "path", ".", "join", "(", "this_dir", ",", "'..'", ",", "'..'", ")", ")", "win_data_dir", "=", "os", ".", "path", ".", "join", "(", "src_dir", ",", "'data'", ",", "'win'", ")", "large_pdb_shim_cc", "=", "os", ".", "path", ".", "join", "(", "win_data_dir", ",", "'large-pdb-shim.cc'", ")", "return", "large_pdb_shim_cc" ]
[ 19, 0 ]
[ 25, 26 ]
python
en
['en', 'en', 'en']
True
_DeepCopySomeKeys
(in_dict, keys)
Performs a partial deep-copy on |in_dict|, only copying the keys in |keys|. Arguments: in_dict: The dictionary to copy. keys: The keys to be copied. If a key is in this list and doesn't exist in |in_dict| this is not an error. Returns: The partially deep-copied dictionary.
Performs a partial deep-copy on |in_dict|, only copying the keys in |keys|.
def _DeepCopySomeKeys(in_dict, keys): """Performs a partial deep-copy on |in_dict|, only copying the keys in |keys|. Arguments: in_dict: The dictionary to copy. keys: The keys to be copied. If a key is in this list and doesn't exist in |in_dict| this is not an error. Returns: The partially deep-copied dictionary. """ d = {} for key in keys: if key not in in_dict: continue d[key] = copy.deepcopy(in_dict[key]) return d
[ "def", "_DeepCopySomeKeys", "(", "in_dict", ",", "keys", ")", ":", "d", "=", "{", "}", "for", "key", "in", "keys", ":", "if", "key", "not", "in", "in_dict", ":", "continue", "d", "[", "key", "]", "=", "copy", ".", "deepcopy", "(", "in_dict", "[", "key", "]", ")", "return", "d" ]
[ 28, 0 ]
[ 43, 10 ]
python
en
['en', 'en', 'en']
True
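A quick check of the partial deep-copy semantics described above, assuming the function is in scope: copied keys are independent of the source, and keys absent from in_dict are skipped rather than raising:

spec = {
    'sources': ['a.cc', 'b.cc'],
    'defines': ['FOO'],
    'msvs_settings': {'VCLinkerTool': {}},
}

partial = _DeepCopySomeKeys(spec, ['sources', 'defines', 'no_such_key'])
partial['sources'].append('c.cc')

print(spec['sources'])            # ['a.cc', 'b.cc'] -- unchanged, the copy was deep
print('no_such_key' in partial)   # False -- missing keys are silently skipped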
_SuffixName
(name, suffix)
Add a suffix to the end of a target. Arguments: name: name of the target (foo#target) suffix: the suffix to be added Returns: Target name with suffix added (foo_suffix#target)
Add a suffix to the end of a target.
def _SuffixName(name, suffix): """Add a suffix to the end of a target. Arguments: name: name of the target (foo#target) suffix: the suffix to be added Returns: Target name with suffix added (foo_suffix#target) """ parts = name.rsplit('#', 1) parts[0] = '%s_%s' % (parts[0], suffix) return '#'.join(parts)
[ "def", "_SuffixName", "(", "name", ",", "suffix", ")", ":", "parts", "=", "name", ".", "rsplit", "(", "'#'", ",", "1", ")", "parts", "[", "0", "]", "=", "'%s_%s'", "%", "(", "parts", "[", "0", "]", ",", "suffix", ")", "return", "'#'", ".", "join", "(", "parts", ")" ]
[ 46, 0 ]
[ 57, 24 ]
python
en
['en', 'en', 'en']
True
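The name#target convention splits on the last '#', so the suffix is attached to the base name; two hypothetical inputs:

print(_SuffixName('base/base.gyp:base#target', 'large_pdb'))
# base/base.gyp:base_large_pdb#target

print(_SuffixName('plain_name', 'copy'))
# plain_name_copy  (no '#', so the suffix simply lands at the end)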
_ShardName
(name, number)
Add a shard number to the end of a target. Arguments: name: name of the target (foo#target) number: shard number Returns: Target name with shard added (foo_1#target)
Add a shard number to the end of a target.
def _ShardName(name, number): """Add a shard number to the end of a target. Arguments: name: name of the target (foo#target) number: shard number Returns: Target name with shard added (foo_1#target) """ return _SuffixName(name, str(number))
[ "def", "_ShardName", "(", "name", ",", "number", ")", ":", "return", "_SuffixName", "(", "name", ",", "str", "(", "number", ")", ")" ]
[ 60, 0 ]
[ 69, 39 ]
python
en
['en', 'en', 'en']
True
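_ShardName is _SuffixName with the shard index stringified; it produces the per-shard target names that ShardTargets (next row) generates for targets declaring msvs_shard:

for i in range(3):
    print(_ShardName('base/base.gyp:base#target', i))
# base/base.gyp:base_0#target
# base/base.gyp:base_1#target
# base/base.gyp:base_2#target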
ShardTargets
(target_list, target_dicts)
Shard some targets apart to work around the linker's limits. Arguments: target_list: List of target pairs: 'base/base.gyp:base'. target_dicts: Dict of target properties keyed on target pair. Returns: Tuple of the new sharded versions of the inputs.
Shard some targets apart to work around the linker's limits.
def ShardTargets(target_list, target_dicts): """Shard some targets apart to work around the linkers limits. Arguments: target_list: List of target pairs: 'base/base.gyp:base'. target_dicts: Dict of target properties keyed on target pair. Returns: Tuple of the new sharded versions of the inputs. """ # Gather the targets to shard, and how many pieces. targets_to_shard = {} for t in target_dicts: shards = int(target_dicts[t].get('msvs_shard', 0)) if shards: targets_to_shard[t] = shards # Shard target_list. new_target_list = [] for t in target_list: if t in targets_to_shard: for i in range(targets_to_shard[t]): new_target_list.append(_ShardName(t, i)) else: new_target_list.append(t) # Shard target_dict. new_target_dicts = {} for t in target_dicts: if t in targets_to_shard: for i in range(targets_to_shard[t]): name = _ShardName(t, i) new_target_dicts[name] = copy.copy(target_dicts[t]) new_target_dicts[name]['target_name'] = _ShardName( new_target_dicts[name]['target_name'], i) sources = new_target_dicts[name].get('sources', []) new_sources = [] for pos in range(i, len(sources), targets_to_shard[t]): new_sources.append(sources[pos]) new_target_dicts[name]['sources'] = new_sources else: new_target_dicts[t] = target_dicts[t] # Shard dependencies. for t in new_target_dicts: for deptype in ('dependencies', 'dependencies_original'): dependencies = copy.copy(new_target_dicts[t].get(deptype, [])) new_dependencies = [] for d in dependencies: if d in targets_to_shard: for i in range(targets_to_shard[d]): new_dependencies.append(_ShardName(d, i)) else: new_dependencies.append(d) new_target_dicts[t][deptype] = new_dependencies return (new_target_list, new_target_dicts)
[ "def", "ShardTargets", "(", "target_list", ",", "target_dicts", ")", ":", "# Gather the targets to shard, and how many pieces.", "targets_to_shard", "=", "{", "}", "for", "t", "in", "target_dicts", ":", "shards", "=", "int", "(", "target_dicts", "[", "t", "]", ".", "get", "(", "'msvs_shard'", ",", "0", ")", ")", "if", "shards", ":", "targets_to_shard", "[", "t", "]", "=", "shards", "# Shard target_list.", "new_target_list", "=", "[", "]", "for", "t", "in", "target_list", ":", "if", "t", "in", "targets_to_shard", ":", "for", "i", "in", "range", "(", "targets_to_shard", "[", "t", "]", ")", ":", "new_target_list", ".", "append", "(", "_ShardName", "(", "t", ",", "i", ")", ")", "else", ":", "new_target_list", ".", "append", "(", "t", ")", "# Shard target_dict.", "new_target_dicts", "=", "{", "}", "for", "t", "in", "target_dicts", ":", "if", "t", "in", "targets_to_shard", ":", "for", "i", "in", "range", "(", "targets_to_shard", "[", "t", "]", ")", ":", "name", "=", "_ShardName", "(", "t", ",", "i", ")", "new_target_dicts", "[", "name", "]", "=", "copy", ".", "copy", "(", "target_dicts", "[", "t", "]", ")", "new_target_dicts", "[", "name", "]", "[", "'target_name'", "]", "=", "_ShardName", "(", "new_target_dicts", "[", "name", "]", "[", "'target_name'", "]", ",", "i", ")", "sources", "=", "new_target_dicts", "[", "name", "]", ".", "get", "(", "'sources'", ",", "[", "]", ")", "new_sources", "=", "[", "]", "for", "pos", "in", "range", "(", "i", ",", "len", "(", "sources", ")", ",", "targets_to_shard", "[", "t", "]", ")", ":", "new_sources", ".", "append", "(", "sources", "[", "pos", "]", ")", "new_target_dicts", "[", "name", "]", "[", "'sources'", "]", "=", "new_sources", "else", ":", "new_target_dicts", "[", "t", "]", "=", "target_dicts", "[", "t", "]", "# Shard dependencies.", "for", "t", "in", "new_target_dicts", ":", "for", "deptype", "in", "(", "'dependencies'", ",", "'dependencies_original'", ")", ":", "dependencies", "=", "copy", ".", "copy", "(", "new_target_dicts", "[", "t", "]", ".", "get", "(", "deptype", ",", "[", "]", ")", ")", "new_dependencies", "=", "[", "]", "for", "d", "in", "dependencies", ":", "if", "d", "in", "targets_to_shard", ":", "for", "i", "in", "range", "(", "targets_to_shard", "[", "d", "]", ")", ":", "new_dependencies", ".", "append", "(", "_ShardName", "(", "d", ",", "i", ")", ")", "else", ":", "new_dependencies", ".", "append", "(", "d", ")", "new_target_dicts", "[", "t", "]", "[", "deptype", "]", "=", "new_dependencies", "return", "(", "new_target_list", ",", "new_target_dicts", ")" ]
[ 72, 0 ]
[ 124, 44 ]
python
en
['en', 'en', 'en']
True
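The source split in ShardTargets is a plain round-robin over the shard count; a tiny sketch with an invented source list (shard count 2) shows which files each shard would compile.

sources = ['a.cc', 'b.cc', 'c.cc', 'd.cc', 'e.cc']
shards = 2   # what 'msvs_shard': 2 would request
for i in range(shards):
    picked = [sources[pos] for pos in range(i, len(sources), shards)]
    print(i, picked)
# 0 ['a.cc', 'c.cc', 'e.cc']
# 1 ['b.cc', 'd.cc']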
_GetPdbPath
(target_dict, config_name, vars)
Returns the path to the PDB file that will be generated by a given configuration. The lookup proceeds as follows: - Look for an explicit path in the VCLinkerTool configuration block. - Look for an 'msvs_large_pdb_path' variable. - Use '<(PRODUCT_DIR)/<(product_name).(exe|dll).pdb' if 'product_name' is specified. - Use '<(PRODUCT_DIR)/<(target_name).(exe|dll).pdb'. Arguments: target_dict: The target dictionary to be searched. config_name: The name of the configuration of interest. vars: A dictionary of common GYP variables with generator-specific values. Returns: The path of the corresponding PDB file.
Returns the path to the PDB file that will be generated by a given configuration.
def _GetPdbPath(target_dict, config_name, vars): """Returns the path to the PDB file that will be generated by a given configuration. The lookup proceeds as follows: - Look for an explicit path in the VCLinkerTool configuration block. - Look for an 'msvs_large_pdb_path' variable. - Use '<(PRODUCT_DIR)/<(product_name).(exe|dll).pdb' if 'product_name' is specified. - Use '<(PRODUCT_DIR)/<(target_name).(exe|dll).pdb'. Arguments: target_dict: The target dictionary to be searched. config_name: The name of the configuration of interest. vars: A dictionary of common GYP variables with generator-specific values. Returns: The path of the corresponding PDB file. """ config = target_dict['configurations'][config_name] msvs = config.setdefault('msvs_settings', {}) linker = msvs.get('VCLinkerTool', {}) pdb_path = linker.get('ProgramDatabaseFile') if pdb_path: return pdb_path variables = target_dict.get('variables', {}) pdb_path = variables.get('msvs_large_pdb_path', None) if pdb_path: return pdb_path pdb_base = target_dict.get('product_name', target_dict['target_name']) pdb_base = '%s.%s.pdb' % (pdb_base, TARGET_TYPE_EXT[target_dict['type']]) pdb_path = vars['PRODUCT_DIR'] + '/' + pdb_base return pdb_path
[ "def", "_GetPdbPath", "(", "target_dict", ",", "config_name", ",", "vars", ")", ":", "config", "=", "target_dict", "[", "'configurations'", "]", "[", "config_name", "]", "msvs", "=", "config", ".", "setdefault", "(", "'msvs_settings'", ",", "{", "}", ")", "linker", "=", "msvs", ".", "get", "(", "'VCLinkerTool'", ",", "{", "}", ")", "pdb_path", "=", "linker", ".", "get", "(", "'ProgramDatabaseFile'", ")", "if", "pdb_path", ":", "return", "pdb_path", "variables", "=", "target_dict", ".", "get", "(", "'variables'", ",", "{", "}", ")", "pdb_path", "=", "variables", ".", "get", "(", "'msvs_large_pdb_path'", ",", "None", ")", "if", "pdb_path", ":", "return", "pdb_path", "pdb_base", "=", "target_dict", ".", "get", "(", "'product_name'", ",", "target_dict", "[", "'target_name'", "]", ")", "pdb_base", "=", "'%s.%s.pdb'", "%", "(", "pdb_base", ",", "TARGET_TYPE_EXT", "[", "target_dict", "[", "'type'", "]", "]", ")", "pdb_path", "=", "vars", "[", "'PRODUCT_DIR'", "]", "+", "'/'", "+", "pdb_base", "return", "pdb_path" ]
[ 127, 0 ]
[ 164, 17 ]
python
en
['en', 'en', 'en']
True
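A sketch of the three-level fallback in _GetPdbPath; the target dict and vars are invented, the helper is mirrored under a different name, and TARGET_TYPE_EXT (not shown in this record) is assumed to map target types to 'exe'/'dll' as the generated file names suggest.

TARGET_TYPE_EXT = {'executable': 'exe', 'shared_library': 'dll'}  # assumed mapping

def pdb_path_for(target_dict, config_name, vars):
    # Mirrors _GetPdbPath: explicit linker setting, then msvs_large_pdb_path, then a default name.
    config = target_dict['configurations'][config_name]
    linker = config.get('msvs_settings', {}).get('VCLinkerTool', {})
    if linker.get('ProgramDatabaseFile'):
        return linker['ProgramDatabaseFile']
    explicit = target_dict.get('variables', {}).get('msvs_large_pdb_path')
    if explicit:
        return explicit
    base = target_dict.get('product_name', target_dict['target_name'])
    return '%s/%s.%s.pdb' % (vars['PRODUCT_DIR'], base, TARGET_TYPE_EXT[target_dict['type']])

target = {'target_name': 'base', 'type': 'executable', 'configurations': {'Debug': {}}}
print(pdb_path_for(target, 'Debug', {'PRODUCT_DIR': 'out/Debug'}))   # out/Debug/base.exe.pdb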
InsertLargePdbShims
(target_list, target_dicts, vars)
Insert a shim target that forces the linker to use 4KB pagesize PDBs. This is a workaround for targets with PDBs greater than 1GB in size, the limit for the 1KB pagesize PDBs created by the linker by default. Arguments: target_list: List of target pairs: 'base/base.gyp:base'. target_dicts: Dict of target properties keyed on target pair. vars: A dictionary of common GYP variables with generator-specific values. Returns: Tuple of the shimmed version of the inputs.
Insert a shim target that forces the linker to use 4KB pagesize PDBs.
def InsertLargePdbShims(target_list, target_dicts, vars): """Insert a shim target that forces the linker to use 4KB pagesize PDBs. This is a workaround for targets with PDBs greater than 1GB in size, the limit for the 1KB pagesize PDBs created by the linker by default. Arguments: target_list: List of target pairs: 'base/base.gyp:base'. target_dicts: Dict of target properties keyed on target pair. vars: A dictionary of common GYP variables with generator-specific values. Returns: Tuple of the shimmed version of the inputs. """ # Determine which targets need shimming. targets_to_shim = [] for t in target_dicts: target_dict = target_dicts[t] # We only want to shim targets that have msvs_large_pdb enabled. if not int(target_dict.get('msvs_large_pdb', 0)): continue # This is intended for executable, shared_library and loadable_module # targets where every configuration is set up to produce a PDB output. # If any of these conditions is not true then the shim logic will fail # below. targets_to_shim.append(t) large_pdb_shim_cc = _GetLargePdbShimCcPath() for t in targets_to_shim: target_dict = target_dicts[t] target_name = target_dict.get('target_name') base_dict = _DeepCopySomeKeys(target_dict, ['configurations', 'default_configuration', 'toolset']) # This is the dict for copying the source file (part of the GYP tree) # to the intermediate directory of the project. This is necessary because # we can't always build a relative path to the shim source file (on Windows # GYP and the project may be on different drives), and Ninja hates absolute # paths (it ends up generating the .obj and .obj.d alongside the source # file, polluting GYPs tree). copy_suffix = 'large_pdb_copy' copy_target_name = target_name + '_' + copy_suffix full_copy_target_name = _SuffixName(t, copy_suffix) shim_cc_basename = os.path.basename(large_pdb_shim_cc) shim_cc_dir = vars['SHARED_INTERMEDIATE_DIR'] + '/' + copy_target_name shim_cc_path = shim_cc_dir + '/' + shim_cc_basename copy_dict = copy.deepcopy(base_dict) copy_dict['target_name'] = copy_target_name copy_dict['type'] = 'none' copy_dict['sources'] = [ large_pdb_shim_cc ] copy_dict['copies'] = [{ 'destination': shim_cc_dir, 'files': [ large_pdb_shim_cc ] }] # This is the dict for the PDB generating shim target. It depends on the # copy target. shim_suffix = 'large_pdb_shim' shim_target_name = target_name + '_' + shim_suffix full_shim_target_name = _SuffixName(t, shim_suffix) shim_dict = copy.deepcopy(base_dict) shim_dict['target_name'] = shim_target_name shim_dict['type'] = 'static_library' shim_dict['sources'] = [ shim_cc_path ] shim_dict['dependencies'] = [ full_copy_target_name ] # Set up the shim to output its PDB to the same location as the final linker # target. for config_name, config in shim_dict.get('configurations').iteritems(): pdb_path = _GetPdbPath(target_dict, config_name, vars) # A few keys that we don't want to propagate. for key in ['msvs_precompiled_header', 'msvs_precompiled_source', 'test']: config.pop(key, None) msvs = config.setdefault('msvs_settings', {}) # Update the compiler directives in the shim target. compiler = msvs.setdefault('VCCLCompilerTool', {}) compiler['DebugInformationFormat'] = '3' compiler['ProgramDataBaseFileName'] = pdb_path # Set the explicit PDB path in the appropriate configuration of the # original target. config = target_dict['configurations'][config_name] msvs = config.setdefault('msvs_settings', {}) linker = msvs.setdefault('VCLinkerTool', {}) linker['GenerateDebugInformation'] = 'true' linker['ProgramDatabaseFile'] = pdb_path # Add the new targets. They must go to the beginning of the list so that # the dependency generation works as expected in ninja. target_list.insert(0, full_copy_target_name) target_list.insert(0, full_shim_target_name) target_dicts[full_copy_target_name] = copy_dict target_dicts[full_shim_target_name] = shim_dict # Update the original target to depend on the shim target. target_dict.setdefault('dependencies', []).append(full_shim_target_name) return (target_list, target_dicts)
[ "def", "InsertLargePdbShims", "(", "target_list", ",", "target_dicts", ",", "vars", ")", ":", "# Determine which targets need shimming.", "targets_to_shim", "=", "[", "]", "for", "t", "in", "target_dicts", ":", "target_dict", "=", "target_dicts", "[", "t", "]", "# We only want to shim targets that have msvs_large_pdb enabled.", "if", "not", "int", "(", "target_dict", ".", "get", "(", "'msvs_large_pdb'", ",", "0", ")", ")", ":", "continue", "# This is intended for executable, shared_library and loadable_module", "# targets where every configuration is set up to produce a PDB output.", "# If any of these conditions is not true then the shim logic will fail", "# below.", "targets_to_shim", ".", "append", "(", "t", ")", "large_pdb_shim_cc", "=", "_GetLargePdbShimCcPath", "(", ")", "for", "t", "in", "targets_to_shim", ":", "target_dict", "=", "target_dicts", "[", "t", "]", "target_name", "=", "target_dict", ".", "get", "(", "'target_name'", ")", "base_dict", "=", "_DeepCopySomeKeys", "(", "target_dict", ",", "[", "'configurations'", ",", "'default_configuration'", ",", "'toolset'", "]", ")", "# This is the dict for copying the source file (part of the GYP tree)", "# to the intermediate directory of the project. This is necessary because", "# we can't always build a relative path to the shim source file (on Windows", "# GYP and the project may be on different drives), and Ninja hates absolute", "# paths (it ends up generating the .obj and .obj.d alongside the source", "# file, polluting GYPs tree).", "copy_suffix", "=", "'large_pdb_copy'", "copy_target_name", "=", "target_name", "+", "'_'", "+", "copy_suffix", "full_copy_target_name", "=", "_SuffixName", "(", "t", ",", "copy_suffix", ")", "shim_cc_basename", "=", "os", ".", "path", ".", "basename", "(", "large_pdb_shim_cc", ")", "shim_cc_dir", "=", "vars", "[", "'SHARED_INTERMEDIATE_DIR'", "]", "+", "'/'", "+", "copy_target_name", "shim_cc_path", "=", "shim_cc_dir", "+", "'/'", "+", "shim_cc_basename", "copy_dict", "=", "copy", ".", "deepcopy", "(", "base_dict", ")", "copy_dict", "[", "'target_name'", "]", "=", "copy_target_name", "copy_dict", "[", "'type'", "]", "=", "'none'", "copy_dict", "[", "'sources'", "]", "=", "[", "large_pdb_shim_cc", "]", "copy_dict", "[", "'copies'", "]", "=", "[", "{", "'destination'", ":", "shim_cc_dir", ",", "'files'", ":", "[", "large_pdb_shim_cc", "]", "}", "]", "# This is the dict for the PDB generating shim target. 
It depends on the", "# copy target.", "shim_suffix", "=", "'large_pdb_shim'", "shim_target_name", "=", "target_name", "+", "'_'", "+", "shim_suffix", "full_shim_target_name", "=", "_SuffixName", "(", "t", ",", "shim_suffix", ")", "shim_dict", "=", "copy", ".", "deepcopy", "(", "base_dict", ")", "shim_dict", "[", "'target_name'", "]", "=", "shim_target_name", "shim_dict", "[", "'type'", "]", "=", "'static_library'", "shim_dict", "[", "'sources'", "]", "=", "[", "shim_cc_path", "]", "shim_dict", "[", "'dependencies'", "]", "=", "[", "full_copy_target_name", "]", "# Set up the shim to output its PDB to the same location as the final linker", "# target.", "for", "config_name", ",", "config", "in", "shim_dict", ".", "get", "(", "'configurations'", ")", ".", "iteritems", "(", ")", ":", "pdb_path", "=", "_GetPdbPath", "(", "target_dict", ",", "config_name", ",", "vars", ")", "# A few keys that we don't want to propagate.", "for", "key", "in", "[", "'msvs_precompiled_header'", ",", "'msvs_precompiled_source'", ",", "'test'", "]", ":", "config", ".", "pop", "(", "key", ",", "None", ")", "msvs", "=", "config", ".", "setdefault", "(", "'msvs_settings'", ",", "{", "}", ")", "# Update the compiler directives in the shim target.", "compiler", "=", "msvs", ".", "setdefault", "(", "'VCCLCompilerTool'", ",", "{", "}", ")", "compiler", "[", "'DebugInformationFormat'", "]", "=", "'3'", "compiler", "[", "'ProgramDataBaseFileName'", "]", "=", "pdb_path", "# Set the explicit PDB path in the appropriate configuration of the", "# original target.", "config", "=", "target_dict", "[", "'configurations'", "]", "[", "config_name", "]", "msvs", "=", "config", ".", "setdefault", "(", "'msvs_settings'", ",", "{", "}", ")", "linker", "=", "msvs", ".", "setdefault", "(", "'VCLinkerTool'", ",", "{", "}", ")", "linker", "[", "'GenerateDebugInformation'", "]", "=", "'true'", "linker", "[", "'ProgramDatabaseFile'", "]", "=", "pdb_path", "# Add the new targets. They must go to the beginning of the list so that", "# the dependency generation works as expected in ninja.", "target_list", ".", "insert", "(", "0", ",", "full_copy_target_name", ")", "target_list", ".", "insert", "(", "0", ",", "full_shim_target_name", ")", "target_dicts", "[", "full_copy_target_name", "]", "=", "copy_dict", "target_dicts", "[", "full_shim_target_name", "]", "=", "shim_dict", "# Update the original target to depend on the shim target.", "target_dict", ".", "setdefault", "(", "'dependencies'", ",", "[", "]", ")", ".", "append", "(", "full_shim_target_name", ")", "return", "(", "target_list", ",", "target_dicts", ")" ]
[ 167, 0 ]
[ 269, 36 ]
python
en
['en', 'en', 'en']
True
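The shim pass is opt-in per target via the msvs_large_pdb flag it reads; a hypothetical GYP target that would be picked up looks roughly like this (all names are invented, and only the flag itself is taken from the code above).

example_gyp = {
  'targets': [
    {
      'target_name': 'big_binary',
      'type': 'executable',            # executable / shared_library / loadable_module
      'msvs_large_pdb': 1,             # triggers the copy + static_library shim targets
      'sources': ['main.cc'],
    },
  ],
}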
test_cli_datasource_list
( mock_emit, empty_data_context, empty_sqlite_db, caplog, monkeypatch )
Test an empty project and after adding a single datasource.
Test an empty project and after adding a single datasource.
def test_cli_datasource_list( mock_emit, empty_data_context, empty_sqlite_db, caplog, monkeypatch ): """Test an empty project and after adding a single datasource.""" monkeypatch.delenv( "GE_USAGE_STATS", raising=False ) # Undo the project-wide test default context: DataContext = empty_data_context runner = CliRunner(mix_stderr=False) monkeypatch.chdir(os.path.dirname(context.root_directory)) result = runner.invoke( cli, "--v3-api datasource list", catch_exceptions=False, ) stdout = result.stdout.strip() assert "No Datasources found" in stdout assert context.list_datasources() == [] datasource_name = "wow_a_datasource" _add_datasource_and_credentials_to_context( context, datasource_name, empty_sqlite_db ) runner = CliRunner(mix_stderr=False) monkeypatch.chdir(os.path.dirname(context.root_directory)) result = runner.invoke( cli, ["--v3-api", "datasource", "list"], catch_exceptions=False, ) expected_output = """\ Using v3 (Batch Request) API\x1b[0m 1 Datasource found:  - name: wow_a_datasource class_name: SqlAlchemyDatasource """.strip() stdout = result.stdout.strip() assert stdout == expected_output assert_no_logging_messages_or_tracebacks(caplog, result) expected_call_args_list = [ mock.call( {"event_payload": {}, "event": "data_context.__init__", "success": True} ), mock.call( { "event": "cli.datasource.list.begin", "event_payload": {"api_version": "v3"}, "success": True, } ), mock.call( { "event": "cli.datasource.list.end", "event_payload": {"api_version": "v3"}, "success": True, } ), ] assert mock_emit.call_count == len(expected_call_args_list) assert mock_emit.call_args_list == expected_call_args_list
[ "def", "test_cli_datasource_list", "(", "mock_emit", ",", "empty_data_context", ",", "empty_sqlite_db", ",", "caplog", ",", "monkeypatch", ")", ":", "monkeypatch", ".", "delenv", "(", "\"GE_USAGE_STATS\"", ",", "raising", "=", "False", ")", "# Undo the project-wide test default", "context", ":", "DataContext", "=", "empty_data_context", "runner", "=", "CliRunner", "(", "mix_stderr", "=", "False", ")", "monkeypatch", ".", "chdir", "(", "os", ".", "path", ".", "dirname", "(", "context", ".", "root_directory", ")", ")", "result", "=", "runner", ".", "invoke", "(", "cli", ",", "\"--v3-api datasource list\"", ",", "catch_exceptions", "=", "False", ",", ")", "stdout", "=", "result", ".", "stdout", ".", "strip", "(", ")", "assert", "\"No Datasources found\"", "in", "stdout", "assert", "context", ".", "list_datasources", "(", ")", "==", "[", "]", "datasource_name", "=", "\"wow_a_datasource\"", "_add_datasource_and_credentials_to_context", "(", "context", ",", "datasource_name", ",", "empty_sqlite_db", ")", "runner", "=", "CliRunner", "(", "mix_stderr", "=", "False", ")", "monkeypatch", ".", "chdir", "(", "os", ".", "path", ".", "dirname", "(", "context", ".", "root_directory", ")", ")", "result", "=", "runner", ".", "invoke", "(", "cli", ",", "[", "\"--v3-api\"", ",", "\"datasource\"", ",", "\"list\"", "]", ",", "catch_exceptions", "=", "False", ",", ")", "expected_output", "=", "\"\"\"\\\nUsing v3 (Batch Request) API\\x1b[0m\n1 Datasource found:\u001b[0m\n\u001b[0m\n - \u001b[36mname:\u001b[0m wow_a_datasource\u001b[0m\n \u001b[36mclass_name:\u001b[0m SqlAlchemyDatasource\u001b[0m\n\"\"\"", ".", "strip", "(", ")", "stdout", "=", "result", ".", "stdout", ".", "strip", "(", ")", "assert", "stdout", "==", "expected_output", "assert_no_logging_messages_or_tracebacks", "(", "caplog", ",", "result", ")", "expected_call_args_list", "=", "[", "mock", ".", "call", "(", "{", "\"event_payload\"", ":", "{", "}", ",", "\"event\"", ":", "\"data_context.__init__\"", ",", "\"success\"", ":", "True", "}", ")", ",", "mock", ".", "call", "(", "{", "\"event\"", ":", "\"cli.datasource.list.begin\"", ",", "\"event_payload\"", ":", "{", "\"api_version\"", ":", "\"v3\"", "}", ",", "\"success\"", ":", "True", ",", "}", ")", ",", "mock", ".", "call", "(", "{", "\"event\"", ":", "\"cli.datasource.list.end\"", ",", "\"event_payload\"", ":", "{", "\"api_version\"", ":", "\"v3\"", "}", ",", "\"success\"", ":", "True", ",", "}", ")", ",", "]", "assert", "mock_emit", ".", "call_count", "==", "len", "(", "expected_call_args_list", ")", "assert", "mock_emit", ".", "call_args_list", "==", "expected_call_args_list" ]
[ 16, 0 ]
[ 83, 62 ]
python
en
['en', 'en', 'en']
True
CallToActionRenderer.render
(cls, cta_object)
:param cta_object: dict { "header": # optional, can be a string or string template "buttons": # list of CallToActionButtons } :return: dict { "header": # optional, can be a string or string template "buttons": # list of CallToActionButtons }
:param cta_object: dict { "header": # optional, can be a string or string template "buttons": # list of CallToActionButtons } :return: dict { "header": # optional, can be a string or string template "buttons": # list of CallToActionButtons }
def render(cls, cta_object): """ :param cta_object: dict { "header": # optional, can be a string or string template "buttons": # list of CallToActionButtons } :return: dict { "header": # optional, can be a string or string template "buttons": # list of CallToActionButtons } """ if not cta_object.get("header"): cta_object["header"] = cls._document_defaults.get("header") cta_object["styling"] = cls._document_defaults.get("styling") cta_object["tooltip_icon"] = { "template": "$icon", "params": {"icon": ""}, "tooltip": { "content": "To disable this footer, set the show_how_to_buttons flag in your project's data_docs_sites config to false." }, "styling": { "params": { "icon": { "tag": "i", "classes": ["m-1", "fas", "fa-question-circle"], } } }, } return cta_object
[ "def", "render", "(", "cls", ",", "cta_object", ")", ":", "if", "not", "cta_object", ".", "get", "(", "\"header\"", ")", ":", "cta_object", "[", "\"header\"", "]", "=", "cls", ".", "_document_defaults", ".", "get", "(", "\"header\"", ")", "cta_object", "[", "\"styling\"", "]", "=", "cls", ".", "_document_defaults", ".", "get", "(", "\"styling\"", ")", "cta_object", "[", "\"tooltip_icon\"", "]", "=", "{", "\"template\"", ":", "\"$icon\"", ",", "\"params\"", ":", "{", "\"icon\"", ":", "\"\"", "}", ",", "\"tooltip\"", ":", "{", "\"content\"", ":", "\"To disable this footer, set the show_how_to_buttons flag in your project's data_docs_sites config to false.\"", "}", ",", "\"styling\"", ":", "{", "\"params\"", ":", "{", "\"icon\"", ":", "{", "\"tag\"", ":", "\"i\"", ",", "\"classes\"", ":", "[", "\"m-1\"", ",", "\"fas\"", ",", "\"fa-question-circle\"", "]", ",", "}", "}", "}", ",", "}", "return", "cta_object" ]
[ 22, 4 ]
[ 56, 25 ]
python
en
['en', 'error', 'th']
False
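A short usage sketch, assuming CallToActionRenderer (and its _document_defaults, which this record does not show) is importable from the Great Expectations render module; the button payload is invented.

cta = {"buttons": [{"title": "How to create Expectations", "link": "#"}]}
rendered = CallToActionRenderer.render(cta)   # import path not shown in this excerpt

assert "header" in rendered                   # filled from the class defaults when absent
assert "tooltip_icon" in rendered             # always attached, with the show_how_to_buttons tooltip
assert rendered["buttons"] == cta["buttons"]  # caller-supplied buttons pass through unchanged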
IRResource.lookup_default
(self, key: str, default_value: Optional[Any]=None, lookup_class: Optional[str]=None)
Look up a key in the Ambassador module's "defaults" element. The "lookup class" is - the lookup_class parameter if one was passed, else - self.default_class if that's set, else - None. We can look in two places for key -- the first match wins: 1. defaults[lookup class][key] if the lookup key is neither None nor "/" 2. defaults[key] (A lookup class of "/" skips step 1.) If we don't find the key in either place, return the given default_value. If we _do_ find the key, _return a copy of the data!_ If we return the data itself and the caller later modifies it... that's a problem. :param key: the key to look up :param default_value: the value to return if nothing is found in defaults. :param lookup_class: the lookup class, see above :return: Any
Look up a key in the Ambassador module's "defaults" element.
def lookup_default(self, key: str, default_value: Optional[Any]=None, lookup_class: Optional[str]=None) -> Any: """ Look up a key in the Ambassador module's "defaults" element. The "lookup class" is - the lookup_class parameter if one was passed, else - self.default_class if that's set, else - None. We can look in two places for key -- the first match wins: 1. defaults[lookup class][key] if the lookup key is neither None nor "/" 2. defaults[key] (A lookup class of "/" skips step 1.) If we don't find the key in either place, return the given default_value. If we _do_ find the key, _return a copy of the data!_ If we return the data itself and the caller later modifies it... that's a problem. :param key: the key to look up :param default_value: the value to return if nothing is found in defaults. :param lookup_class: the lookup class, see above :return: Any """ defaults = self.ir.ambassador_module.get('defaults', {}) lclass = lookup_class if not lclass: lclass = self.get('default_class', None) if lclass and (lclass != '/'): # Case 1. classdict = defaults.get(lclass, None) if classdict and (key in classdict): return copy.deepcopy(classdict[key]) # We didn't find anything in case 1. Try case 2. if defaults and (key in defaults): return copy.deepcopy(defaults[key]) # We didn't find anything in either case. Return the default value. return default_value
[ "def", "lookup_default", "(", "self", ",", "key", ":", "str", ",", "default_value", ":", "Optional", "[", "Any", "]", "=", "None", ",", "lookup_class", ":", "Optional", "[", "str", "]", "=", "None", ")", "->", "Any", ":", "defaults", "=", "self", ".", "ir", ".", "ambassador_module", ".", "get", "(", "'defaults'", ",", "{", "}", ")", "lclass", "=", "lookup_class", "if", "not", "lclass", ":", "lclass", "=", "self", ".", "get", "(", "'default_class'", ",", "None", ")", "if", "lclass", "and", "(", "lclass", "!=", "'/'", ")", ":", "# Case 1.", "classdict", "=", "defaults", ".", "get", "(", "lclass", ",", "None", ")", "if", "classdict", "and", "(", "key", "in", "classdict", ")", ":", "return", "copy", ".", "deepcopy", "(", "classdict", "[", "key", "]", ")", "# We didn't find anything in case 1. Try case 2.", "if", "defaults", "and", "(", "key", "in", "defaults", ")", ":", "return", "copy", ".", "deepcopy", "(", "defaults", "[", "key", "]", ")", "# We didn't find anything in either case. Return the default value.", "return", "default_value" ]
[ 87, 4 ]
[ 132, 28 ]
python
en
['en', 'error', 'th']
False
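A self-contained sketch of the two-step resolution in lookup_default, using an invented defaults block in place of the Ambassador module (the keys and numbers mean nothing in particular).

DEFAULTS = {
    'httpmapping': {'timeout_ms': 2500},   # class-scoped defaults
    'timeout_ms': 1000,                    # global default
}

def lookup_default(key, default_value=None, lookup_class=None):
    # Case 1: defaults[lookup_class][key], unless the class is None or '/'.
    if lookup_class and lookup_class != '/':
        classdict = DEFAULTS.get(lookup_class)
        if classdict and key in classdict:
            return classdict[key]
    # Case 2: defaults[key].
    if key in DEFAULTS:
        return DEFAULTS[key]
    return default_value

assert lookup_default('timeout_ms', lookup_class='httpmapping') == 2500   # case 1 wins
assert lookup_default('timeout_ms', lookup_class='/') == 1000             # '/' skips case 1
assert lookup_default('retries', default_value=3) == 3                    # nothing found anywhere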
IRResource.lookup
(self, key: str, *args, default_class: Optional[str]=None, default_key: Optional[str]=None)
Look up a key in this IRResource, with a fallback to the Ambassador module's "defaults" element. Here's the resolution order: - if key is present in self, use its value. - if not, use lookup_default above to try to find a value in the Ambassador module - if we don't find anything, but a default value was passed in as *args[0], return that. - if all else fails, return None. :param key: the key to look up :param default_class: the default class for the fallback lookup (optional, see above) :param default_key: the key for the fallback lookup (optional, defaults to key) :param args: an all-else-fails default value can go here, see above :return: Any
Look up a key in this IRResource, with a fallback to the Ambassador module's "defaults" element.
def lookup(self, key: str, *args, default_class: Optional[str]=None, default_key: Optional[str]=None) -> Any: """ Look up a key in this IRResource, with a fallback to the Ambassador module's "defaults" element. Here's the resolution order: - if key is present in self, use its value. - if not, use lookup_default above to try to find a value in the Ambassador module - if we don't find anything, but a default value was passed in as *args[0], return that. - if all else fails, return None. :param key: the key to look up :param default_class: the default class for the fallback lookup (optional, see above) :param default_key: the key for the fallback lookup (optional, defaults to key) :param args: an all-else-fails default value can go here, see above :return: Any """ value = self.get(key, None) default_value = None if len(args) > 0: default_value = args[0] if value is None: if not default_key: default_key = key value = self.lookup_default(default_key, default_value=default_value, lookup_class=default_class) return value
[ "def", "lookup", "(", "self", ",", "key", ":", "str", ",", "*", "args", ",", "default_class", ":", "Optional", "[", "str", "]", "=", "None", ",", "default_key", ":", "Optional", "[", "str", "]", "=", "None", ")", "->", "Any", ":", "value", "=", "self", ".", "get", "(", "key", ",", "None", ")", "default_value", "=", "None", "if", "len", "(", "args", ")", ">", "0", ":", "default_value", "=", "args", "[", "0", "]", "if", "value", "is", "None", ":", "if", "not", "default_key", ":", "default_key", "=", "key", "value", "=", "self", ".", "lookup_default", "(", "default_key", ",", "default_value", "=", "default_value", ",", "lookup_class", "=", "default_class", ")", "return", "value" ]
[ 134, 4 ]
[ 166, 20 ]
python
en
['en', 'error', 'th']
False
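And the resolution order of lookup() layered on top of that, again with a toy stand-in rather than the real IRResource; this builds on the lookup_default sketch above.

class ToyResource(dict):
    # Minimal stand-in mirroring the order described in the docstring (invented, not the real class).
    def lookup(self, key, *args, default_key=None):
        default_value = args[0] if args else None
        value = self.get(key)
        if value is None:
            # fall through to the module defaults via the toy lookup_default above
            value = lookup_default(default_key or key, default_value=default_value)
        return value

r = ToyResource({'prefix': '/svc/'})
assert r.lookup('prefix') == '/svc/'       # present on the resource itself
assert r.lookup('timeout_ms') == 1000      # resolved from the defaults block
assert r.lookup('retries', 3) == 3         # falls back to the inline default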
XephyrDisplay.__init__
(self, size=(1024, 768), color_depth=24, bgcolor='black')
:param bgcolor: 'black' or 'white'
:param bgcolor: 'black' or 'white'
def __init__(self, size=(1024, 768), color_depth=24, bgcolor='black'): ''' :param bgcolor: 'black' or 'white' ''' self.color_depth = color_depth self.size = size self.bgcolor = bgcolor self.screen = 0 self.process = None self.display = None AbstractDisplay.__init__(self)
[ "def", "__init__", "(", "self", ",", "size", "=", "(", "1024", ",", "768", ")", ",", "color_depth", "=", "24", ",", "bgcolor", "=", "'black'", ")", ":", "self", ".", "color_depth", "=", "color_depth", "self", ".", "size", "=", "size", "self", ".", "bgcolor", "=", "bgcolor", "self", ".", "screen", "=", "0", "self", ".", "process", "=", "None", "self", ".", "display", "=", "None", "AbstractDisplay", ".", "__init__", "(", "self", ")" ]
[ 12, 4 ]
[ 22, 38 ]
python
en
['en', 'error', 'th']
False
CounterfactualUnitSelector.fit
(self, data, treatment, outcome)
Fits the class.
Fits the class.
def fit(self, data, treatment, outcome): ''' Fits the class. ''' if self._gain_equality_check(): self._fit_segment_model(data, treatment, outcome) else: self._fit_segment_model(data, treatment, outcome) self._fit_condprob_models(data, treatment, outcome)
[ "def", "fit", "(", "self", ",", "data", ",", "treatment", ",", "outcome", ")", ":", "if", "self", ".", "_gain_equality_check", "(", ")", ":", "self", ".", "_fit_segment_model", "(", "data", ",", "treatment", ",", "outcome", ")", "else", ":", "self", ".", "_fit_segment_model", "(", "data", ",", "treatment", ",", "outcome", ")", "self", ".", "_fit_condprob_models", "(", "data", ",", "treatment", ",", "outcome", ")" ]
[ 65, 4 ]
[ 77, 63 ]
python
en
['en', 'error', 'th']
False
CounterfactualUnitSelector.predict
(self, data, treatment, outcome)
Predicts an individual-level payoff. If gain equality is satisfied, uses the exact function; if not, uses the midpoint between bounds.
Predicts an individual-level payoff. If gain equality is satisfied, uses the exact function; if not, uses the midpoint between bounds.
def predict(self, data, treatment, outcome): ''' Predicts an individual-level payoff. If gain equality is satisfied, uses the exact function; if not, uses the midpoint between bounds. ''' if self._gain_equality_check(): est_payoff = self._get_exact_benefit(data, treatment, outcome) else: est_payoff = self._obj_func_midp(data, treatment, outcome) return est_payoff
[ "def", "predict", "(", "self", ",", "data", ",", "treatment", ",", "outcome", ")", ":", "if", "self", ".", "_gain_equality_check", "(", ")", ":", "est_payoff", "=", "self", ".", "_get_exact_benefit", "(", "data", ",", "treatment", ",", "outcome", ")", "else", ":", "est_payoff", "=", "self", ".", "_obj_func_midp", "(", "data", ",", "treatment", ",", "outcome", ")", "return", "est_payoff" ]
[ 79, 4 ]
[ 93, 25 ]
python
en
['en', 'error', 'th']
False
CounterfactualUnitSelector._gain_equality_check
(self)
Checks if gain equality is satisfied. If so, the optimization task can be simplified.
Checks if gain equality is satisfied. If so, the optimization task can be simplified.
def _gain_equality_check(self): ''' Checks if gain equality is satisfied. If so, the optimization task can be simplified. ''' return self.complier_payoff + self.defier_payoff == \ self.alwaystaker_payoff + self.nevertaker_payoff
[ "def", "_gain_equality_check", "(", "self", ")", ":", "return", "self", ".", "complier_payoff", "+", "self", ".", "defier_payoff", "==", "self", ".", "alwaystaker_payoff", "+", "self", ".", "nevertaker_payoff" ]
[ 95, 4 ]
[ 102, 60 ]
python
en
['en', 'error', 'th']
False
CounterfactualUnitSelector._make_segments
(data, treatment, outcome)
Constructs the following segments: * AC = Pr(Y = 1, W = 1 /mid X) * AD = Pr(Y = 1, W = 0 /mid X) * ND = Pr(Y = 0, W = 1 /mid X) * ND = Pr(Y = 0, W = 0 /mid X) where the names of the outcomes correspond the combinations of the relevant segments, eg AC = Always-taker or Complier.
Constructs the following segments:
def _make_segments(data, treatment, outcome): ''' Constructs the following segments: * AC = Pr(Y = 1, W = 1 /mid X) * AD = Pr(Y = 1, W = 0 /mid X) * ND = Pr(Y = 0, W = 1 /mid X) * ND = Pr(Y = 0, W = 0 /mid X) where the names of the outcomes correspond the combinations of the relevant segments, eg AC = Always-taker or Complier. ''' segments = np.empty(data.shape[0], dtype='object') segments[(data[treatment] == 1) & (data[outcome] == 1)] = 'AC' segments[(data[treatment] == 0) & (data[outcome] == 1)] = 'AD' segments[(data[treatment] == 1) & (data[outcome] == 0)] = 'ND' segments[(data[treatment] == 0) & (data[outcome] == 0)] = 'NC' return segments
[ "def", "_make_segments", "(", "data", ",", "treatment", ",", "outcome", ")", ":", "segments", "=", "np", ".", "empty", "(", "data", ".", "shape", "[", "0", "]", ",", "dtype", "=", "'object'", ")", "segments", "[", "(", "data", "[", "treatment", "]", "==", "1", ")", "&", "(", "data", "[", "outcome", "]", "==", "1", ")", "]", "=", "'AC'", "segments", "[", "(", "data", "[", "treatment", "]", "==", "0", ")", "&", "(", "data", "[", "outcome", "]", "==", "1", ")", "]", "=", "'AD'", "segments", "[", "(", "data", "[", "treatment", "]", "==", "1", ")", "&", "(", "data", "[", "outcome", "]", "==", "0", ")", "]", "=", "'ND'", "segments", "[", "(", "data", "[", "treatment", "]", "==", "0", ")", "&", "(", "data", "[", "outcome", "]", "==", "0", ")", "]", "=", "'NC'", "return", "segments" ]
[ 105, 4 ]
[ 125, 23 ]
python
en
['en', 'error', 'th']
False
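A tiny worked example of the segment labels on four rows; 'w' and 'y' stand in for the treatment and outcome columns and are invented names.

import numpy as np
import pandas as pd

df = pd.DataFrame({'w': [1, 0, 1, 0], 'y': [1, 1, 0, 0]})

segments = np.empty(df.shape[0], dtype='object')
segments[(df['w'] == 1) & (df['y'] == 1)] = 'AC'   # treated, converted
segments[(df['w'] == 0) & (df['y'] == 1)] = 'AD'   # control, converted
segments[(df['w'] == 1) & (df['y'] == 0)] = 'ND'   # treated, not converted
segments[(df['w'] == 0) & (df['y'] == 0)] = 'NC'   # control, not converted
print(list(segments))   # ['AC', 'AD', 'ND', 'NC']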
CounterfactualUnitSelector._fit_segment_model
(self, data, treatment, outcome)
Fits a classifier for estimating the probabilities for the unit segment combinations.
Fits a classifier for estimating the probabilities for the unit segment combinations.
def _fit_segment_model(self, data, treatment, outcome): ''' Fits a classifier for estimating the probabilities for the unit segment combinations. ''' model = clone(self.learner) X = data.drop([treatment, outcome], axis=1) y = self._make_segments(data, treatment, outcome) self.segment_model = model.fit(X, y)
[ "def", "_fit_segment_model", "(", "self", ",", "data", ",", "treatment", ",", "outcome", ")", ":", "model", "=", "clone", "(", "self", ".", "learner", ")", "X", "=", "data", ".", "drop", "(", "[", "treatment", ",", "outcome", "]", ",", "axis", "=", "1", ")", "y", "=", "self", ".", "_make_segments", "(", "data", ",", "treatment", ",", "outcome", ")", "self", ".", "segment_model", "=", "model", ".", "fit", "(", "X", ",", "y", ")" ]
[ 127, 4 ]
[ 138, 44 ]
python
en
['en', 'error', 'th']
False
CounterfactualUnitSelector._fit_condprob_models
(self, data, treatment, outcome)
Fits two classifiers to estimate conversion probabilities conditional on the treatment.
Fits two classifiers to estimate conversion probabilities conditional on the treatment.
def _fit_condprob_models(self, data, treatment, outcome): ''' Fits two classifiers to estimate conversion probabilities conditional on the treatment. ''' trt_learner = clone(self.learner) ctr_learner = clone(self.learner) treated = data[treatment] == 1 X = data.drop([treatment, outcome], axis=1) y = data['outcome'] self.trt_model = trt_learner.fit(X[treated], y[treated]) self.ctr_model = ctr_learner.fit(X[~treated], y[~treated])
[ "def", "_fit_condprob_models", "(", "self", ",", "data", ",", "treatment", ",", "outcome", ")", ":", "trt_learner", "=", "clone", "(", "self", ".", "learner", ")", "ctr_learner", "=", "clone", "(", "self", ".", "learner", ")", "treated", "=", "data", "[", "treatment", "]", "==", "1", "X", "=", "data", ".", "drop", "(", "[", "treatment", ",", "outcome", "]", ",", "axis", "=", "1", ")", "y", "=", "data", "[", "'outcome'", "]", "self", ".", "trt_model", "=", "trt_learner", ".", "fit", "(", "X", "[", "treated", "]", ",", "y", "[", "treated", "]", ")", "self", ".", "ctr_model", "=", "ctr_learner", ".", "fit", "(", "X", "[", "~", "treated", "]", ",", "y", "[", "~", "treated", "]", ")" ]
[ 140, 4 ]
[ 155, 66 ]
python
en
['en', 'error', 'th']
False
CounterfactualUnitSelector._get_exact_benefit
(self, data, treatment, outcome)
Calculates the exact benefit function of Theorem 4 in Li and Pearl (2019). Returns the exact benefit.
Calculates the exact benefit function of Theorem 4 in Li and Pearl (2019). Returns the exact benefit.
def _get_exact_benefit(self, data, treatment, outcome): ''' Calculates the exact benefit function of Theorem 4 in Li and Pearl (2019). Returns the exact benefit. ''' beta = self.complier_payoff gamma = self.alwaystaker_payoff theta = self.nevertaker_payoff X = data.drop([treatment, outcome], axis=1) segment_prob = self.segment_model.predict_proba(X) segment_name = self.segment_model.classes_ benefit = (beta - theta) * segment_prob[:, segment_name == 'AC'] + \ (gamma - beta) * segment_prob[:, segment_name == 'AD'] + theta return benefit
[ "def", "_get_exact_benefit", "(", "self", ",", "data", ",", "treatment", ",", "outcome", ")", ":", "beta", "=", "self", ".", "complier_payoff", "gamma", "=", "self", ".", "alwaystaker_payoff", "theta", "=", "self", ".", "nevertaker_payoff", "X", "=", "data", ".", "drop", "(", "[", "treatment", ",", "outcome", "]", ",", "axis", "=", "1", ")", "segment_prob", "=", "self", ".", "segment_model", ".", "predict_proba", "(", "X", ")", "segment_name", "=", "self", ".", "segment_model", ".", "classes_", "benefit", "=", "(", "beta", "-", "theta", ")", "*", "segment_prob", "[", ":", ",", "segment_name", "==", "'AC'", "]", "+", "(", "gamma", "-", "beta", ")", "*", "segment_prob", "[", ":", ",", "segment_name", "==", "'AD'", "]", "+", "theta", "return", "benefit" ]
[ 157, 4 ]
[ 174, 22 ]
python
en
['en', 'error', 'th']
False
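With the payoff symbols used in the code (beta, gamma, theta), the exact benefit is a weighted sum of two segment probabilities; a one-line numeric check with invented values.

beta, gamma, theta = 20, 10, 10          # complier / always-taker / never-taker payoffs (invented)
p_ac, p_ad = 0.4, 0.1                    # P(segment = 'AC' | x), P(segment = 'AD' | x) (invented)
benefit = (beta - theta) * p_ac + (gamma - beta) * p_ad + theta
print(benefit)                           # 13.0 for these numbers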
CounterfactualUnitSelector._obj_func_midp
(self, data, treatment, outcome)
Calculates bounds for the objective function. Returns the midpoint between bounds. Parameters ---------- pr_y1_w1 : float The probability of conversion given treatment assignment. pr_y1_w0 : float The probability of conversion given control assignment. pr_y0_w1 : float The probability of no conversion given treatment assignment (1 - pr_y1_w1). pr_y0_w0 : float The probability of no conversion given control assignment (1 - pr_1y_w0) pr_y1w1_x : float Probability of complier or always-taker given X. pr_y0w0_x : float Probability of complier or never-taker given X. pr_y1w0_x : float Probability of defier or always-taker given X. pr_y0w1_x : float Probability of never-taker or defier given X. pr_y_x : float Organic probability of conversion.
Calculates bounds for the objective function. Returns the midpoint between bounds.
def _obj_func_midp(self, data, treatment, outcome): ''' Calculates bounds for the objective function. Returns the midpoint between bounds. Parameters ---------- pr_y1_w1 : float The probability of conversion given treatment assignment. pr_y1_w0 : float The probability of conversion given control assignment. pr_y0_w1 : float The probability of no conversion given treatment assignment (1 - pr_y1_w1). pr_y0_w0 : float The probability of no conversion given control assignment (1 - pr_1y_w0) pr_y1w1_x : float Probability of complier or always-taker given X. pr_y0w0_x : float Probability of complier or never-taker given X. pr_y1w0_x : float Probability of defier or always-taker given X. pr_y0w1_x : float Probability of never-taker or defier given X. pr_y_x : float Organic probability of conversion. ''' X = data.drop([treatment, outcome], axis=1) beta = self.complier_payoff gamma = self.alwaystaker_payoff theta = self.nevertaker_payoff delta = self.defier_payoff pr_y0_w1, pr_y1_w1 = np.split(self.trt_model.predict_proba(X), indices_or_sections=2, axis=1) pr_y0_w0, pr_y1_w0 = np.split(self.ctr_model.predict_proba(X), indices_or_sections=2, axis=1) segment_prob = self.segment_model.predict_proba(X) segment_name = self.segment_model.classes_ pr_y1w1_x = segment_prob[:, segment_name == 'AC'] pr_y0w0_x = segment_prob[:, segment_name == 'NC'] pr_y1w0_x = segment_prob[:, segment_name == 'AD'] pr_y0w1_x = segment_prob[:, segment_name == 'ND'] if self.organic_conversion is not None: pr_y_x = self.organic_conversion else: pr_y_x = pr_y1_w0 warnings.warn( 'Probability of organic conversion estimated from control observations.') p1 = (beta - theta) * pr_y1_w1 + delta * pr_y1_w0 + theta * pr_y0_w0 p2 = gamma * pr_y1_w1 + delta * pr_y0_w1 + (beta - gamma) * pr_y0_w0 p3 = (gamma - delta) * pr_y1_w1 + delta * pr_y1_w0 + theta * \ pr_y0_w0 + (beta - gamma - theta + delta) * (pr_y1w1_x + pr_y0w0_x) p4 = (beta - theta) * pr_y1_w1 - (beta - gamma - theta) * pr_y1_w0 + \ theta * pr_y0_w0 + (beta - gamma - theta + delta) * \ (pr_y1w0_x + pr_y0w1_x) p5 = (gamma - delta) * pr_y1_w1 + delta * pr_y1_w0 + theta * pr_y0_w0 p6 = (beta - theta) * pr_y1_w1 - (beta - gamma - theta) * pr_y1_w0 + \ theta * pr_y0_w0 p7 = (gamma - delta) * pr_y1_w1 - (beta - gamma - theta) * pr_y1_w0 + \ theta * pr_y0_w0 + (beta - gamma - theta + delta) * pr_y_x p8 = (beta - theta) * pr_y1_w1 + delta * pr_y1_w0 + theta * \ pr_y0_w0 - (beta - gamma - theta + delta) * pr_y_x params_1 = np.concatenate((p1, p2, p3, p4), axis=1) params_2 = np.concatenate((p5, p6, p7, p8), axis=1) sigma = beta - gamma - theta + delta if sigma < 0: lower_bound = np.max(params_1, axis=1) upper_bound = np.min(params_2, axis=1) elif sigma > 0: lower_bound = np.max(params_2, axis=1) upper_bound = np.min(params_1, axis=1) return (lower_bound + upper_bound) / 2
[ "def", "_obj_func_midp", "(", "self", ",", "data", ",", "treatment", ",", "outcome", ")", ":", "X", "=", "data", ".", "drop", "(", "[", "treatment", ",", "outcome", "]", ",", "axis", "=", "1", ")", "beta", "=", "self", ".", "complier_payoff", "gamma", "=", "self", ".", "alwaystaker_payoff", "theta", "=", "self", ".", "nevertaker_payoff", "delta", "=", "self", ".", "defier_payoff", "pr_y0_w1", ",", "pr_y1_w1", "=", "np", ".", "split", "(", "self", ".", "trt_model", ".", "predict_proba", "(", "X", ")", ",", "indices_or_sections", "=", "2", ",", "axis", "=", "1", ")", "pr_y0_w0", ",", "pr_y1_w0", "=", "np", ".", "split", "(", "self", ".", "ctr_model", ".", "predict_proba", "(", "X", ")", ",", "indices_or_sections", "=", "2", ",", "axis", "=", "1", ")", "segment_prob", "=", "self", ".", "segment_model", ".", "predict_proba", "(", "X", ")", "segment_name", "=", "self", ".", "segment_model", ".", "classes_", "pr_y1w1_x", "=", "segment_prob", "[", ":", ",", "segment_name", "==", "'AC'", "]", "pr_y0w0_x", "=", "segment_prob", "[", ":", ",", "segment_name", "==", "'NC'", "]", "pr_y1w0_x", "=", "segment_prob", "[", ":", ",", "segment_name", "==", "'AD'", "]", "pr_y0w1_x", "=", "segment_prob", "[", ":", ",", "segment_name", "==", "'ND'", "]", "if", "self", ".", "organic_conversion", "is", "not", "None", ":", "pr_y_x", "=", "self", ".", "organic_conversion", "else", ":", "pr_y_x", "=", "pr_y1_w0", "warnings", ".", "warn", "(", "'Probability of organic conversion estimated from control observations.'", ")", "p1", "=", "(", "beta", "-", "theta", ")", "*", "pr_y1_w1", "+", "delta", "*", "pr_y1_w0", "+", "theta", "*", "pr_y0_w0", "p2", "=", "gamma", "*", "pr_y1_w1", "+", "delta", "*", "pr_y0_w1", "+", "(", "beta", "-", "gamma", ")", "*", "pr_y0_w0", "p3", "=", "(", "gamma", "-", "delta", ")", "*", "pr_y1_w1", "+", "delta", "*", "pr_y1_w0", "+", "theta", "*", "pr_y0_w0", "+", "(", "beta", "-", "gamma", "-", "theta", "+", "delta", ")", "*", "(", "pr_y1w1_x", "+", "pr_y0w0_x", ")", "p4", "=", "(", "beta", "-", "theta", ")", "*", "pr_y1_w1", "-", "(", "beta", "-", "gamma", "-", "theta", ")", "*", "pr_y1_w0", "+", "theta", "*", "pr_y0_w0", "+", "(", "beta", "-", "gamma", "-", "theta", "+", "delta", ")", "*", "(", "pr_y1w0_x", "+", "pr_y0w1_x", ")", "p5", "=", "(", "gamma", "-", "delta", ")", "*", "pr_y1_w1", "+", "delta", "*", "pr_y1_w0", "+", "theta", "*", "pr_y0_w0", "p6", "=", "(", "beta", "-", "theta", ")", "*", "pr_y1_w1", "-", "(", "beta", "-", "gamma", "-", "theta", ")", "*", "pr_y1_w0", "+", "theta", "*", "pr_y0_w0", "p7", "=", "(", "gamma", "-", "delta", ")", "*", "pr_y1_w1", "-", "(", "beta", "-", "gamma", "-", "theta", ")", "*", "pr_y1_w0", "+", "theta", "*", "pr_y0_w0", "+", "(", "beta", "-", "gamma", "-", "theta", "+", "delta", ")", "*", "pr_y_x", "p8", "=", "(", "beta", "-", "theta", ")", "*", "pr_y1_w1", "+", "delta", "*", "pr_y1_w0", "+", "theta", "*", "pr_y0_w0", "-", "(", "beta", "-", "gamma", "-", "theta", "+", "delta", ")", "*", "pr_y_x", "params_1", "=", "np", ".", "concatenate", "(", "(", "p1", ",", "p2", ",", "p3", ",", "p4", ")", ",", "axis", "=", "1", ")", "params_2", "=", "np", ".", "concatenate", "(", "(", "p5", ",", "p6", ",", "p7", ",", "p8", ")", ",", "axis", "=", "1", ")", "sigma", "=", "beta", "-", "gamma", "-", "theta", "+", "delta", "if", "sigma", "<", "0", ":", "lower_bound", "=", "np", ".", "max", "(", "params_1", ",", "axis", "=", "1", ")", "upper_bound", "=", "np", ".", "min", "(", "params_2", ",", "axis", "=", "1", ")", "elif", "sigma", ">", "0", ":", "lower_bound", 
"=", "np", ".", "max", "(", "params_2", ",", "axis", "=", "1", ")", "upper_bound", "=", "np", ".", "min", "(", "params_1", ",", "axis", "=", "1", ")", "return", "(", "lower_bound", "+", "upper_bound", ")", "/", "2" ]
[ 176, 4 ]
[ 271, 46 ]
python
en
['en', 'error', 'th']
False
cs
(arg1, arg2=None)
cs returns a formatted 'control sequence' (ECMA-48 §5.4). This only supports text/ASCII ("7-bit") control seqences, and does support binary ("8-bit") control seqeneces. This only supports standard parameters (ECMA-48 §5.4.1.a / §5.4.2), and does NOT support "experimental"/"private" parameters (ECMA-48 §5.4.1.b).
cs returns a formatted 'control sequence' (ECMA-48 §5.4). This only supports text/ASCII ("7-bit") control seqences, and does support binary ("8-bit") control seqeneces. This only supports standard parameters (ECMA-48 §5.4.1.a / §5.4.2), and does NOT support "experimental"/"private" parameters (ECMA-48 §5.4.1.b).
def cs(arg1, arg2=None): # type: ignore """cs returns a formatted 'control sequence' (ECMA-48 §5.4). This only supports text/ASCII ("7-bit") control seqences, and does support binary ("8-bit") control seqeneces. This only supports standard parameters (ECMA-48 §5.4.1.a / §5.4.2), and does NOT support "experimental"/"private" parameters (ECMA-48 §5.4.1.b). """ csi = '\033[' if arg2: params: List[_number] = arg1 op: str = arg2 else: params = [] op = arg1 return csi + (';'.join(str(n).replace('.', ':') for n in params)) + op
[ "def", "cs", "(", "arg1", ",", "arg2", "=", "None", ")", ":", "# type: ignore", "csi", "=", "'\\033['", "if", "arg2", ":", "params", ":", "List", "[", "_number", "]", "=", "arg1", "op", ":", "str", "=", "arg2", "else", ":", "params", "=", "[", "]", "op", "=", "arg1", "return", "csi", "+", "(", "';'", ".", "join", "(", "str", "(", "n", ")", ".", "replace", "(", "'.'", ",", "':'", ")", "for", "n", "in", "params", ")", ")", "+", "op" ]
[ 32, 0 ]
[ 47, 74 ]
python
en
['ca', 'en', 'en']
True
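A few concrete sequences the cs() helper produces; the function is restated inline (without the type-comment overloading) so the checks run standalone, and the specific sequences are only illustrative.

ESC = '\033'

def cs(arg1, arg2=None):
    csi = ESC + '['
    if arg2:
        params, op = arg1, arg2
    else:
        params, op = [], arg1
    return csi + ';'.join(str(n).replace('.', ':') for n in params) + op

assert cs('H') == ESC + '[H'                          # CUP with no parameters
assert cs([3], 'A') == ESC + '[3A'                    # CUU: cursor up three lines
assert cs([38, 5, 208], 'm') == ESC + '[38;5;208m'    # SGR with several parameters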
cursor_up
(lines: int = 1)
Generate the "CUU" ("CUrsor Up") control sequence (ECMA-48 §8.3.22).
Generate the "CUU" ("CUrsor Up") control sequence (ECMA-48 §8.3.22).
def cursor_up(lines: int = 1) -> str: """Generate the "CUU" ("CUrsor Up") control sequence (ECMA-48 §8.3.22).""" if lines == 1: return cs('A') return cs([lines], 'A')
[ "def", "cursor_up", "(", "lines", ":", "int", "=", "1", ")", "->", "str", ":", "if", "lines", "==", "1", ":", "return", "cs", "(", "'A'", ")", "return", "cs", "(", "[", "lines", "]", ",", "'A'", ")" ]
[ 59, 0 ]
[ 63, 27 ]
python
en
['en', 'ca', 'en']
True
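cursor_up() is the thinnest possible wrapper over that; assuming the cs() sketch above is in scope:

def cursor_up(lines=1):
    return cs('A') if lines == 1 else cs([lines], 'A')

print(repr(cursor_up()))    # '\x1b[A'  -- the single-line form drops the parameter
print(repr(cursor_up(5)))   # '\x1b[5A'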
Base.__log_all_options_if_none_specified
(self, test)
When testing_base is specified, but none of the log options to save are specified (basic_test_info, screen_shots, page_source), then save them all by default. Otherwise, save only selected ones from their plugins.
When testing_base is specified, but none of the log options to save are specified (basic_test_info, screen_shots, page_source), then save them all by default. Otherwise, save only selected ones from their plugins.
def __log_all_options_if_none_specified(self, test): """ When testing_base is specified, but none of the log options to save are specified (basic_test_info, screen_shots, page_source), then save them all by default. Otherwise, save only selected ones from their plugins. """ if ((not self.options.enable_plugin_basic_test_info) and ( not self.options.enable_plugin_screen_shots) and ( not self.options.enable_plugin_page_source)): test_logpath = self.options.log_path + "/" + test.id() log_helper.log_screenshot(test_logpath, test.driver) log_helper.log_test_failure_data( test, test_logpath, test.driver, test.browser) log_helper.log_page_source(test_logpath, test.driver)
[ "def", "__log_all_options_if_none_specified", "(", "self", ",", "test", ")", ":", "if", "(", "(", "not", "self", ".", "options", ".", "enable_plugin_basic_test_info", ")", "and", "(", "not", "self", ".", "options", ".", "enable_plugin_screen_shots", ")", "and", "(", "not", "self", ".", "options", ".", "enable_plugin_page_source", ")", ")", ":", "test_logpath", "=", "self", ".", "options", ".", "log_path", "+", "\"/\"", "+", "test", ".", "id", "(", ")", "log_helper", ".", "log_screenshot", "(", "test_logpath", ",", "test", ".", "driver", ")", "log_helper", ".", "log_test_failure_data", "(", "test", ",", "test_logpath", ",", "test", ".", "driver", ",", "test", ".", "browser", ")", "log_helper", ".", "log_page_source", "(", "test_logpath", ",", "test", ".", "driver", ")" ]
[ 167, 4 ]
[ 180, 65 ]
python
en
['en', 'error', 'th']
False
Base.addError
(self, test, err, capt=None)
Since Skip, Blocked, and Deprecated are all technically errors, but not error states, we want to make sure that they don't show up in the nose output as errors.
Since Skip, Blocked, and Deprecated are all technically errors, but not error states, we want to make sure that they don't show up in the nose output as errors.
def addError(self, test, err, capt=None): """ Since Skip, Blocked, and Deprecated are all technically errors, but not error states, we want to make sure that they don't show up in the nose output as errors. """ if (err[0] == errors.BlockedTest or ( err[0] == errors.SkipTest) or ( err[0] == errors.DeprecatedTest)): print(err[1].__str__().split('''-------------------- >> ''' '''begin captured logging''' ''' << --------------------''', 1)[0]) else: # self.__log_all_options_if_none_specified(test) pass self.add_fails_or_errors(test)
[ "def", "addError", "(", "self", ",", "test", ",", "err", ",", "capt", "=", "None", ")", ":", "if", "(", "err", "[", "0", "]", "==", "errors", ".", "BlockedTest", "or", "(", "err", "[", "0", "]", "==", "errors", ".", "SkipTest", ")", "or", "(", "err", "[", "0", "]", "==", "errors", ".", "DeprecatedTest", ")", ")", ":", "print", "(", "err", "[", "1", "]", ".", "__str__", "(", ")", ".", "split", "(", "'''-------------------- >> '''", "'''begin captured logging'''", "''' << --------------------'''", ",", "1", ")", "[", "0", "]", ")", "else", ":", "# self.__log_all_options_if_none_specified(test)", "pass", "self", ".", "add_fails_or_errors", "(", "test", ")" ]
[ 210, 4 ]
[ 225, 38 ]
python
en
['en', 'error', 'th']
False
Base.handleError
(self, test, err, capt=None)
If the database plugin is not present, we have to handle capturing "errors" that shouldn't be reported as such in base.
If the database plugin is not present, we have to handle capturing "errors" that shouldn't be reported as such in base.
def handleError(self, test, err, capt=None): """ If the database plugin is not present, we have to handle capturing "errors" that shouldn't be reported as such in base. """ if not hasattr(test.test, "testcase_guid"): if err[0] == errors.BlockedTest: raise SkipTest(err[1]) return True elif err[0] == errors.DeprecatedTest: raise SkipTest(err[1]) return True elif err[0] == errors.SkipTest: raise SkipTest(err[1]) return True
[ "def", "handleError", "(", "self", ",", "test", ",", "err", ",", "capt", "=", "None", ")", ":", "if", "not", "hasattr", "(", "test", ".", "test", ",", "\"testcase_guid\"", ")", ":", "if", "err", "[", "0", "]", "==", "errors", ".", "BlockedTest", ":", "raise", "SkipTest", "(", "err", "[", "1", "]", ")", "return", "True", "elif", "err", "[", "0", "]", "==", "errors", ".", "DeprecatedTest", ":", "raise", "SkipTest", "(", "err", "[", "1", "]", ")", "return", "True", "elif", "err", "[", "0", "]", "==", "errors", ".", "SkipTest", ":", "raise", "SkipTest", "(", "err", "[", "1", "]", ")", "return", "True" ]
[ 227, 4 ]
[ 243, 27 ]
python
en
['en', 'error', 'th']
False
IVRegressor.__init__
(self)
Initializes the class.
Initializes the class.
def __init__(self): ''' Initializes the class. ''' self.method = '2SLS'
[ "def", "__init__", "(", "self", ")", ":", "self", ".", "method", "=", "'2SLS'" ]
[ 14, 4 ]
[ 19, 28 ]
python
en
['en', 'error', 'th']
False
IVRegressor.fit
(self, X, treatment, y, w)
Fits the 2SLS model. Args: X (np.matrix or np.array or pd.Dataframe): a feature matrix treatment (np.array or pd.Series): a treatment vector y (np.array or pd.Series): an outcome vector w (np.array or pd.Series): an instrument vector
Fits the 2SLS model.
def fit(self, X, treatment, y, w): ''' Fits the 2SLS model. Args: X (np.matrix or np.array or pd.Dataframe): a feature matrix treatment (np.array or pd.Series): a treatment vector y (np.array or pd.Series): an outcome vector w (np.array or pd.Series): an instrument vector ''' X, treatment, y, w = convert_pd_to_np(X, treatment, y, w) exog = sm.add_constant(np.c_[X, treatment]) endog = y instrument = sm.add_constant(np.c_[X, w]) self.iv_model = IV2SLS(endog=endog, exog=exog, instrument=instrument) self.iv_fit = self.iv_model.fit()
[ "def", "fit", "(", "self", ",", "X", ",", "treatment", ",", "y", ",", "w", ")", ":", "X", ",", "treatment", ",", "y", ",", "w", "=", "convert_pd_to_np", "(", "X", ",", "treatment", ",", "y", ",", "w", ")", "exog", "=", "sm", ".", "add_constant", "(", "np", ".", "c_", "[", "X", ",", "treatment", "]", ")", "endog", "=", "y", "instrument", "=", "sm", ".", "add_constant", "(", "np", ".", "c_", "[", "X", ",", "w", "]", ")", "self", ".", "iv_model", "=", "IV2SLS", "(", "endog", "=", "endog", ",", "exog", "=", "exog", ",", "instrument", "=", "instrument", ")", "self", ".", "iv_fit", "=", "self", ".", "iv_model", ".", "fit", "(", ")" ]
[ 21, 4 ]
[ 38, 41 ]
python
en
['en', 'ca', 'en']
True
IVRegressor.predict
(self)
Returns the average treatment effect and its estimated standard error Returns: (float): average treatment effect (float): standard error of the estimation
Returns the average treatment effect and its estimated standard error
def predict(self): '''Returns the average treatment effect and its estimated standard error Returns: (float): average treatment effect (float): standard error of the estimation ''' return self.iv_fit.params[-1], self.iv_fit.bse[-1]
[ "def", "predict", "(", "self", ")", ":", "return", "self", ".", "iv_fit", ".", "params", "[", "-", "1", "]", ",", "self", ".", "iv_fit", ".", "bse", "[", "-", "1", "]" ]
[ 40, 4 ]
[ 48, 58 ]
python
en
['en', 'en', 'en']
True
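A minimal usage sketch for the three IVRegressor records above. The import path and the synthetic data are assumptions for illustration; fit() only needs a feature matrix, a treatment vector, an outcome vector, and an instrument vector, as its docstring states.

# Hypothetical usage of IVRegressor; the import path is not shown in the
# records above, so "iv_module" is a placeholder to adjust.
import numpy as np
from iv_module import IVRegressor  # hypothetical import path

rng = np.random.default_rng(0)
n = 1000
X = rng.normal(size=(n, 3))                          # covariates
w = rng.binomial(1, 0.5, size=n)                     # instrument
treatment = w * rng.binomial(1, 0.9, size=n)         # treatment partly driven by the instrument
y = X @ np.array([0.1, 0.2, 0.3]) + 2.0 * treatment + rng.normal(size=n)

model = IVRegressor()            # method is fixed to '2SLS' in __init__
model.fit(X, treatment, y, w)    # two-stage least squares via statsmodels' IV2SLS
ate, se = model.predict()        # average treatment effect and its standard error
print(ate, se)                   # ate should land near the true effect of 2.0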
MultiPartForm.add_field
(self, name, value)
Add a simple field to the form data.
Add a simple field to the form data.
def add_field(self, name, value): """Add a simple field to the form data.""" if not isinstance(value, str): value = json.dumps(value, ensure_ascii=False) self.form_fields.append((name, value)) return
[ "def", "add_field", "(", "self", ",", "name", ",", "value", ")", ":", "if", "not", "isinstance", "(", "value", ",", "str", ")", ":", "value", "=", "json", ".", "dumps", "(", "value", ",", "ensure_ascii", "=", "False", ")", "self", ".", "form_fields", ".", "append", "(", "(", "name", ",", "value", ")", ")", "return" ]
[ 44, 4 ]
[ 49, 14 ]
python
en
['en', 'en', 'en']
True
MultiPartForm.add_file
(self, field_name, file_name, file_content, mimetype=None)
Add a file to be uploaded.
Add a file to be uploaded.
def add_file(self, field_name, file_name, file_content, mimetype=None): """Add a file to be uploaded.""" if mimetype is None: mimetype = mimetypes.guess_type(file_name)[0] or 'application/octet-stream' self.files.append((field_name, file_name, mimetype, file_content)) return
[ "def", "add_file", "(", "self", ",", "field_name", ",", "file_name", ",", "file_content", ",", "mimetype", "=", "None", ")", ":", "if", "mimetype", "is", "None", ":", "mimetype", "=", "mimetypes", ".", "guess_type", "(", "file_name", ")", "[", "0", "]", "or", "'application/octet-stream'", "self", ".", "files", ".", "append", "(", "(", "field_name", ",", "file_name", ",", "mimetype", ",", "file_content", ")", ")", "return" ]
[ 51, 4 ]
[ 56, 14 ]
python
en
['en', 'en', 'en']
True
MultiPartForm.build_body
(self)
Return a string representing the form data, including attached files.
Return a string representing the form data, including attached files.
def build_body(self): """Return a string representing the form data, including attached files.""" # Build a list of lists, each containing "lines" of the # request. Each part is separated by a boundary string. # Once the list is built, return a string where each # line is separated by '\r\n'. parts = [] part_boundary = '--' + self.boundary # Add the form fields parts.extend( [bytes(part_boundary.encode(self.charset)), bytes(('Content-Disposition: form-data; name="%s"' % name).encode(self.charset)) if PYTHON_VERSION_3 else ('Content-Disposition: form-data; name="%s"' % name), bytes(('Content-Type: text/plain; charset=%s' % self.charset).encode(self.charset)), bytes(''.encode(self.charset)), bytes(value.encode(self.charset)) if PYTHON_VERSION_3 else value ] for name, value in self.form_fields ) # Add the files to upload parts.extend( [bytes(part_boundary.encode(self.charset)), bytes(('Content-Disposition: form-data; name="%s"; filename="%s"' % (field_name, filename)).encode(self.charset)) if PYTHON_VERSION_3 else ('Content-Disposition: form-data; name="%s"; filename="%s"' % (field_name, filename)), bytes(('Content-Type: %s' % content_type).encode(self.charset)), bytes('Content-Transfer-Encoding: binary'.encode(self.charset)), bytes(''.encode(self.charset)), body, ] for field_name, filename, content_type, body in self.files ) # Flatten the list and add closing boundary marker, # then return CR+LF separated data flattened = list(itertools.chain(*parts)) flattened.append(bytes(('--' + self.boundary + '--').encode(self.charset))) flattened.append(bytes(''.encode(self.charset))) return bytes('\r\n'.encode(self.charset)).join(flattened)
[ "def", "build_body", "(", "self", ")", ":", "# Build a list of lists, each containing \"lines\" of the", "# request. Each part is separated by a boundary string.", "# Once the list is built, return a string where each", "# line is separated by '\\r\\n'.", "parts", "=", "[", "]", "part_boundary", "=", "'--'", "+", "self", ".", "boundary", "# Add the form fields", "parts", ".", "extend", "(", "[", "bytes", "(", "part_boundary", ".", "encode", "(", "self", ".", "charset", ")", ")", ",", "bytes", "(", "(", "'Content-Disposition: form-data; name=\"%s\"'", "%", "name", ")", ".", "encode", "(", "self", ".", "charset", ")", ")", "if", "PYTHON_VERSION_3", "else", "(", "'Content-Disposition: form-data; name=\"%s\"'", "%", "name", ")", ",", "bytes", "(", "(", "'Content-Type: text/plain; charset=%s'", "%", "self", ".", "charset", ")", ".", "encode", "(", "self", ".", "charset", ")", ")", ",", "bytes", "(", "''", ".", "encode", "(", "self", ".", "charset", ")", ")", ",", "bytes", "(", "value", ".", "encode", "(", "self", ".", "charset", ")", ")", "if", "PYTHON_VERSION_3", "else", "value", "]", "for", "name", ",", "value", "in", "self", ".", "form_fields", ")", "# Add the files to upload", "parts", ".", "extend", "(", "[", "bytes", "(", "part_boundary", ".", "encode", "(", "self", ".", "charset", ")", ")", ",", "bytes", "(", "(", "'Content-Disposition: form-data; name=\"%s\"; filename=\"%s\"'", "%", "(", "field_name", ",", "filename", ")", ")", ".", "encode", "(", "self", ".", "charset", ")", ")", "if", "PYTHON_VERSION_3", "else", "(", "'Content-Disposition: form-data; name=\"%s\"; filename=\"%s\"'", "%", "(", "field_name", ",", "filename", ")", ")", ",", "bytes", "(", "(", "'Content-Type: %s'", "%", "content_type", ")", ".", "encode", "(", "self", ".", "charset", ")", ")", ",", "bytes", "(", "'Content-Transfer-Encoding: binary'", ".", "encode", "(", "self", ".", "charset", ")", ")", ",", "bytes", "(", "''", ".", "encode", "(", "self", ".", "charset", ")", ")", ",", "body", ",", "]", "for", "field_name", ",", "filename", ",", "content_type", ",", "body", "in", "self", ".", "files", ")", "# Flatten the list and add closing boundary marker,", "# then return CR+LF separated data", "flattened", "=", "list", "(", "itertools", ".", "chain", "(", "*", "parts", ")", ")", "flattened", ".", "append", "(", "bytes", "(", "(", "'--'", "+", "self", ".", "boundary", "+", "'--'", ")", ".", "encode", "(", "self", ".", "charset", ")", ")", ")", "flattened", ".", "append", "(", "bytes", "(", "''", ".", "encode", "(", "self", ".", "charset", ")", ")", ")", "return", "bytes", "(", "'\\r\\n'", ".", "encode", "(", "self", ".", "charset", ")", ")", ".", "join", "(", "flattened", ")" ]
[ 58, 4 ]
[ 98, 65 ]
python
en
['en', 'en', 'en']
True
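A hedged usage sketch of the MultiPartForm records above. The constructor is not part of these records, so it is assumed to initialise form_fields, files, boundary and charset; the import path is likewise hypothetical.

# Hypothetical usage; constructor and import path are assumptions.
from multipart_form import MultiPartForm  # hypothetical import path

form = MultiPartForm()
form.add_field('description', 'nightly build')   # plain strings are stored as-is
form.add_field('metadata', {'retries': 3})       # non-strings are JSON-encoded by add_field
with open('report.csv', 'rb') as f:
    form.add_file('attachment', 'report.csv', f.read())  # mimetype guessed from the filename

body = form.build_body()                         # bytes, parts joined with CRLF
headers = {
    'Content-Type': 'multipart/form-data; boundary=' + form.boundary,
    'Content-Length': str(len(body)),
}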
get_index
(subjects, ra)
Usage: sort input poses by the distance to the [mean pose] from the train data, sorted from large to small :param subjects: e.g. the test set :return: the reversed index in the test set
Usage: sort input poses by the distance to the [mean pose] from the train data, sorted from large to small :param subjects: e.g. the test set :return: the reversed index in the test set
def get_index(subjects, ra): """ Usage: sort input poses by the distance to [mean pose] from train data sorted from large to small :param subjects: e.g. Test set :return: Reversed Index in the Test set """ train_pose_3d = [] for subject in subjects: #print('subject',subject) for action in dataset[subject].keys(): #print('action',action) # poses_2d = keypoints[subject][action] poses_3d = dataset[subject][action]['positions_3d'] #out = [] for i in range(len(poses_3d)): # Remove global offset, but keep trajectory in first position poses_3d[i] -= poses_3d[i][:, :1] if cal_mean: mean_3d_1 = np.mean(poses_3d[i], axis=0) elif cal_distance: ext_mean_pose = np.repeat(mean_pose[np.newaxis, :, :], poses_3d[i].shape[0], axis=0) assert ext_mean_pose.shape == poses_3d[i].shape pose_dis = np.linalg.norm((ext_mean_pose - poses_3d[i]), axis=-1) pose_dis_mean = np.mean(pose_dis, axis=-1) #out.append(pose_dis_mean) train_pose_3d.append(pose_dis_mean) #plot17j(out, subject, action, show_animation=False) full_pose = np.concatenate(train_pose_3d, axis=0) # Sorted from large to small distance sorted_index = np.argsort(-full_pose) full_pose.tolist() #sorted_dis = sorted(full_pose, reverse=True) #print('From large to small value:',sorted_dis) print('index',sorted_index) num = len(full_pose) print('Total pose:',num) ratio = ra pick_num = int(ratio*num) print('Picked number:',pick_num) pick_index = sorted_index[:pick_num] np.set_printoptions(threshold=np.inf) #print(pick_index) rerank = sorted(pick_index) print('rerank',len(rerank)) return rerank
[ "def", "get_index", "(", "subjects", ",", "ra", ")", ":", "train_pose_3d", "=", "[", "]", "for", "subject", "in", "subjects", ":", "#print('subject',subject)", "for", "action", "in", "dataset", "[", "subject", "]", ".", "keys", "(", ")", ":", "#print('action',action)", "# poses_2d = keypoints[subject][action]", "poses_3d", "=", "dataset", "[", "subject", "]", "[", "action", "]", "[", "'positions_3d'", "]", "#out = []", "for", "i", "in", "range", "(", "len", "(", "poses_3d", ")", ")", ":", "# Remove global offset, but keep trajectory in first position", "poses_3d", "[", "i", "]", "-=", "poses_3d", "[", "i", "]", "[", ":", ",", ":", "1", "]", "if", "cal_mean", ":", "mean_3d_1", "=", "np", ".", "mean", "(", "poses_3d", "[", "i", "]", ",", "axis", "=", "0", ")", "elif", "cal_distance", ":", "ext_mean_pose", "=", "np", ".", "repeat", "(", "mean_pose", "[", "np", ".", "newaxis", ",", ":", ",", ":", "]", ",", "poses_3d", "[", "i", "]", ".", "shape", "[", "0", "]", ",", "axis", "=", "0", ")", "assert", "ext_mean_pose", ".", "shape", "==", "poses_3d", "[", "i", "]", ".", "shape", "pose_dis", "=", "np", ".", "linalg", ".", "norm", "(", "(", "ext_mean_pose", "-", "poses_3d", "[", "i", "]", ")", ",", "axis", "=", "-", "1", ")", "pose_dis_mean", "=", "np", ".", "mean", "(", "pose_dis", ",", "axis", "=", "-", "1", ")", "#out.append(pose_dis_mean)", "train_pose_3d", ".", "append", "(", "pose_dis_mean", ")", "#plot17j(out, subject, action, show_animation=False)", "full_pose", "=", "np", ".", "concatenate", "(", "train_pose_3d", ",", "axis", "=", "0", ")", "# Sorted from large to small distance", "sorted_index", "=", "np", ".", "argsort", "(", "-", "full_pose", ")", "full_pose", ".", "tolist", "(", ")", "#sorted_dis = sorted(full_pose, reverse=True)", "#print('From large to small value:',sorted_dis)", "print", "(", "'index'", ",", "sorted_index", ")", "num", "=", "len", "(", "full_pose", ")", "print", "(", "'Total pose:'", ",", "num", ")", "ratio", "=", "ra", "pick_num", "=", "int", "(", "ratio", "*", "num", ")", "print", "(", "'Picked number:'", ",", "pick_num", ")", "pick_index", "=", "sorted_index", "[", ":", "pick_num", "]", "np", ".", "set_printoptions", "(", "threshold", "=", "np", ".", "inf", ")", "#print(pick_index)", "rerank", "=", "sorted", "(", "pick_index", ")", "print", "(", "'rerank'", ",", "len", "(", "rerank", ")", ")", "return", "rerank" ]
[ 113, 0 ]
[ 159, 17 ]
python
en
['en', 'error', 'th']
False
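The distance computation in get_index depends on module-level globals (dataset, mean_pose, cal_mean, cal_distance) that are not shown here, but the selection step at its end is self-contained. A toy sketch of that step: sort scores from large to small, keep the top `ratio` fraction, then re-sort the kept positions into ascending order.

import numpy as np

scores = np.array([0.2, 0.9, 0.1, 0.7, 0.5])   # stand-in for the per-pose mean distances
sorted_index = np.argsort(-scores)             # indices from largest to smallest score
ratio = 0.4
pick_num = int(ratio * len(scores))            # here: 2
pick_index = sorted_index[:pick_num]           # positions of the two largest scores: 1 and 3
rerank = sorted(pick_index)                    # ascending order, as get_index returns
print(rerank)                                  # [1, 3]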
split_data
(index)
Partition index into a list of groups, adding one more dimension :param index: a long list of indices :return out: the split index, type: List
Partition index into a list of groups, adding one more dimension :param index: a long list of indices :return out: the split index, type: List
def split_data(index): """ Partition index into a list, make one more dimension :param index: a so long list :return out: splited index, type: List """ out = [] j = 0 for i in index: if i < len(index)-1: if index[i+1] - index[i]>5: print('Split index into smaller groups:',j,i) out.append(index[j:(i+1)]) j = i+1 elif i==len(index)-1: out.append(index[j:]) print('Split group:',len(out)) return out
[ "def", "split_data", "(", "index", ")", ":", "out", "=", "[", "]", "j", "=", "0", "for", "i", "in", "index", ":", "if", "i", "<", "len", "(", "index", ")", "-", "1", ":", "if", "index", "[", "i", "+", "1", "]", "-", "index", "[", "i", "]", ">", "5", ":", "print", "(", "'Split index into smaller groups:'", ",", "j", ",", "i", ")", "out", ".", "append", "(", "index", "[", "j", ":", "(", "i", "+", "1", ")", "]", ")", "j", "=", "i", "+", "1", "elif", "i", "==", "len", "(", "index", ")", "-", "1", ":", "out", ".", "append", "(", "index", "[", "j", ":", "]", ")", "print", "(", "'Split group:'", ",", "len", "(", "out", ")", ")", "return", "out" ]
[ 179, 0 ]
[ 196, 14 ]
python
en
['en', 'error', 'th']
False
SocketStream.setsockopt
(self, level, option, value)
Set an option on the underlying socket. See :meth:`socket.socket.setsockopt` for details.
Set an option on the underlying socket.
def setsockopt(self, level, option, value): """Set an option on the underlying socket. See :meth:`socket.socket.setsockopt` for details. """ return self.socket.setsockopt(level, option, value)
[ "def", "setsockopt", "(", "self", ",", "level", ",", "option", ",", "value", ")", ":", "return", "self", ".", "socket", ".", "setsockopt", "(", "level", ",", "option", ",", "value", ")" ]
[ 143, 4 ]
[ 149, 59 ]
python
en
['en', 'no', 'en']
True
SocketStream.getsockopt
(self, level, option, buffersize=0)
Check the current value of an option on the underlying socket. See :meth:`socket.socket.getsockopt` for details.
Check the current value of an option on the underlying socket.
def getsockopt(self, level, option, buffersize=0): """Check the current value of an option on the underlying socket. See :meth:`socket.socket.getsockopt` for details. """ # This is to work around # https://bitbucket.org/pypy/pypy/issues/2561 # We should be able to drop it when the next PyPy3 beta is released. if buffersize == 0: return self.socket.getsockopt(level, option) else: return self.socket.getsockopt(level, option, buffersize)
[ "def", "getsockopt", "(", "self", ",", "level", ",", "option", ",", "buffersize", "=", "0", ")", ":", "# This is to work around", "# https://bitbucket.org/pypy/pypy/issues/2561", "# We should be able to drop it when the next PyPy3 beta is released.", "if", "buffersize", "==", "0", ":", "return", "self", ".", "socket", ".", "getsockopt", "(", "level", ",", "option", ")", "else", ":", "return", "self", ".", "socket", ".", "getsockopt", "(", "level", ",", "option", ",", "buffersize", ")" ]
[ 151, 4 ]
[ 163, 68 ]
python
en
['en', 'en', 'en']
True
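A short sketch of setting and reading a socket option on a trio SocketStream, which is what the two methods above expose. The address is an assumption; any stream returned by trio.open_tcp_stream works the same way.

import socket
import trio

async def main():
    # assumes something is listening on 127.0.0.1:8080
    stream = await trio.open_tcp_stream("127.0.0.1", 8080)
    stream.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
    print("TCP_NODELAY:", stream.getsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY))
    await stream.aclose()

trio.run(main)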
SocketListener.accept
(self)
Accept an incoming connection. Returns: :class:`SocketStream` Raises: OSError: if the underlying call to ``accept`` raises an unexpected error. ClosedResourceError: if you already closed the socket. This method handles routine errors like ``ECONNABORTED``, but passes other errors on to its caller. In particular, it does *not* make any special effort to handle resource exhaustion errors like ``EMFILE``, ``ENFILE``, ``ENOBUFS``, ``ENOMEM``.
Accept an incoming connection.
async def accept(self): """Accept an incoming connection. Returns: :class:`SocketStream` Raises: OSError: if the underlying call to ``accept`` raises an unexpected error. ClosedResourceError: if you already closed the socket. This method handles routine errors like ``ECONNABORTED``, but passes other errors on to its caller. In particular, it does *not* make any special effort to handle resource exhaustion errors like ``EMFILE``, ``ENFILE``, ``ENOBUFS``, ``ENOMEM``. """ while True: try: sock, _ = await self.socket.accept() except OSError as exc: if exc.errno in _closed_stream_errnos: raise trio.ClosedResourceError if exc.errno not in _ignorable_accept_errnos: raise else: return SocketStream(sock)
[ "async", "def", "accept", "(", "self", ")", ":", "while", "True", ":", "try", ":", "sock", ",", "_", "=", "await", "self", ".", "socket", ".", "accept", "(", ")", "except", "OSError", "as", "exc", ":", "if", "exc", ".", "errno", "in", "_closed_stream_errnos", ":", "raise", "trio", ".", "ClosedResourceError", "if", "exc", ".", "errno", "not", "in", "_ignorable_accept_errnos", ":", "raise", "else", ":", "return", "SocketStream", "(", "sock", ")" ]
[ 350, 4 ]
[ 376, 41 ]
python
en
['en', 'en', 'en']
True
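A sketch of the accept loop that SocketListener.accept() is designed for. In practice trio.serve_tcp / trio.serve_listeners wrap this pattern; the handler below is a stand-in.

import trio

async def handle(stream):
    async with stream:
        await stream.send_all(b"hello\r\n")

async def main():
    listeners = await trio.open_tcp_listeners(9000)   # a list of SocketListener objects
    listener = listeners[0]
    async with trio.open_nursery() as nursery:
        while True:
            stream = await listener.accept()          # routine errors like ECONNABORTED are retried inside accept()
            nursery.start_soon(handle, stream)

trio.run(main)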
SocketListener.aclose
(self)
Close this listener and its underlying socket.
Close this listener and its underlying socket.
async def aclose(self): """Close this listener and its underlying socket.""" self.socket.close() await trio.lowlevel.checkpoint()
[ "async", "def", "aclose", "(", "self", ")", ":", "self", ".", "socket", ".", "close", "(", ")", "await", "trio", ".", "lowlevel", ".", "checkpoint", "(", ")" ]
[ 378, 4 ]
[ 381, 40 ]
python
en
['en', 'en', 'en']
True
current_statistics
()
Returns an object containing run-loop-level debugging information. Currently the following fields are defined: * ``tasks_living`` (int): The number of tasks that have been spawned and not yet exited. * ``tasks_runnable`` (int): The number of tasks that are currently queued on the run queue (as opposed to blocked waiting for something to happen). * ``seconds_to_next_deadline`` (float): The time until the next pending cancel scope deadline. May be negative if the deadline has expired but we haven't yet processed cancellations. May be :data:`~math.inf` if there are no pending deadlines. * ``run_sync_soon_queue_size`` (int): The number of unprocessed callbacks queued via :meth:`trio.lowlevel.TrioToken.run_sync_soon`. * ``io_statistics`` (object): Some statistics from Trio's I/O backend. This always has an attribute ``backend`` which is a string naming which operating-system-specific I/O backend is in use; the other attributes vary between backends.
Returns an object containing run-loop-level debugging information.
def current_statistics(): """Returns an object containing run-loop-level debugging information. Currently the following fields are defined: * ``tasks_living`` (int): The number of tasks that have been spawned and not yet exited. * ``tasks_runnable`` (int): The number of tasks that are currently queued on the run queue (as opposed to blocked waiting for something to happen). * ``seconds_to_next_deadline`` (float): The time until the next pending cancel scope deadline. May be negative if the deadline has expired but we haven't yet processed cancellations. May be :data:`~math.inf` if there are no pending deadlines. * ``run_sync_soon_queue_size`` (int): The number of unprocessed callbacks queued via :meth:`trio.lowlevel.TrioToken.run_sync_soon`. * ``io_statistics`` (object): Some statistics from Trio's I/O backend. This always has an attribute ``backend`` which is a string naming which operating-system-specific I/O backend is in use; the other attributes vary between backends. """ locals()[LOCALS_KEY_KI_PROTECTION_ENABLED] = True try: return GLOBAL_RUN_CONTEXT.runner.current_statistics() except AttributeError: raise RuntimeError("must be called from async context")
[ "def", "current_statistics", "(", ")", ":", "locals", "(", ")", "[", "LOCALS_KEY_KI_PROTECTION_ENABLED", "]", "=", "True", "try", ":", "return", "GLOBAL_RUN_CONTEXT", ".", "runner", ".", "current_statistics", "(", ")", "except", "AttributeError", ":", "raise", "RuntimeError", "(", "\"must be called from async context\"", ")" ]
[ 10, 0 ]
[ 37, 63 ]
python
en
['en', 'en', 'en']
True
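A small sketch of reading these statistics from inside a running trio program:

import trio

async def main():
    stats = trio.lowlevel.current_statistics()
    print("living tasks:  ", stats.tasks_living)
    print("runnable tasks:", stats.tasks_runnable)
    print("io backend:    ", stats.io_statistics.backend)

trio.run(main)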
current_time
()
Returns the current time according to Trio's internal clock. Returns: float: The current time. Raises: RuntimeError: if not inside a call to :func:`trio.run`.
Returns the current time according to Trio's internal clock.
def current_time(): """Returns the current time according to Trio's internal clock. Returns: float: The current time. Raises: RuntimeError: if not inside a call to :func:`trio.run`. """ locals()[LOCALS_KEY_KI_PROTECTION_ENABLED] = True try: return GLOBAL_RUN_CONTEXT.runner.current_time() except AttributeError: raise RuntimeError("must be called from async context")
[ "def", "current_time", "(", ")", ":", "locals", "(", ")", "[", "LOCALS_KEY_KI_PROTECTION_ENABLED", "]", "=", "True", "try", ":", "return", "GLOBAL_RUN_CONTEXT", ".", "runner", ".", "current_time", "(", ")", "except", "AttributeError", ":", "raise", "RuntimeError", "(", "\"must be called from async context\"", ")" ]
[ 40, 0 ]
[ 54, 63 ]
python
en
['en', 'en', 'en']
True
current_clock
()
Returns the current :class:`~trio.abc.Clock`.
Returns the current :class:`~trio.abc.Clock`.
def current_clock(): """Returns the current :class:`~trio.abc.Clock`.""" locals()[LOCALS_KEY_KI_PROTECTION_ENABLED] = True try: return GLOBAL_RUN_CONTEXT.runner.current_clock() except AttributeError: raise RuntimeError("must be called from async context")
[ "def", "current_clock", "(", ")", ":", "locals", "(", ")", "[", "LOCALS_KEY_KI_PROTECTION_ENABLED", "]", "=", "True", "try", ":", "return", "GLOBAL_RUN_CONTEXT", ".", "runner", ".", "current_clock", "(", ")", "except", "AttributeError", ":", "raise", "RuntimeError", "(", "\"must be called from async context\"", ")" ]
[ 57, 0 ]
[ 63, 63 ]
python
en
['en', 'la', 'en']
True
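current_time() and current_clock() are exposed as trio.current_time and trio.lowlevel.current_clock; a quick sketch:

import trio

async def main():
    t0 = trio.current_time()
    await trio.sleep(1)
    print("elapsed on Trio's clock:", trio.current_time() - t0)
    print("clock in use:", trio.lowlevel.current_clock())

trio.run(main)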
current_root_task
()
Returns the current root :class:`Task`. This is the task that is the ultimate parent of all other tasks.
Returns the current root :class:`Task`.
def current_root_task(): """Returns the current root :class:`Task`. This is the task that is the ultimate parent of all other tasks. """ locals()[LOCALS_KEY_KI_PROTECTION_ENABLED] = True try: return GLOBAL_RUN_CONTEXT.runner.current_root_task() except AttributeError: raise RuntimeError("must be called from async context")
[ "def", "current_root_task", "(", ")", ":", "locals", "(", ")", "[", "LOCALS_KEY_KI_PROTECTION_ENABLED", "]", "=", "True", "try", ":", "return", "GLOBAL_RUN_CONTEXT", ".", "runner", ".", "current_root_task", "(", ")", "except", "AttributeError", ":", "raise", "RuntimeError", "(", "\"must be called from async context\"", ")" ]
[ 66, 0 ]
[ 76, 63 ]
python
en
['en', 'en', 'en']
True
reschedule
(task, next_send=_NO_SEND)
Reschedule the given task with the given :class:`outcome.Outcome`. See :func:`wait_task_rescheduled` for the gory details. There must be exactly one call to :func:`reschedule` for every call to :func:`wait_task_rescheduled`. (And when counting, keep in mind that returning :data:`Abort.SUCCEEDED` from an abort callback is equivalent to calling :func:`reschedule` once.) Args: task (trio.lowlevel.Task): the task to be rescheduled. Must be blocked in a call to :func:`wait_task_rescheduled`. next_send (outcome.Outcome): the value (or error) to return (or raise) from :func:`wait_task_rescheduled`.
Reschedule the given task with the given :class:`outcome.Outcome`.
def reschedule(task, next_send=_NO_SEND): """Reschedule the given task with the given :class:`outcome.Outcome`. See :func:`wait_task_rescheduled` for the gory details. There must be exactly one call to :func:`reschedule` for every call to :func:`wait_task_rescheduled`. (And when counting, keep in mind that returning :data:`Abort.SUCCEEDED` from an abort callback is equivalent to calling :func:`reschedule` once.) Args: task (trio.lowlevel.Task): the task to be rescheduled. Must be blocked in a call to :func:`wait_task_rescheduled`. next_send (outcome.Outcome): the value (or error) to return (or raise) from :func:`wait_task_rescheduled`. """ locals()[LOCALS_KEY_KI_PROTECTION_ENABLED] = True try: return GLOBAL_RUN_CONTEXT.runner.reschedule(task, next_send) except AttributeError: raise RuntimeError("must be called from async context")
[ "def", "reschedule", "(", "task", ",", "next_send", "=", "_NO_SEND", ")", ":", "locals", "(", ")", "[", "LOCALS_KEY_KI_PROTECTION_ENABLED", "]", "=", "True", "try", ":", "return", "GLOBAL_RUN_CONTEXT", ".", "runner", ".", "reschedule", "(", "task", ",", "next_send", ")", "except", "AttributeError", ":", "raise", "RuntimeError", "(", "\"must be called from async context\"", ")" ]
[ 79, 0 ]
[ 101, 63 ]
python
en
['en', 'en', 'en']
True
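A hedged sketch of the reschedule() / wait_task_rescheduled() pairing the docstring refers to: one task parks itself, another wakes it with a value. It uses the outcome library, which trio depends on.

import outcome
import trio

async def main():
    parked = {}

    async def sleeper():
        parked["task"] = trio.lowlevel.current_task()

        def abort_fn(raise_cancel):
            return trio.lowlevel.Abort.SUCCEEDED   # allow cancellation while parked

        value = await trio.lowlevel.wait_task_rescheduled(abort_fn)
        print("woken with:", value)

    async def waker():
        await trio.sleep(0.1)                      # give sleeper time to park itself
        trio.lowlevel.reschedule(parked["task"], outcome.Value("hello"))

    async with trio.open_nursery() as nursery:
        nursery.start_soon(sleeper)
        nursery.start_soon(waker)

trio.run(main)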
spawn_system_task
(async_fn, *args, name=None)
Spawn a "system" task. System tasks have a few differences from regular tasks: * They don't need an explicit nursery; instead they go into the internal "system nursery". * If a system task raises an exception, then it's converted into a :exc:`~trio.TrioInternalError` and *all* tasks are cancelled. If you write a system task, you should be careful to make sure it doesn't crash. * System tasks are automatically cancelled when the main task exits. * By default, system tasks have :exc:`KeyboardInterrupt` protection *enabled*. If you want your task to be interruptible by control-C, then you need to use :func:`disable_ki_protection` explicitly (and come up with some plan for what to do with a :exc:`KeyboardInterrupt`, given that system tasks aren't allowed to raise exceptions). * System tasks do not inherit context variables from their creator. Towards the end of a call to :meth:`trio.run`, after the main task and all system tasks have exited, the system nursery becomes closed. At this point, new calls to :func:`spawn_system_task` will raise ``RuntimeError("Nursery is closed to new arrivals")`` instead of creating a system task. It's possible to encounter this state either in a ``finally`` block in an async generator, or in a callback passed to :meth:`TrioToken.run_sync_soon` at the right moment. Args: async_fn: An async callable. args: Positional arguments for ``async_fn``. If you want to pass keyword arguments, use :func:`functools.partial`. name: The name for this task. Only used for debugging/introspection (e.g. ``repr(task_obj)``). If this isn't a string, :func:`spawn_system_task` will try to make it one. A common use case is if you're wrapping a function before spawning a new task, you might pass the original function as the ``name=`` to make debugging easier. Returns: Task: the newly spawned task
Spawn a "system" task.
def spawn_system_task(async_fn, *args, name=None): """Spawn a "system" task. System tasks have a few differences from regular tasks: * They don't need an explicit nursery; instead they go into the internal "system nursery". * If a system task raises an exception, then it's converted into a :exc:`~trio.TrioInternalError` and *all* tasks are cancelled. If you write a system task, you should be careful to make sure it doesn't crash. * System tasks are automatically cancelled when the main task exits. * By default, system tasks have :exc:`KeyboardInterrupt` protection *enabled*. If you want your task to be interruptible by control-C, then you need to use :func:`disable_ki_protection` explicitly (and come up with some plan for what to do with a :exc:`KeyboardInterrupt`, given that system tasks aren't allowed to raise exceptions). * System tasks do not inherit context variables from their creator. Towards the end of a call to :meth:`trio.run`, after the main task and all system tasks have exited, the system nursery becomes closed. At this point, new calls to :func:`spawn_system_task` will raise ``RuntimeError("Nursery is closed to new arrivals")`` instead of creating a system task. It's possible to encounter this state either in a ``finally`` block in an async generator, or in a callback passed to :meth:`TrioToken.run_sync_soon` at the right moment. Args: async_fn: An async callable. args: Positional arguments for ``async_fn``. If you want to pass keyword arguments, use :func:`functools.partial`. name: The name for this task. Only used for debugging/introspection (e.g. ``repr(task_obj)``). If this isn't a string, :func:`spawn_system_task` will try to make it one. A common use case is if you're wrapping a function before spawning a new task, you might pass the original function as the ``name=`` to make debugging easier. Returns: Task: the newly spawned task """ locals()[LOCALS_KEY_KI_PROTECTION_ENABLED] = True try: return GLOBAL_RUN_CONTEXT.runner.spawn_system_task(async_fn, *args, name=name) except AttributeError: raise RuntimeError("must be called from async context")
[ "def", "spawn_system_task", "(", "async_fn", ",", "*", "args", ",", "name", "=", "None", ")", ":", "locals", "(", ")", "[", "LOCALS_KEY_KI_PROTECTION_ENABLED", "]", "=", "True", "try", ":", "return", "GLOBAL_RUN_CONTEXT", ".", "runner", ".", "spawn_system_task", "(", "async_fn", ",", "*", "args", ",", "name", "=", "name", ")", "except", "AttributeError", ":", "raise", "RuntimeError", "(", "\"must be called from async context\"", ")" ]
[ 104, 0 ]
[ 156, 63 ]
python
cy
['cy', 'cy', 'en']
True
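A minimal sketch of starting a background system task; it is cancelled automatically when main() returns:

import trio

async def heartbeat():
    while True:
        print("still alive")
        await trio.sleep(1)

async def main():
    trio.lowlevel.spawn_system_task(heartbeat)   # no nursery needed; lives in the system nursery
    await trio.sleep(3)                          # the real work would go here

trio.run(main)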
current_trio_token
()
Retrieve the :class:`TrioToken` for the current call to :func:`trio.run`.
Retrieve the :class:`TrioToken` for the current call to :func:`trio.run`.
def current_trio_token(): """Retrieve the :class:`TrioToken` for the current call to :func:`trio.run`. """ locals()[LOCALS_KEY_KI_PROTECTION_ENABLED] = True try: return GLOBAL_RUN_CONTEXT.runner.current_trio_token() except AttributeError: raise RuntimeError("must be called from async context")
[ "def", "current_trio_token", "(", ")", ":", "locals", "(", ")", "[", "LOCALS_KEY_KI_PROTECTION_ENABLED", "]", "=", "True", "try", ":", "return", "GLOBAL_RUN_CONTEXT", ".", "runner", ".", "current_trio_token", "(", ")", "except", "AttributeError", ":", "raise", "RuntimeError", "(", "\"must be called from async context\"", ")" ]
[ 159, 0 ]
[ 168, 63 ]
python
en
['en', 'pt', 'en']
True
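The usual reason to grab the TrioToken is to let a non-trio thread schedule work back into the run loop; a short sketch:

import threading
import trio

async def main():
    token = trio.lowlevel.current_trio_token()
    done = trio.Event()

    def from_other_thread():
        token.run_sync_soon(done.set)    # safe to call from any thread

    threading.Thread(target=from_other_thread).start()
    await done.wait()
    print("woken from another thread")

trio.run(main)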
wait_all_tasks_blocked
(cushion=0.0, tiebreaker='deprecated')
Block until there are no runnable tasks. This is useful in testing code when you want to give other tasks a chance to "settle down". The calling task is blocked, and doesn't wake up until all other tasks are also blocked for at least ``cushion`` seconds. (Setting a non-zero ``cushion`` is intended to handle cases like two tasks talking to each other over a local socket, where we want to ignore the potential brief moment between a send and receive when all tasks are blocked.) Note that ``cushion`` is measured in *real* time, not the Trio clock time. If there are multiple tasks blocked in :func:`wait_all_tasks_blocked`, then the one with the shortest ``cushion`` is the one woken (and this task becoming unblocked resets the timers for the remaining tasks). If there are multiple tasks that have exactly the same ``cushion``, then all are woken. You should also consider :class:`trio.testing.Sequencer`, which provides a more explicit way to control execution ordering within a test, and will often produce more readable tests. Example: Here's an example of one way to test that Trio's locks are fair: we take the lock in the parent, start a child, wait for the child to be blocked waiting for the lock (!), and then check that we can't release and immediately re-acquire the lock:: async def lock_taker(lock): await lock.acquire() lock.release() async def test_lock_fairness(): lock = trio.Lock() await lock.acquire() async with trio.open_nursery() as nursery: nursery.start_soon(lock_taker, lock) # child hasn't run yet, we have the lock assert lock.locked() assert lock._owner is trio.lowlevel.current_task() await trio.testing.wait_all_tasks_blocked() # now the child has run and is blocked on lock.acquire(), we # still have the lock assert lock.locked() assert lock._owner is trio.lowlevel.current_task() lock.release() try: # The child has a prior claim, so we can't have it lock.acquire_nowait() except trio.WouldBlock: assert lock._owner is not trio.lowlevel.current_task() print("PASS") else: print("FAIL")
Block until there are no runnable tasks.
async def wait_all_tasks_blocked(cushion=0.0, tiebreaker='deprecated'): """Block until there are no runnable tasks. This is useful in testing code when you want to give other tasks a chance to "settle down". The calling task is blocked, and doesn't wake up until all other tasks are also blocked for at least ``cushion`` seconds. (Setting a non-zero ``cushion`` is intended to handle cases like two tasks talking to each other over a local socket, where we want to ignore the potential brief moment between a send and receive when all tasks are blocked.) Note that ``cushion`` is measured in *real* time, not the Trio clock time. If there are multiple tasks blocked in :func:`wait_all_tasks_blocked`, then the one with the shortest ``cushion`` is the one woken (and this task becoming unblocked resets the timers for the remaining tasks). If there are multiple tasks that have exactly the same ``cushion``, then all are woken. You should also consider :class:`trio.testing.Sequencer`, which provides a more explicit way to control execution ordering within a test, and will often produce more readable tests. Example: Here's an example of one way to test that Trio's locks are fair: we take the lock in the parent, start a child, wait for the child to be blocked waiting for the lock (!), and then check that we can't release and immediately re-acquire the lock:: async def lock_taker(lock): await lock.acquire() lock.release() async def test_lock_fairness(): lock = trio.Lock() await lock.acquire() async with trio.open_nursery() as nursery: nursery.start_soon(lock_taker, lock) # child hasn't run yet, we have the lock assert lock.locked() assert lock._owner is trio.lowlevel.current_task() await trio.testing.wait_all_tasks_blocked() # now the child has run and is blocked on lock.acquire(), we # still have the lock assert lock.locked() assert lock._owner is trio.lowlevel.current_task() lock.release() try: # The child has a prior claim, so we can't have it lock.acquire_nowait() except trio.WouldBlock: assert lock._owner is not trio.lowlevel.current_task() print("PASS") else: print("FAIL") """ locals()[LOCALS_KEY_KI_PROTECTION_ENABLED] = True try: return await GLOBAL_RUN_CONTEXT.runner.wait_all_tasks_blocked(cushion, tiebreaker) except AttributeError: raise RuntimeError("must be called from async context")
[ "async", "def", "wait_all_tasks_blocked", "(", "cushion", "=", "0.0", ",", "tiebreaker", "=", "'deprecated'", ")", ":", "locals", "(", ")", "[", "LOCALS_KEY_KI_PROTECTION_ENABLED", "]", "=", "True", "try", ":", "return", "await", "GLOBAL_RUN_CONTEXT", ".", "runner", ".", "wait_all_tasks_blocked", "(", "cushion", ",", "tiebreaker", ")", "except", "AttributeError", ":", "raise", "RuntimeError", "(", "\"must be called from async context\"", ")" ]
[ 171, 0 ]
[ 233, 63 ]
python
en
['en', 'en', 'en']
True
ExpandXcodeVariables
(string, expansions)
Expands Xcode-style $(VARIABLES) in string per the expansions dict. In some rare cases, it is appropriate to expand Xcode variables when a project file is generated. For any substring $(VAR) in string, if VAR is a key in the expansions dict, $(VAR) will be replaced with expansions[VAR]. Any $(VAR) substring in string for which VAR is not a key in the expansions dict will remain in the returned string.
Expands Xcode-style $(VARIABLES) in string per the expansions dict.
def ExpandXcodeVariables(string, expansions): """Expands Xcode-style $(VARIABLES) in string per the expansions dict. In some rare cases, it is appropriate to expand Xcode variables when a project file is generated. For any substring $(VAR) in string, if VAR is a key in the expansions dict, $(VAR) will be replaced with expansions[VAR]. Any $(VAR) substring in string for which VAR is not a key in the expansions dict will remain in the returned string. """ matches = _xcode_variable_re.findall(string) if matches == None: return string matches.reverse() for match in matches: (to_replace, variable) = match if not variable in expansions: continue replacement = expansions[variable] string = re.sub(re.escape(to_replace), replacement, string) return string
[ "def", "ExpandXcodeVariables", "(", "string", ",", "expansions", ")", ":", "matches", "=", "_xcode_variable_re", ".", "findall", "(", "string", ")", "if", "matches", "==", "None", ":", "return", "string", "matches", ".", "reverse", "(", ")", "for", "match", "in", "matches", ":", "(", "to_replace", ",", "variable", ")", "=", "match", "if", "not", "variable", "in", "expansions", ":", "continue", "replacement", "=", "expansions", "[", "variable", "]", "string", "=", "re", ".", "sub", "(", "re", ".", "escape", "(", "to_replace", ")", ",", "replacement", ",", "string", ")", "return", "string" ]
[ 529, 0 ]
[ 552, 15 ]
python
en
['en', 'en', 'en']
True
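A toy illustration of ExpandXcodeVariables, assuming the function from the record above is in scope. The module-level _xcode_variable_re is not included in the record, so the definition below is an assumption chosen to be consistent with how the function unpacks its matches into (to_replace, variable) pairs.

import re

_xcode_variable_re = re.compile(r'(\$\((.*?)\))')   # assumed; groups give ('$(VAR)', 'VAR')

expansions = {'SRCROOT': '/Users/me/project', 'CONFIGURATION': 'Release'}
path = '$(SRCROOT)/build/$(CONFIGURATION)/$(PRODUCT_NAME).app'
print(ExpandXcodeVariables(path, expansions))
# -> /Users/me/project/build/Release/$(PRODUCT_NAME).app
# $(PRODUCT_NAME) is left untouched because it has no entry in the expansions dict.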
EscapeXcodeDefine
(s)
We must escape the defines that we give to XCode so that it knows not to split on spaces and to respect backslash and quote literals. However, we must not quote the define, or Xcode will incorrectly interpret variables especially $(inherited).
We must escape the defines that we give to XCode so that it knows not to split on spaces and to respect backslash and quote literals. However, we must not quote the define, or Xcode will incorrectly interpret variables especially $(inherited).
def EscapeXcodeDefine(s): """We must escape the defines that we give to XCode so that it knows not to split on spaces and to respect backslash and quote literals. However, we must not quote the define, or Xcode will incorrectly interpret variables especially $(inherited).""" return re.sub(_xcode_define_re, r'\\\1', s)
[ "def", "EscapeXcodeDefine", "(", "s", ")", ":", "return", "re", ".", "sub", "(", "_xcode_define_re", ",", "r'\\\\\\1'", ",", "s", ")" ]
[ 556, 0 ]
[ 561, 45 ]
python
en
['en', 'en', 'en']
True
render_multiple_validation_result_pages_markdown
( validation_operator_result: ValidationOperatorResult, run_info_at_end: bool = True, )
Loop through and render multiple validation results to markdown. Args: validation_operator_result: (ValidationOperatorResult) Result of validation operator run run_info_at_end: move run info below expectation results Returns: string containing formatted markdown validation results
Loop through and render multiple validation results to markdown. Args: validation_operator_result: (ValidationOperatorResult) Result of validation operator run run_info_at_end: move run info below expectation results Returns: string containing formatted markdown validation results
def render_multiple_validation_result_pages_markdown( validation_operator_result: ValidationOperatorResult, run_info_at_end: bool = True, ) -> str: """ Loop through and render multiple validation results to markdown. Args: validation_operator_result: (ValidationOperatorResult) Result of validation operator run run_info_at_end: move run info below expectation results Returns: string containing formatted markdown validation results """ warnings.warn( "This 'render_multiple_validation_result_pages_markdown' function will be deprecated " "Please use ValidationResultsPageRenderer.render_validation_operator_result() instead." "E.g. to replicate the functionality of rendering a ValidationOperatorResult to markdown:" "validation_results_page_renderer = ValidationResultsPageRenderer(" " run_info_at_end=run_info_at_end" ")" "rendered_document_content_list = validation_results_page_renderer.render_validation_operator_result(" " validation_operator_result=validation_operator_result" ")" 'return " ".join(DefaultMarkdownPageView().render(rendered_document_content_list))' "Please update code accordingly.", DeprecationWarning, ) validation_results_page_renderer = ValidationResultsPageRenderer( run_info_at_end=run_info_at_end ) rendered_document_content_list = ( validation_results_page_renderer.render_validation_operator_result( validation_operator_result=validation_operator_result ) ) return " ".join(DefaultMarkdownPageView().render(rendered_document_content_list))
[ "def", "render_multiple_validation_result_pages_markdown", "(", "validation_operator_result", ":", "ValidationOperatorResult", ",", "run_info_at_end", ":", "bool", "=", "True", ",", ")", "->", "str", ":", "warnings", ".", "warn", "(", "\"This 'render_multiple_validation_result_pages_markdown' function will be deprecated \"", "\"Please use ValidationResultsPageRenderer.render_validation_operator_result() instead.\"", "\"E.g. to replicate the functionality of rendering a ValidationOperatorResult to markdown:\"", "\"validation_results_page_renderer = ValidationResultsPageRenderer(\"", "\" run_info_at_end=run_info_at_end\"", "\")\"", "\"rendered_document_content_list = validation_results_page_renderer.render_validation_operator_result(\"", "\" validation_operator_result=validation_operator_result\"", "\")\"", "'return \" \".join(DefaultMarkdownPageView().render(rendered_document_content_list))'", "\"Please update code accordingly.\"", ",", "DeprecationWarning", ",", ")", "validation_results_page_renderer", "=", "ValidationResultsPageRenderer", "(", "run_info_at_end", "=", "run_info_at_end", ")", "rendered_document_content_list", "=", "(", "validation_results_page_renderer", ".", "render_validation_operator_result", "(", "validation_operator_result", "=", "validation_operator_result", ")", ")", "return", "\" \"", ".", "join", "(", "DefaultMarkdownPageView", "(", ")", ".", "render", "(", "rendered_document_content_list", ")", ")" ]
[ 9, 0 ]
[ 46, 85 ]
python
en
['en', 'error', 'th']
False
MockClock.jump
(self, seconds)
Manually advance the clock by the given number of seconds. Args: seconds (float): the number of seconds to jump the clock forward. Raises: ValueError: if you try to pass a negative value for ``seconds``.
Manually advance the clock by the given number of seconds.
def jump(self, seconds): """Manually advance the clock by the given number of seconds. Args: seconds (float): the number of seconds to jump the clock forward. Raises: ValueError: if you try to pass a negative value for ``seconds``. """ if seconds < 0: raise ValueError("time can't go backwards") self._virtual_base += seconds
[ "def", "jump", "(", "self", ",", "seconds", ")", ":", "if", "seconds", "<", "0", ":", "raise", "ValueError", "(", "\"time can't go backwards\"", ")", "self", ".", "_virtual_base", "+=", "seconds" ]
[ 152, 4 ]
[ 164, 37 ]
python
en
['en', 'en', 'en']
True
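A sketch of the standard pattern: run the program under a MockClock (rate 0, so time only moves when told to), park a task on a long sleep, then jump() virtual time forward so the test finishes instantly.

import trio
import trio.testing

async def main():
    clock = trio.lowlevel.current_clock()            # the MockClock passed to trio.run below
    start = trio.current_time()

    async def sleeper():
        await trio.sleep(60)                         # a minute of *virtual* time
        print("slept", trio.current_time() - start, "virtual seconds")

    async with trio.open_nursery() as nursery:
        nursery.start_soon(sleeper)
        await trio.testing.wait_all_tasks_blocked()  # sleeper is now parked on its timeout
        clock.jump(60)                               # advance the clock; sleeper wakes immediately

trio.run(main, clock=trio.testing.MockClock())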
escape_html
(text)
Escape &, <, > as well as single and double quotes for HTML.
Escape &, <, > as well as single and double quotes for HTML.
def escape_html(text): """Escape &, <, > as well as single and double quotes for HTML.""" return text.replace('&', '&amp;'). \ replace('<', '&lt;'). \ replace('>', '&gt;'). \ replace('"', '&quot;'). \ replace("'", '&#39;')
[ "def", "escape_html", "(", "text", ")", ":", "return", "text", ".", "replace", "(", "'&'", ",", "'&amp;'", ")", ".", "replace", "(", "'<'", ",", "'&lt;'", ")", ".", "replace", "(", "'>'", ",", "'&gt;'", ")", ".", "replace", "(", "'\"'", ",", "'&quot;'", ")", ".", "replace", "(", "\"'\"", ",", "'&#39;'", ")" ]
[ 17, 0 ]
[ 23, 37 ]
python
en
['en', 'en', 'en']
True
SvgFormatter.format_unencoded
(self, tokensource, outfile)
Format ``tokensource``, an iterable of ``(tokentype, tokenstring)`` tuples and write it into ``outfile``. For our implementation we put all lines in their own 'line group'.
Format ``tokensource``, an iterable of ``(tokentype, tokenstring)`` tuples and write it into ``outfile``.
def format_unencoded(self, tokensource, outfile): """ Format ``tokensource``, an iterable of ``(tokentype, tokenstring)`` tuples and write it into ``outfile``. For our implementation we put all lines in their own 'line group'. """ x = self.xoffset y = self.yoffset if not self.nowrap: if self.encoding: outfile.write('<?xml version="1.0" encoding="%s"?>\n' % self.encoding) else: outfile.write('<?xml version="1.0"?>\n') outfile.write('<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.0//EN" ' '"http://www.w3.org/TR/2001/REC-SVG-20010904/DTD/' 'svg10.dtd">\n') outfile.write('<svg xmlns="http://www.w3.org/2000/svg">\n') outfile.write('<g font-family="%s" font-size="%s">\n' % (self.fontfamily, self.fontsize)) outfile.write('<text x="%s" y="%s" xml:space="preserve">' % (x, y)) for ttype, value in tokensource: style = self._get_style(ttype) tspan = style and '<tspan' + style + '>' or '' tspanend = tspan and '</tspan>' or '' value = escape_html(value) if self.spacehack: value = value.expandtabs().replace(' ', '&#160;') parts = value.split('\n') for part in parts[:-1]: outfile.write(tspan + part + tspanend) y += self.ystep outfile.write('</text>\n<text x="%s" y="%s" ' 'xml:space="preserve">' % (x, y)) outfile.write(tspan + parts[-1] + tspanend) outfile.write('</text>') if not self.nowrap: outfile.write('</g></svg>\n')
[ "def", "format_unencoded", "(", "self", ",", "tokensource", ",", "outfile", ")", ":", "x", "=", "self", ".", "xoffset", "y", "=", "self", ".", "yoffset", "if", "not", "self", ".", "nowrap", ":", "if", "self", ".", "encoding", ":", "outfile", ".", "write", "(", "'<?xml version=\"1.0\" encoding=\"%s\"?>\\n'", "%", "self", ".", "encoding", ")", "else", ":", "outfile", ".", "write", "(", "'<?xml version=\"1.0\"?>\\n'", ")", "outfile", ".", "write", "(", "'<!DOCTYPE svg PUBLIC \"-//W3C//DTD SVG 1.0//EN\" '", "'\"http://www.w3.org/TR/2001/REC-SVG-20010904/DTD/'", "'svg10.dtd\">\\n'", ")", "outfile", ".", "write", "(", "'<svg xmlns=\"http://www.w3.org/2000/svg\">\\n'", ")", "outfile", ".", "write", "(", "'<g font-family=\"%s\" font-size=\"%s\">\\n'", "%", "(", "self", ".", "fontfamily", ",", "self", ".", "fontsize", ")", ")", "outfile", ".", "write", "(", "'<text x=\"%s\" y=\"%s\" xml:space=\"preserve\">'", "%", "(", "x", ",", "y", ")", ")", "for", "ttype", ",", "value", "in", "tokensource", ":", "style", "=", "self", ".", "_get_style", "(", "ttype", ")", "tspan", "=", "style", "and", "'<tspan'", "+", "style", "+", "'>'", "or", "''", "tspanend", "=", "tspan", "and", "'</tspan>'", "or", "''", "value", "=", "escape_html", "(", "value", ")", "if", "self", ".", "spacehack", ":", "value", "=", "value", ".", "expandtabs", "(", ")", ".", "replace", "(", "' '", ",", "'&#160;'", ")", "parts", "=", "value", ".", "split", "(", "'\\n'", ")", "for", "part", "in", "parts", "[", ":", "-", "1", "]", ":", "outfile", ".", "write", "(", "tspan", "+", "part", "+", "tspanend", ")", "y", "+=", "self", ".", "ystep", "outfile", ".", "write", "(", "'</text>\\n<text x=\"%s\" y=\"%s\" '", "'xml:space=\"preserve\">'", "%", "(", "x", ",", "y", ")", ")", "outfile", ".", "write", "(", "tspan", "+", "parts", "[", "-", "1", "]", "+", "tspanend", ")", "outfile", ".", "write", "(", "'</text>'", ")", "if", "not", "self", ".", "nowrap", ":", "outfile", ".", "write", "(", "'</g></svg>\\n'", ")" ]
[ 96, 4 ]
[ 135, 41 ]
python
en
['en', 'error', 'th']
False
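format_unencoded() is normally driven through Pygments' highlight(); a short sketch, assuming a standard Pygments install:

from pygments import highlight
from pygments.lexers import PythonLexer
from pygments.formatters import SvgFormatter

code = 'print("hello, <svg> world")\n'
svg = highlight(code, PythonLexer(), SvgFormatter())
with open('hello.svg', 'w') as f:
    f.write(svg)
# Each source line ends up in its own <text> element, with &, <, > and quotes
# run through escape_html() first.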
ColumnStandardDeviation._pandas
(cls, column, **kwargs)
Pandas Standard Deviation implementation
Pandas Standard Deviation implementation
def _pandas(cls, column, **kwargs): """Pandas Standard Deviation implementation""" return column.std()
[ "def", "_pandas", "(", "cls", ",", "column", ",", "*", "*", "kwargs", ")", ":", "return", "column", ".", "std", "(", ")" ]
[ 34, 4 ]
[ 36, 27 ]
python
en
['pt', 'fr', 'en']
False
ColumnStandardDeviation._sqlalchemy
(cls, column, _dialect, **kwargs)
SqlAlchemy Standard Deviation implementation
SqlAlchemy Standard Deviation implementation
def _sqlalchemy(cls, column, _dialect, **kwargs): """SqlAlchemy Standard Deviation implementation""" if _dialect.name.lower() == "mssql": standard_deviation = sa.func.stdev(column) else: standard_deviation = sa.func.stddev_samp(column) return standard_deviation
[ "def", "_sqlalchemy", "(", "cls", ",", "column", ",", "_dialect", ",", "*", "*", "kwargs", ")", ":", "if", "_dialect", ".", "name", ".", "lower", "(", ")", "==", "\"mssql\"", ":", "standard_deviation", "=", "sa", ".", "func", ".", "stdev", "(", "column", ")", "else", ":", "standard_deviation", "=", "sa", ".", "func", ".", "stddev_samp", "(", "column", ")", "return", "standard_deviation" ]
[ 39, 4 ]
[ 45, 33 ]
python
en
['en', 'de', 'en']
True
ColumnStandardDeviation._spark
(cls, column, **kwargs)
Spark Standard Deviation implementation
Spark Standard Deviation implementation
def _spark(cls, column, **kwargs): """Spark Standard Deviation implementation""" return F.stddev_samp(column)
[ "def", "_spark", "(", "cls", ",", "column", ",", "*", "*", "kwargs", ")", ":", "return", "F", ".", "stddev_samp", "(", "column", ")" ]
[ 48, 4 ]
[ 50, 36 ]
python
en
['en', 'da', 'en']
True
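All three implementations above compute the *sample* standard deviation (n-1 denominator): pandas' Series.std() defaults to ddof=1, and STDDEV_SAMP / stddev_samp do the same on the SQL and Spark sides. A quick numeric check with the pandas/numpy pair:

import numpy as np
import pandas as pd

values = [2.0, 4.0, 4.0, 4.0, 5.0, 5.0, 7.0, 9.0]
print(pd.Series(values).std())     # sample std (ddof=1) -> ~2.138
print(np.std(values, ddof=1))      # same value
print(np.std(values, ddof=0))      # population std -> 2.0 (what STDDEV_POP would give instead)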
build_anomaly_intervals
(X, y, time_column, severity=True, indices=False)
Group together consecutive anomalous samples in anomaly intervals. This is a dummy boundary detection function that groups together samples that have been consecutively flagged as anomalous and returns boundaries of anomalous intervals. Optionally, it computes the average severity of each interval. This detector is here only to serve as a reference of what a boundary detection primitive looks like, and is not intended to be used in real scenarios.
Group together consecutive anomalous samples in anomaly intervals.
def build_anomaly_intervals(X, y, time_column, severity=True, indices=False): """Group together consecutive anomalous samples in anomaly intervals. This is a dummy boundary detection function that groups together samples that have been consecutively flagged as anomalous and returns boundaries of anomalous intervals. Optionally, it computes the average severity of each interval. This detector is here only to serve as reference of what an boundary detection primitive looks like, and is not intended to be used in real scenarios. """ timestamps = X[time_column] start = None start_ts = None intervals = list() values = list() for index, (value, timestamp) in enumerate(zip(y, timestamps)): if value != 0: if start_ts is None: start = index start_ts = timestamp if severity: values.append(value) elif start_ts is not None: interval = [start_ts, timestamp] if indices: interval.extend([start, index]) if severity: interval.append(np.mean(values)) values = list() intervals.append(tuple(interval)) start = None start_ts = None # We might have an open interval at the end if start_ts is not None: interval = [start_ts, timestamp] if indices: interval.extend([start, index]) if severity: interval.append(np.mean(values)) intervals.append(tuple(interval)) return np.array(intervals)
[ "def", "build_anomaly_intervals", "(", "X", ",", "y", ",", "time_column", ",", "severity", "=", "True", ",", "indices", "=", "False", ")", ":", "timestamps", "=", "X", "[", "time_column", "]", "start", "=", "None", "start_ts", "=", "None", "intervals", "=", "list", "(", ")", "values", "=", "list", "(", ")", "for", "index", ",", "(", "value", ",", "timestamp", ")", "in", "enumerate", "(", "zip", "(", "y", ",", "timestamps", ")", ")", ":", "if", "value", "!=", "0", ":", "if", "start_ts", "is", "None", ":", "start", "=", "index", "start_ts", "=", "timestamp", "if", "severity", ":", "values", ".", "append", "(", "value", ")", "elif", "start_ts", "is", "not", "None", ":", "interval", "=", "[", "start_ts", ",", "timestamp", "]", "if", "indices", ":", "interval", ".", "extend", "(", "[", "start", ",", "index", "]", ")", "if", "severity", ":", "interval", ".", "append", "(", "np", ".", "mean", "(", "values", ")", ")", "values", "=", "list", "(", ")", "intervals", ".", "append", "(", "tuple", "(", "interval", ")", ")", "start", "=", "None", "start_ts", "=", "None", "# We might have an open interval at the end", "if", "start_ts", "is", "not", "None", ":", "interval", "=", "[", "start_ts", ",", "timestamp", "]", "if", "indices", ":", "interval", ".", "extend", "(", "[", "start", ",", "index", "]", ")", "if", "severity", ":", "interval", ".", "append", "(", "np", ".", "mean", "(", "values", ")", ")", "intervals", ".", "append", "(", "tuple", "(", "interval", ")", ")", "return", "np", ".", "array", "(", "intervals", ")" ]
[ 3, 0 ]
[ 54, 30 ]
python
en
['en', 'en', 'en']
True
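A worked toy example for build_anomaly_intervals above (assuming the function is in scope). Note that the end timestamp of a closed interval is the first sample at which y returns to zero, while an interval still open at the end of the data is closed at the last timestamp.

import numpy as np
import pandas as pd

X = pd.DataFrame({'timestamp': [0, 1, 2, 3, 4, 5, 6]})
y = np.array([0.0, 0.8, 0.6, 0.0, 0.0, 0.4, 0.4])

intervals = build_anomaly_intervals(X, y, 'timestamp')
print(intervals)
# roughly (up to float noise):
# [[1.  3.  0.7]    first anomalous stretch: starts at t=1, closed at t=3, mean severity 0.7
#  [5.  6.  0.4]]   open stretch at the end: closed at the last timestamp, mean severity 0.4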