Dataset columns:
identifier: string, lengths 1 to 155
parameters: string, lengths 2 to 6.09k
docstring: string, lengths 11 to 63.4k
docstring_summary: string, lengths 0 to 63.4k
function: string, lengths 29 to 99.8k
function_tokens: sequence
start_point: sequence
end_point: sequence
language: string, 1 distinct value
docstring_language: string, lengths 2 to 7
docstring_language_predictions: string, lengths 18 to 23
is_langid_reliable: string, 2 distinct values
CacheKeyLock.set_result
(self, value: Any, ttl: int = None)
Set the result, updating the cache and any waiters.
Set the result, updating the cache and any waiters.
async def set_result(self, value: Any, ttl: int = None): """Set the result, updating the cache and any waiters.""" if self.done and value: raise CacheError("Result already set") self._future.set_result(value) if not self._parent or self._parent.done: await self.cache.set(self.key, value, ttl)
[ "async", "def", "set_result", "(", "self", ",", "value", ":", "Any", ",", "ttl", ":", "int", "=", "None", ")", ":", "if", "self", ".", "done", "and", "value", ":", "raise", "CacheError", "(", "\"Result already set\"", ")", "self", ".", "_future", ".", "set_result", "(", "value", ")", "if", "not", "self", ".", "_parent", "or", "self", ".", "_parent", ".", "done", ":", "await", "self", ".", "cache", ".", "set", "(", "self", ".", "key", ",", "value", ",", "ttl", ")" ]
[ 127, 4 ]
[ 133, 54 ]
python
en
['en', 'en', 'en']
True
CacheKeyLock.__await__
(self)
Wait for a result to be produced.
Wait for a result to be produced.
def __await__(self): """Wait for a result to be produced.""" return (yield from self._future)
[ "def", "__await__", "(", "self", ")", ":", "return", "(", "yield", "from", "self", ".", "_future", ")" ]
[ 135, 4 ]
[ 137, 40 ]
python
en
['en', 'en', 'en']
True
CacheKeyLock.__aenter__
(self)
Async context manager entry.
Async context manager entry.
async def __aenter__(self): """Async context manager entry.""" result = None if self.parent: result = await self.parent if result: await self # wait for parent's done handler to complete if not result: found = await self.cache.get(self.key) if found: self._future.set_result(found) return self
[ "async", "def", "__aenter__", "(", "self", ")", ":", "result", "=", "None", "if", "self", ".", "parent", ":", "result", "=", "await", "self", ".", "parent", "if", "result", ":", "await", "self", "# wait for parent's done handler to complete", "if", "not", "result", ":", "found", "=", "await", "self", ".", "cache", ".", "get", "(", "self", ".", "key", ")", "if", "found", ":", "self", ".", "_future", ".", "set_result", "(", "found", ")", "return", "self" ]
[ 139, 4 ]
[ 150, 19 ]
python
en
['fr', 'gl', 'en']
False
CacheKeyLock.release
(self)
Release the cache lock.
Release the cache lock.
def release(self): """Release the cache lock.""" if not self.parent and not self.released: self.cache.release(self.key) self.released = True
[ "def", "release", "(", "self", ")", ":", "if", "not", "self", ".", "parent", "and", "not", "self", ".", "released", ":", "self", ".", "cache", ".", "release", "(", "self", ".", "key", ")", "self", ".", "released", "=", "True" ]
[ 152, 4 ]
[ 156, 32 ]
python
en
['en', 'it', 'en']
True
CacheKeyLock.__aexit__
(self, exc_type, exc_val, exc_tb)
Async context manager exit. `None` is returned to any waiters if no value is produced.
Async context manager exit.
async def __aexit__(self, exc_type, exc_val, exc_tb): """ Async context manager exit. `None` is returned to any waiters if no value is produced. """ if exc_val: self.exception = exc_val if not self.done: self._future.set_result(None) self.release()
[ "async", "def", "__aexit__", "(", "self", ",", "exc_type", ",", "exc_val", ",", "exc_tb", ")", ":", "if", "exc_val", ":", "self", ".", "exception", "=", "exc_val", "if", "not", "self", ".", "done", ":", "self", ".", "_future", ".", "set_result", "(", "None", ")", "self", ".", "release", "(", ")" ]
[ 158, 4 ]
[ 168, 22 ]
python
en
['en', 'error', 'th']
False
CacheKeyLock.__del__
(self)
Handle deletion.
Handle deletion.
def __del__(self): """Handle deletion.""" self.release()
[ "def", "__del__", "(", "self", ")", ":", "self", ".", "release", "(", ")" ]
[ 170, 4 ]
[ 172, 22 ]
python
en
['it', 'es', 'en']
False
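The CacheKeyLock rows above (set_result, __await__, __aenter__, release, __aexit__, __del__) together describe an awaitable async context manager that lets one caller produce a value while other waiters reuse a parent result or the cached one. A minimal usage sketch follows; the cache.lock(key) factory and the expensive_computation helper are hypothetical names that do not appear in this section, and lock.done is assumed to be the property referenced by set_result.

    async def expensive_computation(key):
        ...  # placeholder for the real work

    async def get_or_compute(cache, key):
        lock = cache.lock(key)           # hypothetical factory returning a CacheKeyLock
        async with lock:                 # __aenter__ may resolve from a parent or the cache
            if not lock.done:            # no result yet: this caller is the producer
                value = await expensive_computation(key)
                await lock.set_result(value, ttl=60)   # fulfil waiters and write the cache
            return await lock            # __await__ yields the produced value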
parse_ansi
(string, strip_ansi=False, parser=ANSI_PARSER, xterm256=False, mxp=False)
Parses a string, subbing color codes as needed. Args: string (str): The string to parse. strip_ansi (bool, optional): Strip all ANSI sequences. parser (ansi.AnsiParser, optional): A parser instance to use. xterm256 (bool, optional): Support xterm256 or not. mxp (bool, optional): Support MXP markup or not. Returns: string (str): The parsed string.
Parses a string, subbing color codes as needed.
def parse_ansi(string, strip_ansi=False, parser=ANSI_PARSER, xterm256=False, mxp=False): """ Parses a string, subbing color codes as needed. Args: string (str): The string to parse. strip_ansi (bool, optional): Strip all ANSI sequences. parser (ansi.AnsiParser, optional): A parser instance to use. xterm256 (bool, optional): Support xterm256 or not. mxp (bool, optional): Support MXP markup or not. Returns: string (str): The parsed string. """ return parser.parse_ansi(string, strip_ansi=strip_ansi, xterm256=xterm256, mxp=mxp)
[ "def", "parse_ansi", "(", "string", ",", "strip_ansi", "=", "False", ",", "parser", "=", "ANSI_PARSER", ",", "xterm256", "=", "False", ",", "mxp", "=", "False", ")", ":", "return", "parser", ".", "parse_ansi", "(", "string", ",", "strip_ansi", "=", "strip_ansi", ",", "xterm256", "=", "xterm256", ",", "mxp", "=", "mxp", ")" ]
[ 468, 0 ]
[ 483, 87 ]
python
en
['en', 'error', 'th']
False
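A brief usage sketch of parse_ansi as documented above. The import path and the |r / |n colour markers are assumptions based on Evennia's conventions rather than anything stated in this row.

    from evennia.utils.ansi import parse_ansi   # assumed import path

    text = "|rWarning:|n disk almost full"
    print(parse_ansi(text))                      # markers replaced by raw ANSI escapes
    print(parse_ansi(text, strip_ansi=True))     # plain text with all markup removed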
strip_ansi
(string, parser=ANSI_PARSER)
Strip all ansi from the string. This handles the Evennia-specific markup. Args: string (str): The string to strip. parser (ansi.AnsiParser, optional): The parser to use. Returns: string (str): The stripped string.
Strip all ansi from the string. This handles the Evennia-specific markup.
def strip_ansi(string, parser=ANSI_PARSER): """ Strip all ansi from the string. This handles the Evennia-specific markup. Args: string (str): The string to strip. parser (ansi.AnsiParser, optional): The parser to use. Returns: string (str): The stripped string. """ return parser.parse_ansi(string, strip_ansi=True)
[ "def", "strip_ansi", "(", "string", ",", "parser", "=", "ANSI_PARSER", ")", ":", "return", "parser", ".", "parse_ansi", "(", "string", ",", "strip_ansi", "=", "True", ")" ]
[ 486, 0 ]
[ 499, 53 ]
python
en
['en', 'error', 'th']
False
strip_raw_ansi
(string, parser=ANSI_PARSER)
Remove raw ansi codes from string. This assumes pure ANSI-bytecodes in the string. Args: string (str): The string to parse. parser (ansi.AnsiParser, optional): The parser to use. Returns: string (str): The stripped string.
Remove raw ansi codes from string. This assumes pure ANSI-bytecodes in the string.
def strip_raw_ansi(string, parser=ANSI_PARSER): """ Remove raw ansi codes from string. This assumes pure ANSI-bytecodes in the string. Args: string (str): The string to parse. parser (bool, optional): The parser to use. Returns: string (str): the stripped string. """ return parser.strip_raw_codes(string)
[ "def", "strip_raw_ansi", "(", "string", ",", "parser", "=", "ANSI_PARSER", ")", ":", "return", "parser", ".", "strip_raw_codes", "(", "string", ")" ]
[ 502, 0 ]
[ 515, 41 ]
python
en
['en', 'error', 'th']
False
raw
(string)
Escapes a string into a form which won't be colorized by the ansi parser. Returns: string (str): The raw, escaped string.
Escapes a string into a form which won't be colorized by the ansi parser.
def raw(string): """ Escapes a string into a form which won't be colorized by the ansi parser. Returns: string (str): The raw, escaped string. """ return string.replace('{', '{{').replace('|', '||')
[ "def", "raw", "(", "string", ")", ":", "return", "string", ".", "replace", "(", "'{'", ",", "'{{'", ")", ".", "replace", "(", "'|'", ",", "'||'", ")" ]
[ 518, 0 ]
[ 527, 55 ]
python
en
['en', 'error', 'th']
False
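raw() above only doubles the two markup characters ('{' and '|') so the parser will render them literally. A quick check of that behaviour; only the import path is an assumption.

    from evennia.utils.ansi import raw   # assumed import path

    escaped = raw("use |r to colour text red")
    assert escaped == "use ||r to colour text red"   # '|' doubled, so it is no longer a colour code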
_spacing_preflight
(func)
This wrapper function is used to do some preflight checks on functions used for padding ANSIStrings.
This wrapper function is used to do some preflight checks on functions used for padding ANSIStrings.
def _spacing_preflight(func): """ This wrapper function is used to do some preflight checks on functions used for padding ANSIStrings. """ def wrapped(self, width, fillchar=None): if fillchar is None: fillchar = " " if (len(fillchar) != 1) or (not isinstance(fillchar, basestring)): raise TypeError("must be char, not %s" % type(fillchar)) if not isinstance(width, int): raise TypeError("integer argument expected, got %s" % type(width)) _difference = width - len(self) if _difference <= 0: return self return func(self, width, fillchar, _difference) return wrapped
[ "def", "_spacing_preflight", "(", "func", ")", ":", "def", "wrapped", "(", "self", ",", "width", ",", "fillchar", "=", "None", ")", ":", "if", "fillchar", "is", "None", ":", "fillchar", "=", "\" \"", "if", "(", "len", "(", "fillchar", ")", "!=", "1", ")", "or", "(", "not", "isinstance", "(", "fillchar", ",", "basestring", ")", ")", ":", "raise", "TypeError", "(", "\"must be char, not %s\"", "%", "type", "(", "fillchar", ")", ")", "if", "not", "isinstance", "(", "width", ",", "int", ")", ":", "raise", "TypeError", "(", "\"integer argument expected, got %s\"", "%", "type", "(", "width", ")", ")", "_difference", "=", "width", "-", "len", "(", "self", ")", "if", "_difference", "<=", "0", ":", "return", "self", "return", "func", "(", "self", ",", "width", ",", "fillchar", ",", "_difference", ")", "return", "wrapped" ]
[ 530, 0 ]
[ 548, 18 ]
python
en
['en', 'error', 'th']
False
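The decorator above validates fillchar and width and precomputes _difference = width - len(self) before calling the wrapped padding method. The real ANSIString padding methods are not shown in this section, so the bodies below are only an illustrative sketch of how such methods, defined on ANSIString, could consume the decorator.

    @_spacing_preflight
    def ljust(self, width, fillchar, _difference):
        # arguments already validated by the decorator; just append the missing space
        return self + fillchar * _difference

    @_spacing_preflight
    def center(self, width, fillchar, _difference):
        remainder = _difference % 2
        half = _difference // 2
        return fillchar * half + self + fillchar * (half + remainder)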
_query_super
(func_name)
Have the string class handle this with the cleaned string instead of ANSIString.
Have the string class handle this with the cleaned string instead of ANSIString.
def _query_super(func_name): """ Have the string class handle this with the cleaned string instead of ANSIString. """ def wrapped(self, *args, **kwargs): return getattr(self.clean(), func_name)(*args, **kwargs) return wrapped
[ "def", "_query_super", "(", "func_name", ")", ":", "def", "wrapped", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "getattr", "(", "self", ".", "clean", "(", ")", ",", "func_name", ")", "(", "*", "args", ",", "*", "*", "kwargs", ")", "return", "wrapped" ]
[ 551, 0 ]
[ 560, 18 ]
python
en
['en', 'error', 'th']
False
_on_raw
(func_name)
Like query_super, but makes the operation run on the raw string.
Like query_super, but makes the operation run on the raw string.
def _on_raw(func_name): """ Like query_super, but makes the operation run on the raw string. """ def wrapped(self, *args, **kwargs): args = list(args) try: string = args.pop(0) if hasattr(string, '_raw_string'): args.insert(0, string.raw()) else: args.insert(0, string) except IndexError: # just skip out if there are no more strings pass result = getattr(self._raw_string, func_name)(*args, **kwargs) if isinstance(result, basestring): return ANSIString(result, decoded=True) return result return wrapped
[ "def", "_on_raw", "(", "func_name", ")", ":", "def", "wrapped", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "args", "=", "list", "(", "args", ")", "try", ":", "string", "=", "args", ".", "pop", "(", "0", ")", "if", "hasattr", "(", "string", ",", "'_raw_string'", ")", ":", "args", ".", "insert", "(", "0", ",", "string", ".", "raw", "(", ")", ")", "else", ":", "args", ".", "insert", "(", "0", ",", "string", ")", "except", "IndexError", ":", "# just skip out if there are no more strings", "pass", "result", "=", "getattr", "(", "self", ".", "_raw_string", ",", "func_name", ")", "(", "*", "args", ",", "*", "*", "kwargs", ")", "if", "isinstance", "(", "result", ",", "basestring", ")", ":", "return", "ANSIString", "(", "result", ",", "decoded", "=", "True", ")", "return", "result", "return", "wrapped" ]
[ 563, 0 ]
[ 584, 18 ]
python
en
['en', 'error', 'th']
False
_transform
(func_name)
Some string functions, like those manipulating capital letters, return a string the same length as the original. This function allows us to do the same, replacing all the non-coded characters with the resulting string.
Some string functions, like those manipulating capital letters, return a string the same length as the original. This function allows us to do the same, replacing all the non-coded characters with the resulting string.
def _transform(func_name): """ Some string functions, like those manipulating capital letters, return a string the same length as the original. This function allows us to do the same, replacing all the non-coded characters with the resulting string. """ def wrapped(self, *args, **kwargs): replacement_string = _query_super(func_name)(self, *args, **kwargs) to_string = [] char_counter = 0 for index in range(0, len(self._raw_string)): if index in self._code_indexes: to_string.append(self._raw_string[index]) elif index in self._char_indexes: to_string.append(replacement_string[char_counter]) char_counter += 1 return ANSIString( ''.join(to_string), decoded=True, code_indexes=self._code_indexes, char_indexes=self._char_indexes, clean_string=replacement_string) return wrapped
[ "def", "_transform", "(", "func_name", ")", ":", "def", "wrapped", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "replacement_string", "=", "_query_super", "(", "func_name", ")", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", "to_string", "=", "[", "]", "char_counter", "=", "0", "for", "index", "in", "range", "(", "0", ",", "len", "(", "self", ".", "_raw_string", ")", ")", ":", "if", "index", "in", "self", ".", "_code_indexes", ":", "to_string", ".", "append", "(", "self", ".", "_raw_string", "[", "index", "]", ")", "elif", "index", "in", "self", ".", "_char_indexes", ":", "to_string", ".", "append", "(", "replacement_string", "[", "char_counter", "]", ")", "char_counter", "+=", "1", "return", "ANSIString", "(", "''", ".", "join", "(", "to_string", ")", ",", "decoded", "=", "True", ",", "code_indexes", "=", "self", ".", "_code_indexes", ",", "char_indexes", "=", "self", ".", "_char_indexes", ",", "clean_string", "=", "replacement_string", ")", "return", "wrapped" ]
[ 587, 0 ]
[ 610, 18 ]
python
en
['en', 'error', 'th']
False
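The effect of _transform is easiest to see on a case-changing method: the clean text is rewritten character by character while every index that belongs to a colour code is copied through untouched. A hedged example, assuming ANSIString.upper is one of the methods wrapped this way and using Evennia-style markup.

    from evennia.utils.ansi import ANSIString   # assumed import path

    s = ANSIString("|rhello|n world")
    shouted = s.upper()
    print(shouted.clean())      # 'HELLO WORLD'
    print(repr(shouted.raw()))  # escape sequences still sit at their original indexes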
ANSIParser.sub_ansi
(self, ansimatch)
Replacer used by `re.sub` to replace ANSI markers with correct ANSI sequences Args: ansimatch (re.matchobject): The match. Returns: processed (str): The processed match string.
Replacer used by `re.sub` to replace ANSI markers with correct ANSI sequences
def sub_ansi(self, ansimatch): """ Replacer used by `re.sub` to replace ANSI markers with correct ANSI sequences Args: ansimatch (re.matchobject): The match. Returns: processed (str): The processed match string. """ return self.ansi_map_dict.get(ansimatch.group(), "")
[ "def", "sub_ansi", "(", "self", ",", "ansimatch", ")", ":", "return", "self", ".", "ansi_map_dict", ".", "get", "(", "ansimatch", ".", "group", "(", ")", ",", "\"\"", ")" ]
[ 210, 4 ]
[ 222, 60 ]
python
en
['en', 'error', 'th']
False
ANSIParser.sub_brightbg
(self, ansimatch)
Replacer used by `re.sub` to replace ANSI bright background markers with Xterm256 replacement Args: ansimatch (re.matchobject): The match. Returns: processed (str): The processed match string.
Replacer used by `re.sub` to replace ANSI bright background markers with Xterm256 replacement
def sub_brightbg(self, ansimatch): """ Replacer used by `re.sub` to replace ANSI bright background markers with Xterm256 replacement Args: ansimatch (re.matchobject): The match. Returns: processed (str): The processed match string. """ return self.ansi_xterm256_bright_bg_map_dict.get(ansimatch.group(), "")
[ "def", "sub_brightbg", "(", "self", ",", "ansimatch", ")", ":", "return", "self", ".", "ansi_xterm256_bright_bg_map_dict", ".", "get", "(", "ansimatch", ".", "group", "(", ")", ",", "\"\"", ")" ]
[ 224, 4 ]
[ 236, 79 ]
python
en
['en', 'error', 'th']
False
ANSIParser.sub_xterm256
(self, rgbmatch, use_xterm256=False, color_type="fg")
This is a replacer method called by `re.sub` with the matched tag. It must return the correct ansi sequence. It checks `self.do_xterm256` to determine if conversion to standard ANSI should be done or not. Args: rgbmatch (re.matchobject): The match. use_xterm256 (bool, optional): Don't convert 256-colors to 16. color_type (str): One of 'fg', 'bg', 'gfg', 'gbg'. Returns: processed (str): The processed match string.
This is a replacer method called by `re.sub` with the matched tag. It must return the correct ansi sequence.
def sub_xterm256(self, rgbmatch, use_xterm256=False, color_type="fg"): """ This is a replacer method called by `re.sub` with the matched tag. It must return the correct ansi sequence. It checks `self.do_xterm256` to determine if conversion to standard ANSI should be done or not. Args: rgbmatch (re.matchobject): The match. use_xterm256 (bool, optional): Don't convert 256-colors to 16. color_type (str): One of 'fg', 'bg', 'gfg', 'gbg'. Returns: processed (str): The processed match string. """ if not rgbmatch: return "" # get tag, stripping the initial marker #rgbtag = rgbmatch.group()[1:] background = color_type in ("bg", "gbg") grayscale = color_type in ("gfg", "gbg") if not grayscale: # 6x6x6 color-cube (xterm indexes 16-231) try: red, green, blue = [int(val) for val in rgbmatch.groups() if val is not None] except (IndexError, ValueError): logger.log_trace() return rgbmatch.group(0) else: # grayscale values (xterm indexes 0, 232-255, 15) for full spectrum try: letter = [val for val in rgbmatch.groups() if val is not None][0] except IndexError: logger.log_trace() return rgbmatch.group(0) if letter == 'a': colval = 16 # pure black @ index 16 (first color cube entry) elif letter == 'z': colval = 231 # pure white @ index 231 (last color cube entry) else: # letter in range [b..y] (exactly 24 values!) colval = 134 + ord(letter) # ansi fallback logic expects r,g,b values in [0..5] range gray = (ord(letter) - 97) / 5.0 red, green, blue = gray, gray, gray if use_xterm256: if not grayscale: colval = 16 + (red * 36) + (green * 6) + blue return "\033[%s8;5;%sm" % (3 + int(background), colval) # replaced since some clients (like Potato) does not accept codes with leading zeroes, see issue #1024. # return "\033[%s8;5;%s%s%sm" % (3 + int(background), colval // 100, (colval % 100) // 10, colval%10) else: # xterm256 not supported, convert the rgb value to ansi instead if red == green == blue and red < 3: if background: return ANSI_BACK_BLACK elif red >= 1: return ANSI_HILITE + ANSI_BLACK else: return ANSI_NORMAL + ANSI_BLACK elif red == green == blue: if background: return ANSI_BACK_WHITE elif red >= 4: return ANSI_HILITE + ANSI_WHITE else: return ANSI_NORMAL + ANSI_WHITE elif red > green and red > blue: if background: return ANSI_BACK_RED elif red >= 3: return ANSI_HILITE + ANSI_RED else: return ANSI_NORMAL + ANSI_RED elif red == green and red > blue: if background: return ANSI_BACK_YELLOW elif red >= 3: return ANSI_HILITE + ANSI_YELLOW else: return ANSI_NORMAL + ANSI_YELLOW elif red == blue and red > green: if background: return ANSI_BACK_MAGENTA elif red >= 3: return ANSI_HILITE + ANSI_MAGENTA else: return ANSI_NORMAL + ANSI_MAGENTA elif green > blue: if background: return ANSI_BACK_GREEN elif green >= 3: return ANSI_HILITE + ANSI_GREEN else: return ANSI_NORMAL + ANSI_GREEN elif green == blue: if background: return ANSI_BACK_CYAN elif green >= 3: return ANSI_HILITE + ANSI_CYAN else: return ANSI_NORMAL + ANSI_CYAN else: # mostly blue if background: return ANSI_BACK_BLUE elif blue >= 3: return ANSI_HILITE + ANSI_BLUE else: return ANSI_NORMAL + ANSI_BLUE
[ "def", "sub_xterm256", "(", "self", ",", "rgbmatch", ",", "use_xterm256", "=", "False", ",", "color_type", "=", "\"fg\"", ")", ":", "if", "not", "rgbmatch", ":", "return", "\"\"", "# get tag, stripping the initial marker", "#rgbtag = rgbmatch.group()[1:]", "background", "=", "color_type", "in", "(", "\"bg\"", ",", "\"gbg\"", ")", "grayscale", "=", "color_type", "in", "(", "\"gfg\"", ",", "\"gbg\"", ")", "if", "not", "grayscale", ":", "# 6x6x6 color-cube (xterm indexes 16-231)", "try", ":", "red", ",", "green", ",", "blue", "=", "[", "int", "(", "val", ")", "for", "val", "in", "rgbmatch", ".", "groups", "(", ")", "if", "val", "is", "not", "None", "]", "except", "(", "IndexError", ",", "ValueError", ")", ":", "logger", ".", "log_trace", "(", ")", "return", "rgbmatch", ".", "group", "(", "0", ")", "else", ":", "# grayscale values (xterm indexes 0, 232-255, 15) for full spectrum", "try", ":", "letter", "=", "[", "val", "for", "val", "in", "rgbmatch", ".", "groups", "(", ")", "if", "val", "is", "not", "None", "]", "[", "0", "]", "except", "IndexError", ":", "logger", ".", "log_trace", "(", ")", "return", "rgbmatch", ".", "group", "(", "0", ")", "if", "letter", "==", "'a'", ":", "colval", "=", "16", "# pure black @ index 16 (first color cube entry)", "elif", "letter", "==", "'z'", ":", "colval", "=", "231", "# pure white @ index 231 (last color cube entry)", "else", ":", "# letter in range [b..y] (exactly 24 values!)", "colval", "=", "134", "+", "ord", "(", "letter", ")", "# ansi fallback logic expects r,g,b values in [0..5] range", "gray", "=", "(", "ord", "(", "letter", ")", "-", "97", ")", "/", "5.0", "red", ",", "green", ",", "blue", "=", "gray", ",", "gray", ",", "gray", "if", "use_xterm256", ":", "if", "not", "grayscale", ":", "colval", "=", "16", "+", "(", "red", "*", "36", ")", "+", "(", "green", "*", "6", ")", "+", "blue", "return", "\"\\033[%s8;5;%sm\"", "%", "(", "3", "+", "int", "(", "background", ")", ",", "colval", ")", "# replaced since some clients (like Potato) does not accept codes with leading zeroes, see issue #1024.", "# return \"\\033[%s8;5;%s%s%sm\" % (3 + int(background), colval // 100, (colval % 100) // 10, colval%10)", "else", ":", "# xterm256 not supported, convert the rgb value to ansi instead", "if", "red", "==", "green", "==", "blue", "and", "red", "<", "3", ":", "if", "background", ":", "return", "ANSI_BACK_BLACK", "elif", "red", ">=", "1", ":", "return", "ANSI_HILITE", "+", "ANSI_BLACK", "else", ":", "return", "ANSI_NORMAL", "+", "ANSI_BLACK", "elif", "red", "==", "green", "==", "blue", ":", "if", "background", ":", "return", "ANSI_BACK_WHITE", "elif", "red", ">=", "4", ":", "return", "ANSI_HILITE", "+", "ANSI_WHITE", "else", ":", "return", "ANSI_NORMAL", "+", "ANSI_WHITE", "elif", "red", ">", "green", "and", "red", ">", "blue", ":", "if", "background", ":", "return", "ANSI_BACK_RED", "elif", "red", ">=", "3", ":", "return", "ANSI_HILITE", "+", "ANSI_RED", "else", ":", "return", "ANSI_NORMAL", "+", "ANSI_RED", "elif", "red", "==", "green", "and", "red", ">", "blue", ":", "if", "background", ":", "return", "ANSI_BACK_YELLOW", "elif", "red", ">=", "3", ":", "return", "ANSI_HILITE", "+", "ANSI_YELLOW", "else", ":", "return", "ANSI_NORMAL", "+", "ANSI_YELLOW", "elif", "red", "==", "blue", "and", "red", ">", "green", ":", "if", "background", ":", "return", "ANSI_BACK_MAGENTA", "elif", "red", ">=", "3", ":", "return", "ANSI_HILITE", "+", "ANSI_MAGENTA", "else", ":", "return", "ANSI_NORMAL", "+", "ANSI_MAGENTA", "elif", "green", ">", "blue", ":", "if", "background", 
":", "return", "ANSI_BACK_GREEN", "elif", "green", ">=", "3", ":", "return", "ANSI_HILITE", "+", "ANSI_GREEN", "else", ":", "return", "ANSI_NORMAL", "+", "ANSI_GREEN", "elif", "green", "==", "blue", ":", "if", "background", ":", "return", "ANSI_BACK_CYAN", "elif", "green", ">=", "3", ":", "return", "ANSI_HILITE", "+", "ANSI_CYAN", "else", ":", "return", "ANSI_NORMAL", "+", "ANSI_CYAN", "else", ":", "# mostly blue", "if", "background", ":", "return", "ANSI_BACK_BLUE", "elif", "blue", ">=", "3", ":", "return", "ANSI_HILITE", "+", "ANSI_BLUE", "else", ":", "return", "ANSI_NORMAL", "+", "ANSI_BLUE" ]
[ 238, 4 ]
[ 357, 50 ]
python
en
['en', 'error', 'th']
False
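The xterm256 branch above boils down to the standard 6x6x6 colour-cube index, 16 + 36*r + 6*g + b for r, g, b in 0..5, with the greyscale letters a..z mapped to indexes 16, 232-255 and 231. A small standalone check of that arithmetic:

    def xterm256_index(red, green, blue):
        # the 6x6x6 colour cube occupies xterm indexes 16..231
        return 16 + red * 36 + green * 6 + blue

    assert xterm256_index(0, 0, 0) == 16    # cube black
    assert xterm256_index(5, 5, 5) == 231   # cube white
    assert xterm256_index(5, 0, 0) == 196   # saturated red

    # greyscale letters, as in sub_xterm256: 'a' -> 16, 'b'..'y' -> 232..255, 'z' -> 231
    for letter, expected in (("a", 16), ("b", 232), ("y", 255), ("z", 231)):
        colval = 16 if letter == "a" else 231 if letter == "z" else 134 + ord(letter)
        assert colval == expected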
ANSIParser.strip_raw_codes
(self, string)
Strips raw ANSI codes from a string. Args: string (str): The string to strip. Returns: string (str): The processed string.
Strips raw ANSI codes from a string.
def strip_raw_codes(self, string): """ Strips raw ANSI codes from a string. Args: string (str): The string to strip. Returns: string (str): The processed string. """ return self.ansi_regex.sub("", string)
[ "def", "strip_raw_codes", "(", "self", ",", "string", ")", ":", "return", "self", ".", "ansi_regex", ".", "sub", "(", "\"\"", ",", "string", ")" ]
[ 359, 4 ]
[ 370, 46 ]
python
en
['en', 'error', 'th']
False
ANSIParser.strip_mxp
(self, string)
Strips all MXP codes from a string. Args: string (str): The string to strip. Returns: string (str): The processed string.
Strips all MXP codes from a string.
def strip_mxp(self, string): """ Strips all MXP codes from a string. Args: string (str): The string to strip. Returns: string (str): The processed string. """ return self.mxp_sub.sub(r'\2', string)
[ "def", "strip_mxp", "(", "self", ",", "string", ")", ":", "return", "self", ".", "mxp_sub", ".", "sub", "(", "r'\\2'", ",", "string", ")" ]
[ 372, 4 ]
[ 383, 46 ]
python
en
['en', 'error', 'th']
False
ANSIParser.parse_ansi
(self, string, strip_ansi=False, xterm256=False, mxp=False)
Parses a string, subbing color codes according to the stored mapping. Args: string (str): The string to parse. strip_ansi (boolean, optional): Strip all found ansi markup. xterm256 (boolean, optional): If actually using xterm256 or if these values should be converted to 16-color ANSI. mxp (boolean, optional): Parse MXP commands in string. Returns: string (str): The parsed string.
Parses a string, subbing color codes according to the stored mapping.
def parse_ansi(self, string, strip_ansi=False, xterm256=False, mxp=False): """ Parses a string, subbing color codes according to the stored mapping. Args: string (str): The string to parse. strip_ansi (boolean, optional): Strip all found ansi markup. xterm256 (boolean, optional): If actually using xterm256 or if these values should be converted to 16-color ANSI. mxp (boolean, optional): Parse MXP commands in string. Returns: string (str): The parsed string. """ if hasattr(string, '_raw_string'): if strip_ansi: return string.clean() else: return string.raw() if not string: return '' # check cached parsings global _PARSE_CACHE cachekey = "%s-%s-%s-%s" % (string, strip_ansi, xterm256, mxp) if cachekey in _PARSE_CACHE: return _PARSE_CACHE[cachekey] # pre-convert bright colors to xterm256 color tags string = self.brightbg_sub.sub(self.sub_brightbg, string) def do_xterm256_fg(part): return self.sub_xterm256(part, xterm256, "fg") def do_xterm256_bg(part): return self.sub_xterm256(part, xterm256, "bg") def do_xterm256_gfg(part): return self.sub_xterm256(part, xterm256, "gfg") def do_xterm256_gbg(part): return self.sub_xterm256(part, xterm256, "gbg") in_string = utils.to_str(string) # do string replacement parsed_string = [] parts = self.ansi_escapes.split(in_string) + [" "] for part, sep in zip(parts[::2], parts[1::2]): pstring = self.xterm256_fg_sub.sub(do_xterm256_fg, part) pstring = self.xterm256_bg_sub.sub(do_xterm256_bg, pstring) pstring = self.xterm256_gfg_sub.sub(do_xterm256_gfg, pstring) pstring = self.xterm256_gbg_sub.sub(do_xterm256_gbg, pstring) pstring = self.ansi_sub.sub(self.sub_ansi, pstring) parsed_string.append("%s%s" % (pstring, sep[0].strip())) parsed_string = "".join(parsed_string) if not mxp: parsed_string = self.strip_mxp(parsed_string) if strip_ansi: # remove all ansi codes (including those manually # inserted in string) return self.strip_raw_codes(parsed_string) # cache and crop old cache _PARSE_CACHE[cachekey] = parsed_string if len(_PARSE_CACHE) > _PARSE_CACHE_SIZE: _PARSE_CACHE.popitem(last=False) return parsed_string
[ "def", "parse_ansi", "(", "self", ",", "string", ",", "strip_ansi", "=", "False", ",", "xterm256", "=", "False", ",", "mxp", "=", "False", ")", ":", "if", "hasattr", "(", "string", ",", "'_raw_string'", ")", ":", "if", "strip_ansi", ":", "return", "string", ".", "clean", "(", ")", "else", ":", "return", "string", ".", "raw", "(", ")", "if", "not", "string", ":", "return", "''", "# check cached parsings", "global", "_PARSE_CACHE", "cachekey", "=", "\"%s-%s-%s-%s\"", "%", "(", "string", ",", "strip_ansi", ",", "xterm256", ",", "mxp", ")", "if", "cachekey", "in", "_PARSE_CACHE", ":", "return", "_PARSE_CACHE", "[", "cachekey", "]", "# pre-convert bright colors to xterm256 color tags", "string", "=", "self", ".", "brightbg_sub", ".", "sub", "(", "self", ".", "sub_brightbg", ",", "string", ")", "def", "do_xterm256_fg", "(", "part", ")", ":", "return", "self", ".", "sub_xterm256", "(", "part", ",", "xterm256", ",", "\"fg\"", ")", "def", "do_xterm256_bg", "(", "part", ")", ":", "return", "self", ".", "sub_xterm256", "(", "part", ",", "xterm256", ",", "\"bg\"", ")", "def", "do_xterm256_gfg", "(", "part", ")", ":", "return", "self", ".", "sub_xterm256", "(", "part", ",", "xterm256", ",", "\"gfg\"", ")", "def", "do_xterm256_gbg", "(", "part", ")", ":", "return", "self", ".", "sub_xterm256", "(", "part", ",", "xterm256", ",", "\"gbg\"", ")", "in_string", "=", "utils", ".", "to_str", "(", "string", ")", "# do string replacement", "parsed_string", "=", "[", "]", "parts", "=", "self", ".", "ansi_escapes", ".", "split", "(", "in_string", ")", "+", "[", "\" \"", "]", "for", "part", ",", "sep", "in", "zip", "(", "parts", "[", ":", ":", "2", "]", ",", "parts", "[", "1", ":", ":", "2", "]", ")", ":", "pstring", "=", "self", ".", "xterm256_fg_sub", ".", "sub", "(", "do_xterm256_fg", ",", "part", ")", "pstring", "=", "self", ".", "xterm256_bg_sub", ".", "sub", "(", "do_xterm256_bg", ",", "pstring", ")", "pstring", "=", "self", ".", "xterm256_gfg_sub", ".", "sub", "(", "do_xterm256_gfg", ",", "pstring", ")", "pstring", "=", "self", ".", "xterm256_gbg_sub", ".", "sub", "(", "do_xterm256_gbg", ",", "pstring", ")", "pstring", "=", "self", ".", "ansi_sub", ".", "sub", "(", "self", ".", "sub_ansi", ",", "pstring", ")", "parsed_string", ".", "append", "(", "\"%s%s\"", "%", "(", "pstring", ",", "sep", "[", "0", "]", ".", "strip", "(", ")", ")", ")", "parsed_string", "=", "\"\"", ".", "join", "(", "parsed_string", ")", "if", "not", "mxp", ":", "parsed_string", "=", "self", ".", "strip_mxp", "(", "parsed_string", ")", "if", "strip_ansi", ":", "# remove all ansi codes (including those manually", "# inserted in string)", "return", "self", ".", "strip_raw_codes", "(", "parsed_string", ")", "# cache and crop old cache", "_PARSE_CACHE", "[", "cachekey", "]", "=", "parsed_string", "if", "len", "(", "_PARSE_CACHE", ")", ">", "_PARSE_CACHE_SIZE", ":", "_PARSE_CACHE", ".", "popitem", "(", "last", "=", "False", ")", "return", "parsed_string" ]
[ 385, 4 ]
[ 458, 28 ]
python
en
['en', 'error', 'th']
False
ANSIString.__new__
(cls, *args, **kwargs)
When creating a new ANSIString, you may use a custom parser that has the same attributes as the standard one, and you may declare the string to be handled as already decoded. It is important not to double decode strings, as escapes can only be respected once. Internally, ANSIString can also pass itself precached code/character indexes and clean strings to avoid doing extra work when combining ANSIStrings.
When creating a new ANSIString, you may use a custom parser that has the same attributes as the standard one, and you may declare the string to be handled as already decoded. It is important not to double decode strings, as escapes can only be respected once.
def __new__(cls, *args, **kwargs): """ When creating a new ANSIString, you may use a custom parser that has the same attributes as the standard one, and you may declare the string to be handled as already decoded. It is important not to double decode strings, as escapes can only be respected once. Internally, ANSIString can also passes itself precached code/character indexes and clean strings to avoid doing extra work when combining ANSIStrings. """ string = args[0] if not isinstance(string, basestring): string = to_str(string, force_string=True) parser = kwargs.get('parser', ANSI_PARSER) decoded = kwargs.get('decoded', False) or hasattr(string, '_raw_string') code_indexes = kwargs.pop('code_indexes', None) char_indexes = kwargs.pop('char_indexes', None) clean_string = kwargs.pop('clean_string', None) # All True, or All False, not just one. checks = [x is None for x in [code_indexes, char_indexes, clean_string]] if not len(set(checks)) == 1: raise ValueError("You must specify code_indexes, char_indexes, " "and clean_string together, or not at all.") if not all(checks): decoded = True if not decoded: # Completely new ANSI String clean_string = to_unicode(parser.parse_ansi(string, strip_ansi=True, mxp=True)) string = parser.parse_ansi(string, xterm256=True, mxp=True) elif clean_string is not None: # We have an explicit clean string. pass elif hasattr(string, '_clean_string'): # It's already an ANSIString clean_string = string._clean_string code_indexes = string._code_indexes char_indexes = string._char_indexes string = string._raw_string else: # It's a string that has been pre-ansi decoded. clean_string = parser.strip_raw_codes(string) if not isinstance(string, unicode): string = string.decode('utf-8') ansi_string = super(ANSIString, cls).__new__(ANSIString, to_str(clean_string), "utf-8") ansi_string._raw_string = string ansi_string._clean_string = clean_string ansi_string._code_indexes = code_indexes ansi_string._char_indexes = char_indexes return ansi_string
[ "def", "__new__", "(", "cls", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "string", "=", "args", "[", "0", "]", "if", "not", "isinstance", "(", "string", ",", "basestring", ")", ":", "string", "=", "to_str", "(", "string", ",", "force_string", "=", "True", ")", "parser", "=", "kwargs", ".", "get", "(", "'parser'", ",", "ANSI_PARSER", ")", "decoded", "=", "kwargs", ".", "get", "(", "'decoded'", ",", "False", ")", "or", "hasattr", "(", "string", ",", "'_raw_string'", ")", "code_indexes", "=", "kwargs", ".", "pop", "(", "'code_indexes'", ",", "None", ")", "char_indexes", "=", "kwargs", ".", "pop", "(", "'char_indexes'", ",", "None", ")", "clean_string", "=", "kwargs", ".", "pop", "(", "'clean_string'", ",", "None", ")", "# All True, or All False, not just one.", "checks", "=", "[", "x", "is", "None", "for", "x", "in", "[", "code_indexes", ",", "char_indexes", ",", "clean_string", "]", "]", "if", "not", "len", "(", "set", "(", "checks", ")", ")", "==", "1", ":", "raise", "ValueError", "(", "\"You must specify code_indexes, char_indexes, \"", "\"and clean_string together, or not at all.\"", ")", "if", "not", "all", "(", "checks", ")", ":", "decoded", "=", "True", "if", "not", "decoded", ":", "# Completely new ANSI String", "clean_string", "=", "to_unicode", "(", "parser", ".", "parse_ansi", "(", "string", ",", "strip_ansi", "=", "True", ",", "mxp", "=", "True", ")", ")", "string", "=", "parser", ".", "parse_ansi", "(", "string", ",", "xterm256", "=", "True", ",", "mxp", "=", "True", ")", "elif", "clean_string", "is", "not", "None", ":", "# We have an explicit clean string.", "pass", "elif", "hasattr", "(", "string", ",", "'_clean_string'", ")", ":", "# It's already an ANSIString", "clean_string", "=", "string", ".", "_clean_string", "code_indexes", "=", "string", ".", "_code_indexes", "char_indexes", "=", "string", ".", "_char_indexes", "string", "=", "string", ".", "_raw_string", "else", ":", "# It's a string that has been pre-ansi decoded.", "clean_string", "=", "parser", ".", "strip_raw_codes", "(", "string", ")", "if", "not", "isinstance", "(", "string", ",", "unicode", ")", ":", "string", "=", "string", ".", "decode", "(", "'utf-8'", ")", "ansi_string", "=", "super", "(", "ANSIString", ",", "cls", ")", ".", "__new__", "(", "ANSIString", ",", "to_str", "(", "clean_string", ")", ",", "\"utf-8\"", ")", "ansi_string", ".", "_raw_string", "=", "string", "ansi_string", ".", "_clean_string", "=", "clean_string", "ansi_string", ".", "_code_indexes", "=", "code_indexes", "ansi_string", ".", "_char_indexes", "=", "char_indexes", "return", "ansi_string" ]
[ 663, 4 ]
[ 715, 26 ]
python
en
['en', 'error', 'th']
False
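Constructing an ANSIString parses the markup once and stores both the raw and the clean form, plus the index tables used by the rows below. A usage sketch; the import path and the |-style markup are assumptions.

    from evennia.utils.ansi import ANSIString   # assumed import path

    s = ANSIString("|gOK|n")
    print(len(s))           # length of the clean text (2 here)
    print(repr(s.raw()))    # raw string with the generated escape sequences
    print(repr(s.clean()))  # 'OK'

    # Re-wrapping an existing ANSIString is safe: the escapes are not decoded twice.
    same = ANSIString(s)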
ANSIString.__unicode__
(self)
Unfortunately, this is not called during print() statements due to a bug in the Python interpreter. You can always do unicode() or str() around the resulting ANSIString and print that.
Unfortunately, this is not called during print() statements due to a bug in the Python interpreter. You can always do unicode() or str() around the resulting ANSIString and print that.
def __unicode__(self): """ Unfortunately, this is not called during print() statements due to a bug in the Python interpreter. You can always do unicode() or str() around the resulting ANSIString and print that. """ return self._raw_string
[ "def", "__unicode__", "(", "self", ")", ":", "return", "self", ".", "_raw_string" ]
[ 720, 4 ]
[ 728, 31 ]
python
en
['en', 'error', 'th']
False
ANSIString.__repr__
(self)
Let's make the repr the command that would actually be used to construct this object, for convenience and reference.
Let's make the repr the command that would actually be used to construct this object, for convenience and reference.
def __repr__(self): """ Let's make the repr the command that would actually be used to construct this object, for convenience and reference. """ return "ANSIString(%s, decoded=True)" % repr(self._raw_string)
[ "def", "__repr__", "(", "self", ")", ":", "return", "\"ANSIString(%s, decoded=True)\"", "%", "repr", "(", "self", ".", "_raw_string", ")" ]
[ 730, 4 ]
[ 736, 70 ]
python
en
['en', 'error', 'th']
False
ANSIString.__init__
(self, *_, **kwargs)
When the ANSIString is first initialized, a few internal variables have to be set. The first is the parser. It is possible to replace Evennia's standard ANSI parser with one of your own syntax if you wish, so long as it implements the same interface. The second is the _raw_string. It should be noted that the ANSIStrings are unicode based. This seemed more reasonable than basing it off of the string class, because if someone were to use a unicode character, the benefits of knowing the indexes of the ANSI characters would be negated by the fact that a character within the string might require more than one byte to be represented. The raw string is, then, a unicode object rather than a true encoded string. If you need the encoded string for sending over the wire, try using the .encode() method. The third thing to set is the _clean_string. This is a unicode object that is devoid of all ANSI Escapes. Finally, _code_indexes and _char_indexes are defined. These are lookup tables for which characters in the raw string are related to ANSI escapes, and which are for the readable text.
When the ANSIString is first initialized, a few internal variables have to be set.
def __init__(self, *_, **kwargs): """ When the ANSIString is first initialized, a few internal variables have to be set. The first is the parser. It is possible to replace Evennia's standard ANSI parser with one of your own syntax if you wish, so long as it implements the same interface. The second is the _raw_string. It should be noted that the ANSIStrings are unicode based. This seemed more reasonable than basing it off of the string class, because if someone were to use a unicode character, the benefits of knowing the indexes of the ANSI characters would be negated by the fact that a character within the string might require more than one byte to be represented. The raw string is, then, a unicode object rather than a true encoded string. If you need the encoded string for sending over the wire, try using the .encode() method. The third thing to set is the _clean_string. This is a unicode object that is devoid of all ANSI Escapes. Finally, _code_indexes and _char_indexes are defined. These are lookup tables for which characters in the raw string are related to ANSI escapes, and which are for the readable text. """ self.parser = kwargs.pop('parser', ANSI_PARSER) super(ANSIString, self).__init__() if self._code_indexes is None: self._code_indexes, self._char_indexes = self._get_indexes()
[ "def", "__init__", "(", "self", ",", "*", "_", ",", "*", "*", "kwargs", ")", ":", "self", ".", "parser", "=", "kwargs", ".", "pop", "(", "'parser'", ",", "ANSI_PARSER", ")", "super", "(", "ANSIString", ",", "self", ")", ".", "__init__", "(", ")", "if", "self", ".", "_code_indexes", "is", "None", ":", "self", ".", "_code_indexes", ",", "self", ".", "_char_indexes", "=", "self", ".", "_get_indexes", "(", ")" ]
[ 738, 4 ]
[ 768, 72 ]
python
en
['en', 'error', 'th']
False
ANSIString._shifter
(iterable, offset)
Takes a list of integers, and produces a new one incrementing all by a number.
Takes a list of integers, and produces a new one incrementing all by a number.
def _shifter(iterable, offset): """ Takes a list of integers, and produces a new one incrementing all by a number. """ return [i + offset for i in iterable]
[ "def", "_shifter", "(", "iterable", ",", "offset", ")", ":", "return", "[", "i", "+", "offset", "for", "i", "in", "iterable", "]" ]
[ 771, 4 ]
[ 777, 45 ]
python
en
['en', 'error', 'th']
False
ANSIString._adder
(cls, first, second)
Joins two ANSIStrings, preserving calculated info.
Joins two ANSIStrings, preserving calculated info.
def _adder(cls, first, second): """ Joins two ANSIStrings, preserving calculated info. """ raw_string = first._raw_string + second._raw_string clean_string = first._clean_string + second._clean_string code_indexes = first._code_indexes[:] char_indexes = first._char_indexes[:] code_indexes.extend( cls._shifter(second._code_indexes, len(first._raw_string))) char_indexes.extend( cls._shifter(second._char_indexes, len(first._raw_string))) return ANSIString(raw_string, code_indexes=code_indexes, char_indexes=char_indexes, clean_string=clean_string)
[ "def", "_adder", "(", "cls", ",", "first", ",", "second", ")", ":", "raw_string", "=", "first", ".", "_raw_string", "+", "second", ".", "_raw_string", "clean_string", "=", "first", ".", "_clean_string", "+", "second", ".", "_clean_string", "code_indexes", "=", "first", ".", "_code_indexes", "[", ":", "]", "char_indexes", "=", "first", ".", "_char_indexes", "[", ":", "]", "code_indexes", ".", "extend", "(", "cls", ".", "_shifter", "(", "second", ".", "_code_indexes", ",", "len", "(", "first", ".", "_raw_string", ")", ")", ")", "char_indexes", ".", "extend", "(", "cls", ".", "_shifter", "(", "second", ".", "_char_indexes", ",", "len", "(", "first", ".", "_raw_string", ")", ")", ")", "return", "ANSIString", "(", "raw_string", ",", "code_indexes", "=", "code_indexes", ",", "char_indexes", "=", "char_indexes", ",", "clean_string", "=", "clean_string", ")" ]
[ 780, 4 ]
[ 796, 52 ]
python
en
['en', 'error', 'th']
False
ANSIString.__add__
(self, other)
We have to be careful when adding two strings not to reprocess things that don't need to be reprocessed, lest we end up with escapes being interpreted literally.
We have to be careful when adding two strings not to reprocess things that don't need to be reprocessed, lest we end up with escapes being interpreted literally.
def __add__(self, other): """ We have to be careful when adding two strings not to reprocess things that don't need to be reprocessed, lest we end up with escapes being interpreted literally. """ if not isinstance(other, basestring): return NotImplemented if not isinstance(other, ANSIString): other = ANSIString(other) return self._adder(self, other)
[ "def", "__add__", "(", "self", ",", "other", ")", ":", "if", "not", "isinstance", "(", "other", ",", "basestring", ")", ":", "return", "NotImplemented", "if", "not", "isinstance", "(", "other", ",", "ANSIString", ")", ":", "other", "=", "ANSIString", "(", "other", ")", "return", "self", ".", "_adder", "(", "self", ",", "other", ")" ]
[ 798, 4 ]
[ 809, 39 ]
python
en
['en', 'error', 'th']
False
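Concatenation goes through _adder, so the precomputed index tables are shifted rather than re-parsed, and plain strings are promoted to ANSIString first. A brief sketch (import path and markup assumed):

    from evennia.utils.ansi import ANSIString   # assumed import path

    banner = ANSIString("|rERROR|n")
    line = banner + ": file not found"   # __add__ promotes the plain str
    line = ">> " + line                  # __radd__ covers the reversed case
    print(line.clean())                  # '>> ERROR: file not found'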
ANSIString.__radd__
(self, other)
Likewise, if we're on the other end.
Likewise, if we're on the other end.
def __radd__(self, other): """ Likewise, if we're on the other end. """ if not isinstance(other, basestring): return NotImplemented if not isinstance(other, ANSIString): other = ANSIString(other) return self._adder(other, self)
[ "def", "__radd__", "(", "self", ",", "other", ")", ":", "if", "not", "isinstance", "(", "other", ",", "basestring", ")", ":", "return", "NotImplemented", "if", "not", "isinstance", "(", "other", ",", "ANSIString", ")", ":", "other", "=", "ANSIString", "(", "other", ")", "return", "self", ".", "_adder", "(", "other", ",", "self", ")" ]
[ 811, 4 ]
[ 820, 39 ]
python
en
['en', 'error', 'th']
False
ANSIString.__getslice__
(self, i, j)
This function is deprecated, so we just make it call the proper function.
This function is deprecated, so we just make it call the proper function.
def __getslice__(self, i, j): """ This function is deprecated, so we just make it call the proper function. """ return self.__getitem__(slice(i, j))
[ "def", "__getslice__", "(", "self", ",", "i", ",", "j", ")", ":", "return", "self", ".", "__getitem__", "(", "slice", "(", "i", ",", "j", ")", ")" ]
[ 822, 4 ]
[ 828, 44 ]
python
en
['en', 'error', 'th']
False
ANSIString._slice
(self, slc)
This function takes a slice() object. Slices have to be handled specially. Not only are they able to specify a start and end with [x:y], but many forget that they can also specify an interval with [x:y:z]. As a result, not only do we have to track the ANSI Escapes that have played before the start of the slice, we must also replay any in these intervals, should they exist. Thankfully, slicing the _char_indexes table gives us the actual indexes that need slicing in the raw string. We can check between those indexes to figure out what escape characters need to be replayed.
This function takes a slice() object.
def _slice(self, slc): """ This function takes a slice() object. Slices have to be handled specially. Not only are they able to specify a start and end with [x:y], but many forget that they can also specify an interval with [x:y:z]. As a result, not only do we have to track the ANSI Escapes that have played before the start of the slice, we must also replay any in these intervals, should they exist. Thankfully, slicing the _char_indexes table gives us the actual indexes that need slicing in the raw string. We can check between those indexes to figure out what escape characters need to be replayed. """ slice_indexes = self._char_indexes[slc] # If it's the end of the string, we need to append final color codes. if not slice_indexes: return ANSIString('') try: string = self[slc.start]._raw_string except IndexError: return ANSIString('') last_mark = slice_indexes[0] # Check between the slice intervals for escape sequences. i = None for i in slice_indexes[1:]: for index in range(last_mark, i): if index in self._code_indexes: string += self._raw_string[index] last_mark = i try: string += self._raw_string[i] except IndexError: # raw_string not long enough pass if i is not None: append_tail = self._get_interleving(self._char_indexes.index(i) + 1) else: append_tail = '' return ANSIString(string + append_tail, decoded=True)
[ "def", "_slice", "(", "self", ",", "slc", ")", ":", "slice_indexes", "=", "self", ".", "_char_indexes", "[", "slc", "]", "# If it's the end of the string, we need to append final color codes.", "if", "not", "slice_indexes", ":", "return", "ANSIString", "(", "''", ")", "try", ":", "string", "=", "self", "[", "slc", ".", "start", "]", ".", "_raw_string", "except", "IndexError", ":", "return", "ANSIString", "(", "''", ")", "last_mark", "=", "slice_indexes", "[", "0", "]", "# Check between the slice intervals for escape sequences.", "i", "=", "None", "for", "i", "in", "slice_indexes", "[", "1", ":", "]", ":", "for", "index", "in", "range", "(", "last_mark", ",", "i", ")", ":", "if", "index", "in", "self", ".", "_code_indexes", ":", "string", "+=", "self", ".", "_raw_string", "[", "index", "]", "last_mark", "=", "i", "try", ":", "string", "+=", "self", ".", "_raw_string", "[", "i", "]", "except", "IndexError", ":", "# raw_string not long enough", "pass", "if", "i", "is", "not", "None", ":", "append_tail", "=", "self", ".", "_get_interleving", "(", "self", ".", "_char_indexes", ".", "index", "(", "i", ")", "+", "1", ")", "else", ":", "append_tail", "=", "''", "return", "ANSIString", "(", "string", "+", "append_tail", ",", "decoded", "=", "True", ")" ]
[ 830, 4 ]
[ 871, 61 ]
python
en
['en', 'error', 'th']
False
ANSIString.__getitem__
(self, item)
Gateway for slices and getting specific indexes in the ANSIString. If this is a regexable ANSIString, it will get the data from the raw string instead, bypassing ANSIString's intelligent escape skipping, for reasons explained in the __new__ method's docstring.
Gateway for slices and getting specific indexes in the ANSIString. If this is a regexable ANSIString, it will get the data from the raw string instead, bypassing ANSIString's intelligent escape skipping, for reasons explained in the __new__ method's docstring.
def __getitem__(self, item): """ Gateway for slices and getting specific indexes in the ANSIString. If this is a regexable ANSIString, it will get the data from the raw string instead, bypassing ANSIString's intelligent escape skipping, for reasons explained in the __new__ method's docstring. """ if isinstance(item, slice): # Slices must be handled specially. return self._slice(item) try: self._char_indexes[item] except IndexError: raise IndexError("ANSIString Index out of range") # Get character codes after the index as well. if self._char_indexes[-1] == self._char_indexes[item]: append_tail = self._get_interleving(item + 1) else: append_tail = '' item = self._char_indexes[item] clean = self._raw_string[item] result = '' # Get the character they're after, and replay all escape sequences # previous to it. for index in range(0, item + 1): if index in self._code_indexes: result += self._raw_string[index] return ANSIString(result + clean + append_tail, decoded=True)
[ "def", "__getitem__", "(", "self", ",", "item", ")", ":", "if", "isinstance", "(", "item", ",", "slice", ")", ":", "# Slices must be handled specially.", "return", "self", ".", "_slice", "(", "item", ")", "try", ":", "self", ".", "_char_indexes", "[", "item", "]", "except", "IndexError", ":", "raise", "IndexError", "(", "\"ANSIString Index out of range\"", ")", "# Get character codes after the index as well.", "if", "self", ".", "_char_indexes", "[", "-", "1", "]", "==", "self", ".", "_char_indexes", "[", "item", "]", ":", "append_tail", "=", "self", ".", "_get_interleving", "(", "item", "+", "1", ")", "else", ":", "append_tail", "=", "''", "item", "=", "self", ".", "_char_indexes", "[", "item", "]", "clean", "=", "self", ".", "_raw_string", "[", "item", "]", "result", "=", "''", "# Get the character they're after, and replay all escape sequences", "# previous to it.", "for", "index", "in", "range", "(", "0", ",", "item", "+", "1", ")", ":", "if", "index", "in", "self", ".", "_code_indexes", ":", "result", "+=", "self", ".", "_raw_string", "[", "index", "]", "return", "ANSIString", "(", "result", "+", "clean", "+", "append_tail", ",", "decoded", "=", "True", ")" ]
[ 873, 4 ]
[ 902, 69 ]
python
en
['en', 'error', 'th']
False
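Indexing and slicing operate on the readable characters but replay every escape sequence that came before them, so the returned pieces keep their colours. A hedged example (import path and markup assumed):

    from evennia.utils.ansi import ANSIString   # assumed import path

    s = ANSIString("|rred|n and |bblue|n")
    piece = s[0:3]              # slice of the readable text: 'red'
    print(piece.clean())        # 'red'
    print(repr(piece.raw()))    # the red escape preceding it is replayed in the slice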
ANSIString.clean
(self)
Return a unicode object without the ANSI escapes. Returns: clean_string (unicode): A unicode object with no ANSI escapes.
Return a unicode object without the ANSI escapes.
def clean(self): """ Return a unicode object without the ANSI escapes. Returns: clean_string (unicode): A unicode object with no ANSI escapes. """ return self._clean_string
[ "def", "clean", "(", "self", ")", ":", "return", "self", ".", "_clean_string" ]
[ 904, 4 ]
[ 912, 33 ]
python
en
['en', 'error', 'th']
False
ANSIString.raw
(self)
Return a unicode object with the ANSI escapes. Returns: raw (unicode): A unicode object with the raw ANSI escape sequences.
Return a unicode object with the ANSI escapes.
def raw(self): """ Return a unicode object with the ANSI escapes. Returns: raw (unicode): A unicode object with the raw ANSI escape sequences. """ return self._raw_string
[ "def", "raw", "(", "self", ")", ":", "return", "self", ".", "_raw_string" ]
[ 914, 4 ]
[ 922, 31 ]
python
en
['en', 'error', 'th']
False
ANSIString.partition
(self, sep, reverse=False)
Splits once into three sections (with the separator being the middle section). We use the same techniques we used in split() to make sure each part is colored. Args: sep (str): The separator to split the string on. reverse (boolean): Whether to split the string on the last occurrence of the separator rather than the first. Returns: result (tuple): prefix (ANSIString): The part of the string before the separator. sep (ANSIString): The separator itself. postfix (ANSIString): The part of the string after the separator.
Splits once into three sections (with the separator being the middle section)
def partition(self, sep, reverse=False): """ Splits once into three sections (with the separator being the middle section) We use the same techniques we used in split() to make sure each are colored. Args: sep (str): The separator to split the string on. reverse (boolean): Whether to split the string on the last occurrence of the separator rather than the first. Returns: result (tuple): prefix (ANSIString): The part of the string before the separator sep (ANSIString): The separator itself postfix (ANSIString): The part of the string after the separator. """ if hasattr(sep, '_clean_string'): sep = sep.clean() if reverse: parent_result = self._clean_string.rpartition(sep) else: parent_result = self._clean_string.partition(sep) current_index = 0 result = tuple() for section in parent_result: result += (self[current_index:current_index + len(section)],) current_index += len(section) return result
[ "def", "partition", "(", "self", ",", "sep", ",", "reverse", "=", "False", ")", ":", "if", "hasattr", "(", "sep", ",", "'_clean_string'", ")", ":", "sep", "=", "sep", ".", "clean", "(", ")", "if", "reverse", ":", "parent_result", "=", "self", ".", "_clean_string", ".", "rpartition", "(", "sep", ")", "else", ":", "parent_result", "=", "self", ".", "_clean_string", ".", "partition", "(", "sep", ")", "current_index", "=", "0", "result", "=", "tuple", "(", ")", "for", "section", "in", "parent_result", ":", "result", "+=", "(", "self", "[", "current_index", ":", "current_index", "+", "len", "(", "section", ")", "]", ",", ")", "current_index", "+=", "len", "(", "section", ")", "return", "result" ]
[ 924, 4 ]
[ 955, 21 ]
python
en
['en', 'error', 'th']
False
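partition() splits on the clean text and then re-slices the coloured original, so all three returned parts are themselves ANSIStrings with the relevant escapes replayed. For example (import path and markup assumed):

    from evennia.utils.ansi import ANSIString   # assumed import path

    s = ANSIString("|yname|n=|gvalue|n")
    prefix, sep, postfix = s.partition("=")
    print(prefix.clean(), sep.clean(), postfix.clean())   # name = value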
ANSIString._get_indexes
(self)
Two tables need to be made, one which contains the indexes of all readable characters, and one which contains the indexes of all ANSI escapes. It's important to remember that ANSI escapes require more than one character at a time, though no readable character needs more than one character, since the unicode base class abstracts that away from us. However, several readable characters can be placed in a row. We must use regexes here to figure out where all the escape sequences are hiding in the string. Then we use the ranges of their starts and ends to create a final, comprehensive list of all indexes which are dedicated to code, and all dedicated to text. It's possible that only one of these tables is actually needed, the other assumed to be what isn't in the first.
Two tables need to be made, one which contains the indexes of all readable characters, and one which contains the indexes of all ANSI escapes. It's important to remember that ANSI escapes require more than one character at a time, though no readable character needs more than one character, since the unicode base class abstracts that away from us. However, several readable characters can be placed in a row.
def _get_indexes(self): """ Two tables need to be made, one which contains the indexes of all readable characters, and one which contains the indexes of all ANSI escapes. It's important to remember that ANSI escapes require more that one character at a time, though no readable character needs more than one character, since the unicode base class abstracts that away from us. However, several readable characters can be placed in a row. We must use regexes here to figure out where all the escape sequences are hiding in the string. Then we use the ranges of their starts and ends to create a final, comprehensive list of all indexes which are dedicated to code, and all dedicated to text. It's possible that only one of these tables is actually needed, the other assumed to be what isn't in the first. """ code_indexes = [] for match in self.parser.ansi_regex.finditer(self._raw_string): code_indexes.extend(range(match.start(), match.end())) if not code_indexes: # Plain string, no ANSI codes. return code_indexes, list(range(0, len(self._raw_string))) # all indexes not occupied by ansi codes are normal characters char_indexes = [i for i in range(len(self._raw_string)) if i not in code_indexes] return code_indexes, char_indexes
[ "def", "_get_indexes", "(", "self", ")", ":", "code_indexes", "=", "[", "]", "for", "match", "in", "self", ".", "parser", ".", "ansi_regex", ".", "finditer", "(", "self", ".", "_raw_string", ")", ":", "code_indexes", ".", "extend", "(", "range", "(", "match", ".", "start", "(", ")", ",", "match", ".", "end", "(", ")", ")", ")", "if", "not", "code_indexes", ":", "# Plain string, no ANSI codes.", "return", "code_indexes", ",", "list", "(", "range", "(", "0", ",", "len", "(", "self", ".", "_raw_string", ")", ")", ")", "# all indexes not occupied by ansi codes are normal characters", "char_indexes", "=", "[", "i", "for", "i", "in", "range", "(", "len", "(", "self", ".", "_raw_string", ")", ")", "if", "i", "not", "in", "code_indexes", "]", "return", "code_indexes", ",", "char_indexes" ]
[ 957, 4 ]
[ 984, 41 ]
python
en
['en', 'error', 'th']
False
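The same index-table idea can be shown in isolation. The sketch below is not the class method itself; the regex is a simplified stand-in for self.parser.ansi_regex (an assumption about what that regex matches):

import re

# Simplified stand-in for the parser's ANSI escape-sequence regex.
ANSI_RE = re.compile(r"\x1b\[[0-9;]*m")

def split_indexes(raw):
    # Collect every index covered by an escape sequence...
    code_indexes = []
    for match in ANSI_RE.finditer(raw):
        code_indexes.extend(range(match.start(), match.end()))
    # ...and treat every remaining index as a readable character.
    char_indexes = [i for i in range(len(raw)) if i not in code_indexes]
    return code_indexes, char_indexes

codes, chars = split_indexes("\x1b[31mhi\x1b[0m")
# codes covers the two escape sequences; chars points at "h" and "i".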
ANSIString._get_interleving
(self, index)
Get the code characters from the given slice end to the next character.
Get the code characters from the given slice end to the next character.
def _get_interleving(self, index):
        """
        Get the code characters from the given slice end to the next
        character.

        """
        try:
            index = self._char_indexes[index - 1]
        except IndexError:
            return ''
        s = ''
        while True:
            index += 1
            if index in self._char_indexes:
                break
            elif index in self._code_indexes:
                s += self._raw_string[index]
            else:
                break
        return s
[ "def", "_get_interleving", "(", "self", ",", "index", ")", ":", "try", ":", "index", "=", "self", ".", "_char_indexes", "[", "index", "-", "1", "]", "except", "IndexError", ":", "return", "''", "s", "=", "''", "while", "True", ":", "index", "+=", "1", "if", "index", "in", "self", ".", "_char_indexes", ":", "break", "elif", "index", "in", "self", ".", "_code_indexes", ":", "s", "+=", "self", ".", "_raw_string", "[", "index", "]", "else", ":", "break", "return", "s" ]
[ 986, 4 ]
[ 1005, 16 ]
python
en
['en', 'error', 'th']
False
ANSIString.__mul__
(self, other)
Multiplication method. Implemented for performance reasons.
Multiplication method. Implemented for performance reasons.
def __mul__(self, other):
        """
        Multiplication method. Implemented for performance reasons.

        """
        if not isinstance(other, int):
            return NotImplemented
        raw_string = self._raw_string * other
        clean_string = self._clean_string * other
        code_indexes = self._code_indexes[:]
        char_indexes = self._char_indexes[:]
        for i in range(1, other + 1):
            code_indexes.extend(
                self._shifter(self._code_indexes, i * len(self._raw_string)))
            char_indexes.extend(
                self._shifter(self._char_indexes, i * len(self._raw_string)))
        return ANSIString(
            raw_string, code_indexes=code_indexes, char_indexes=char_indexes,
            clean_string=clean_string)
[ "def", "__mul__", "(", "self", ",", "other", ")", ":", "if", "not", "isinstance", "(", "other", ",", "int", ")", ":", "return", "NotImplemented", "raw_string", "=", "self", ".", "_raw_string", "*", "other", "clean_string", "=", "self", ".", "_clean_string", "*", "other", "code_indexes", "=", "self", ".", "_code_indexes", "[", ":", "]", "char_indexes", "=", "self", ".", "_char_indexes", "[", ":", "]", "for", "i", "in", "range", "(", "1", ",", "other", "+", "1", ")", ":", "code_indexes", ".", "extend", "(", "self", ".", "_shifter", "(", "self", ".", "_code_indexes", ",", "i", "*", "len", "(", "self", ".", "_raw_string", ")", ")", ")", "char_indexes", ".", "extend", "(", "self", ".", "_shifter", "(", "self", ".", "_char_indexes", ",", "i", "*", "len", "(", "self", ".", "_raw_string", ")", ")", ")", "return", "ANSIString", "(", "raw_string", ",", "code_indexes", "=", "code_indexes", ",", "char_indexes", "=", "char_indexes", ",", "clean_string", "=", "clean_string", ")" ]
[ 1007, 4 ]
[ 1025, 38 ]
python
en
['en', 'error', 'th']
False
ANSIString.split
(self, by=None, maxsplit=-1)
Splits a string based on a separator. Stolen from PyPy's pure Python string implementation, tweaked for ANSIString. PyPy is distributed under the MIT licence. http://opensource.org/licenses/MIT Args: by (str): A string to search for which will be used to split the string. For instance, ',' for 'Hello,world' would result in ['Hello', 'world'] maxsplit (int): The maximum number of times to split the string. For example, a maxsplit of 2 with a by of ',' on the string 'Hello,world,test,string' would result in ['Hello', 'world', 'test,string'] Returns: result (list of ANSIStrings): A list of ANSIStrings derived from this string.
Splits a string based on a separator.
def split(self, by=None, maxsplit=-1):
        """
        Splits a string based on a separator.

        Stolen from PyPy's pure Python string implementation, tweaked for
        ANSIString.

        PyPy is distributed under the MIT licence.
        http://opensource.org/licenses/MIT

        Args:
            by (str): A string to search for which will be used to split the
                string. For instance, ',' for 'Hello,world' would result in
                ['Hello', 'world']
            maxsplit (int): The maximum number of times to split the string.
                For example, a maxsplit of 2 with a by of ',' on the string
                'Hello,world,test,string' would result in
                ['Hello', 'world', 'test,string']

        Returns:
            result (list of ANSIStrings): A list of ANSIStrings derived from
                this string.

        """
        drop_spaces = by is None
        if drop_spaces:
            by = " "

        bylen = len(by)
        if bylen == 0:
            raise ValueError("empty separator")

        res = []
        start = 0
        while maxsplit != 0:
            next = self._clean_string.find(by, start)
            if next < 0:
                break
            # Get character codes after the index as well.
            res.append(self[start:next])
            start = next + bylen
            maxsplit -= 1  # NB. if it's already < 0, it stays < 0

        res.append(self[start:len(self)])
        if drop_spaces:
            return [part for part in res if part != ""]
        return res
[ "def", "split", "(", "self", ",", "by", "=", "None", ",", "maxsplit", "=", "-", "1", ")", ":", "drop_spaces", "=", "by", "is", "None", "if", "drop_spaces", ":", "by", "=", "\" \"", "bylen", "=", "len", "(", "by", ")", "if", "bylen", "==", "0", ":", "raise", "ValueError", "(", "\"empty separator\"", ")", "res", "=", "[", "]", "start", "=", "0", "while", "maxsplit", "!=", "0", ":", "next", "=", "self", ".", "_clean_string", ".", "find", "(", "by", ",", "start", ")", "if", "next", "<", "0", ":", "break", "# Get character codes after the index as well.", "res", ".", "append", "(", "self", "[", "start", ":", "next", "]", ")", "start", "=", "next", "+", "bylen", "maxsplit", "-=", "1", "# NB. if it's already < 0, it stays < 0", "res", ".", "append", "(", "self", "[", "start", ":", "len", "(", "self", ")", "]", ")", "if", "drop_spaces", ":", "return", "[", "part", "for", "part", "in", "res", "if", "part", "!=", "\"\"", "]", "return", "res" ]
[ 1030, 4 ]
[ 1075, 18 ]
python
en
['en', 'error', 'th']
False
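A small usage sketch (hypothetical input; assumes ANSIString is in scope):

# Hypothetical usage of ANSIString.split; color codes travel with the pieces
# because each piece is produced by slicing the ANSIString itself.
s = ANSIString("\x1b[32mHello\x1b[0m,world,test,string")
s.split(",")              # -> ["Hello", "world", "test", "string"] as ANSIStrings
s.split(",", maxsplit=2)  # -> ["Hello", "world", "test,string"]
s.split()                 # by=None splits on spaces and drops empty parts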
ANSIString.rsplit
(self, by=None, maxsplit=-1)
Like split, but starts from the end of the string rather than the beginning. Stolen from PyPy's pure Python string implementation, tweaked for ANSIString. PyPy is distributed under the MIT licence. http://opensource.org/licenses/MIT Args: by (str): A string to search for which will be used to split the string. For instance, ',' for 'Hello,world' would result in ['Hello', 'world'] maxsplit (int): The maximum number of times to split the string. For example, a maxsplit of 2 with a by of ',' on the string 'Hello,world,test,string' would result in ['Hello,world', 'test', 'string'] Returns: result (list of ANSIStrings): A list of ANSIStrings derived from this string.
Like split, but starts from the end of the string rather than the beginning.
def rsplit(self, by=None, maxsplit=-1):
        """
        Like split, but starts from the end of the string rather than the
        beginning.

        Stolen from PyPy's pure Python string implementation, tweaked for
        ANSIString.

        PyPy is distributed under the MIT licence.
        http://opensource.org/licenses/MIT

        Args:
            by (str): A string to search for which will be used to split the
                string. For instance, ',' for 'Hello,world' would result in
                ['Hello', 'world']
            maxsplit (int): The maximum number of times to split the string.
                For example, a maxsplit of 2 with a by of ',' on the string
                'Hello,world,test,string' would result in
                ['Hello,world', 'test', 'string']

        Returns:
            result (list of ANSIStrings): A list of ANSIStrings derived from
                this string.

        """
        res = []
        end = len(self)
        drop_spaces = by is None
        if drop_spaces:
            by = " "

        bylen = len(by)
        if bylen == 0:
            raise ValueError("empty separator")

        while maxsplit != 0:
            next = self._clean_string.rfind(by, 0, end)
            if next < 0:
                break
            # Get character codes after the index as well.
            res.append(self[next + bylen:end])
            end = next
            maxsplit -= 1  # NB. if it's already < 0, it stays < 0

        res.append(self[:end])
        res.reverse()
        if drop_spaces:
            return [part for part in res if part != ""]
        return res
[ "def", "rsplit", "(", "self", ",", "by", "=", "None", ",", "maxsplit", "=", "-", "1", ")", ":", "res", "=", "[", "]", "end", "=", "len", "(", "self", ")", "drop_spaces", "=", "by", "is", "None", "if", "drop_spaces", ":", "by", "=", "\" \"", "bylen", "=", "len", "(", "by", ")", "if", "bylen", "==", "0", ":", "raise", "ValueError", "(", "\"empty separator\"", ")", "while", "maxsplit", "!=", "0", ":", "next", "=", "self", ".", "_clean_string", ".", "rfind", "(", "by", ",", "0", ",", "end", ")", "if", "next", "<", "0", ":", "break", "# Get character codes after the index as well.", "res", ".", "append", "(", "self", "[", "next", "+", "bylen", ":", "end", "]", ")", "end", "=", "next", "maxsplit", "-=", "1", "# NB. if it's already < 0, it stays < 0", "res", ".", "append", "(", "self", "[", ":", "end", "]", ")", "res", ".", "reverse", "(", ")", "if", "drop_spaces", ":", "return", "[", "part", "for", "part", "in", "res", "if", "part", "!=", "\"\"", "]", "return", "res" ]
[ 1077, 4 ]
[ 1123, 18 ]
python
en
['en', 'error', 'th']
False
ANSIString.strip
(self, chars=None)
Strip from both ends, taking ANSI markers into account. Args: chars (str, optional): A string containing individual characters to strip off of both ends of the string. By default, any blank spaces are trimmed. Returns: result (ANSIString): A new ANSIString with the ends trimmed of the relevant characters.
Strip from both ends, taking ANSI markers into account.
def strip(self, chars=None):
        """
        Strip from both ends, taking ANSI markers into account.

        Args:
            chars (str, optional): A string containing individual characters
                to strip off of both ends of the string. By default, any
                blank spaces are trimmed.

        Returns:
            result (ANSIString): A new ANSIString with the ends trimmed of
                the relevant characters.

        """
        clean = self._clean_string
        raw = self._raw_string

        # count continuous sequence of chars from left and right
        nlen = len(clean)
        nlstripped = nlen - len(clean.lstrip(chars))
        nrstripped = nlen - len(clean.rstrip(chars))

        # within the stripped regions, only retain parts of the raw
        # string *not* matching the clean string (these are ansi/mxp tags)
        lstripped = ""
        ic, ir1 = 0, 0
        while nlstripped:
            if ic >= nlstripped:
                break
            elif raw[ir1] != clean[ic]:
                lstripped += raw[ir1]
            else:
                ic += 1
            ir1 += 1

        rstripped = ""
        ic, ir2 = nlen - 1, len(raw) - 1
        while nrstripped:
            if nlen - ic > nrstripped:
                break
            elif raw[ir2] != clean[ic]:
                rstripped += raw[ir2]
            else:
                ic -= 1
            ir2 -= 1
        rstripped = rstripped[::-1]

        return ANSIString(lstripped + raw[ir1:ir2 + 1] + rstripped)
[ "def", "strip", "(", "self", ",", "chars", "=", "None", ")", ":", "clean", "=", "self", ".", "_clean_string", "raw", "=", "self", ".", "_raw_string", "# count continuous sequence of chars from left and right", "nlen", "=", "len", "(", "clean", ")", "nlstripped", "=", "nlen", "-", "len", "(", "clean", ".", "lstrip", "(", "chars", ")", ")", "nrstripped", "=", "nlen", "-", "len", "(", "clean", ".", "rstrip", "(", "chars", ")", ")", "# within the stripped regions, only retain parts of the raw", "# string *not* matching the clean string (these are ansi/mxp tags)", "lstripped", "=", "\"\"", "ic", ",", "ir1", "=", "0", ",", "0", "while", "nlstripped", ":", "if", "ic", ">=", "nlstripped", ":", "break", "elif", "raw", "[", "ir1", "]", "!=", "clean", "[", "ic", "]", ":", "lstripped", "+=", "raw", "[", "ir1", "]", "else", ":", "ic", "+=", "1", "ir1", "+=", "1", "rstripped", "=", "\"\"", "ic", ",", "ir2", "=", "nlen", "-", "1", ",", "len", "(", "raw", ")", "-", "1", "while", "nrstripped", ":", "if", "nlen", "-", "ic", ">", "nrstripped", ":", "break", "elif", "raw", "[", "ir2", "]", "!=", "clean", "[", "ic", "]", ":", "rstripped", "+=", "raw", "[", "ir2", "]", "else", ":", "ic", "-=", "1", "ir2", "-=", "1", "rstripped", "=", "rstripped", "[", ":", ":", "-", "1", "]", "return", "ANSIString", "(", "lstripped", "+", "raw", "[", "ir1", ":", "ir2", "+", "1", "]", "+", "rstripped", ")" ]
[ 1125, 4 ]
[ 1169, 67 ]
python
en
['en', 'error', 'th']
False
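A hedged usage sketch (the colored input is illustrative):

# Hypothetical usage of ANSIString.strip; whitespace is removed from the
# visible text while the surrounding ANSI/MXP markers are retained.
s = ANSIString("  \x1b[36mhi there  \x1b[0m  ")
stripped = s.strip()
# str(stripped) still contains both escape codes, but the leading and
# trailing blanks of the visible text are gone.
# strip("x") would instead trim literal "x" characters from both ends.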
ANSIString.lstrip
(self, chars=None)
Strip from the left, taking ANSI markers into account. Args: chars (str, optional): A string containing individual characters to strip off of the left end of the string. By default, any blank spaces are trimmed. Returns: result (ANSIString): A new ANSIString with the left end trimmed of the relevant characters.
Strip from the left, taking ANSI markers into account.
def lstrip(self, chars=None):
        """
        Strip from the left, taking ANSI markers into account.

        Args:
            chars (str, optional): A string containing individual characters
                to strip off of the left end of the string. By default, any
                blank spaces are trimmed.

        Returns:
            result (ANSIString): A new ANSIString with the left end trimmed
                of the relevant characters.

        """
        clean = self._clean_string
        raw = self._raw_string

        # count continuous sequence of chars from left and right
        nlen = len(clean)
        nlstripped = nlen - len(clean.lstrip(chars))

        # within the stripped regions, only retain parts of the raw
        # string *not* matching the clean string (these are ansi/mxp tags)
        lstripped = ""
        ic, ir1 = 0, 0
        while nlstripped:
            if ic >= nlstripped:
                break
            elif raw[ir1] != clean[ic]:
                lstripped += raw[ir1]
            else:
                ic += 1
            ir1 += 1

        return ANSIString(lstripped + raw[ir1:])
[ "def", "lstrip", "(", "self", ",", "chars", "=", "None", ")", ":", "clean", "=", "self", ".", "_clean_string", "raw", "=", "self", ".", "_raw_string", "# count continuous sequence of chars from left and right", "nlen", "=", "len", "(", "clean", ")", "nlstripped", "=", "nlen", "-", "len", "(", "clean", ".", "lstrip", "(", "chars", ")", ")", "# within the stripped regions, only retain parts of the raw", "# string *not* matching the clean string (these are ansi/mxp tags)", "lstripped", "=", "\"\"", "ic", ",", "ir1", "=", "0", ",", "0", "while", "nlstripped", ":", "if", "ic", ">=", "nlstripped", ":", "break", "elif", "raw", "[", "ir1", "]", "!=", "clean", "[", "ic", "]", ":", "lstripped", "+=", "raw", "[", "ir1", "]", "else", ":", "ic", "+=", "1", "ir1", "+=", "1", "return", "ANSIString", "(", "lstripped", "+", "raw", "[", "ir1", ":", "]", ")" ]
[ 1171, 4 ]
[ 1202, 48 ]
python
en
['en', 'error', 'th']
False
ANSIString.rstrip
(self, chars=None)
Strip from the right, taking ANSI markers into account. Args: chars (str, optional): A string containing individual characters to strip off of the right end of the string. By default, any blank spaces are trimmed. Returns: result (ANSIString): A new ANSIString with the right end trimmed of the relevant characters.
Strip from the right, taking ANSI markers into account.
def rstrip(self, chars=None):
        """
        Strip from the right, taking ANSI markers into account.

        Args:
            chars (str, optional): A string containing individual characters
                to strip off of the right end of the string. By default, any
                blank spaces are trimmed.

        Returns:
            result (ANSIString): A new ANSIString with the right end trimmed
                of the relevant characters.

        """
        clean = self._clean_string
        raw = self._raw_string

        nlen = len(clean)
        nrstripped = nlen - len(clean.rstrip(chars))

        rstripped = ""
        ic, ir2 = nlen - 1, len(raw) - 1
        while nrstripped:
            if nlen - ic > nrstripped:
                break
            elif raw[ir2] != clean[ic]:
                rstripped += raw[ir2]
            else:
                ic -= 1
            ir2 -= 1
        rstripped = rstripped[::-1]

        return ANSIString(raw[:ir2 + 1] + rstripped)
[ "def", "rstrip", "(", "self", ",", "chars", "=", "None", ")", ":", "clean", "=", "self", ".", "_clean_string", "raw", "=", "self", ".", "_raw_string", "nlen", "=", "len", "(", "clean", ")", "nrstripped", "=", "nlen", "-", "len", "(", "clean", ".", "rstrip", "(", "chars", ")", ")", "rstripped", "=", "\"\"", "ic", ",", "ir2", "=", "nlen", "-", "1", ",", "len", "(", "raw", ")", "-", "1", "while", "nrstripped", ":", "if", "nlen", "-", "ic", ">", "nrstripped", ":", "break", "elif", "raw", "[", "ir2", "]", "!=", "clean", "[", "ic", "]", ":", "rstripped", "+=", "raw", "[", "ir2", "]", "else", ":", "ic", "-=", "1", "ir2", "-=", "1", "rstripped", "=", "rstripped", "[", ":", ":", "-", "1", "]", "return", "ANSIString", "(", "raw", "[", ":", "ir2", "+", "1", "]", "+", "rstripped", ")" ]
[ 1204, 4 ]
[ 1232, 52 ]
python
en
['en', 'error', 'th']
False
ANSIString.join
(self, iterable)
Joins together strings in an iterable, using this string between each one. NOTE: This should always be used for joining strings when ANSIStrings are involved. Otherwise color information will be discarded by python, due to details in the C implementation of unicode strings. Args: iterable (list of strings): A list of strings to join together Returns: result (ANSIString): A single string with all of the iterable's contents concatenated, with this string between each. For example: ANSIString(', ').join(['up', 'right', 'left', 'down']) ...Would return: ANSIString('up, right, left, down')
Joins together strings in an iterable, using this string between each one.
def join(self, iterable):
        """
        Joins together strings in an iterable, using this string between each
        one.

        NOTE: This should always be used for joining strings when ANSIStrings
            are involved. Otherwise color information will be discarded by
            python, due to details in the C implementation of unicode strings.

        Args:
            iterable (list of strings): A list of strings to join together

        Returns:
            result (ANSIString): A single string with all of the iterable's
                contents concatenated, with this string between each.

        For example:
            ANSIString(', ').join(['up', 'right', 'left', 'down'])
            ...Would return:
            ANSIString('up, right, left, down')

        """
        result = ANSIString('')
        last_item = None
        for item in iterable:
            if last_item is not None:
                result += self._raw_string
            if not isinstance(item, ANSIString):
                item = ANSIString(item)
            result += item
            last_item = item
        return result
[ "def", "join", "(", "self", ",", "iterable", ")", ":", "result", "=", "ANSIString", "(", "''", ")", "last_item", "=", "None", "for", "item", "in", "iterable", ":", "if", "last_item", "is", "not", "None", ":", "result", "+=", "self", ".", "_raw_string", "if", "not", "isinstance", "(", "item", ",", "ANSIString", ")", ":", "item", "=", "ANSIString", "(", "item", ")", "result", "+=", "item", "last_item", "=", "item", "return", "result" ]
[ 1234, 4 ]
[ 1263, 21 ]
python
en
['en', 'error', 'th']
False
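Following the example embedded in the docstring, a usage sketch (the colored first element is illustrative):

# Assumes ANSIString is in scope; plain str elements are wrapped automatically.
parts = [ANSIString("\x1b[31mup\x1b[0m"), "right", "left", "down"]
joined = ANSIString(", ").join(parts)
# -> ANSIString("up, right, left, down") with the red markup around "up" kept.
# Using str.join() here instead would silently drop the color information.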
ANSIString._filler
(self, char, amount)
Generate a line of characters in a more efficient way than just adding ANSIStrings.
Generate a line of characters in a more efficient way than just adding ANSIStrings.
def _filler(self, char, amount):
        """
        Generate a line of characters in a more efficient way than just adding
        ANSIStrings.

        """
        if not isinstance(char, ANSIString):
            line = char * amount
            return ANSIString(
                char * amount, code_indexes=[],
                char_indexes=list(range(0, len(line))),
                clean_string=char)
        try:
            start = char._code_indexes[0]
        except IndexError:
            start = None
        end = char._char_indexes[0]
        prefix = char._raw_string[start:end]
        postfix = char._raw_string[end + 1:]
        line = char._clean_string * amount
        code_indexes = [i for i in range(0, len(prefix))]
        length = len(prefix) + len(line)
        code_indexes.extend([i for i in range(length, length + len(postfix))])
        char_indexes = self._shifter(range(0, len(line)), len(prefix))
        raw_string = prefix + line + postfix
        return ANSIString(
            raw_string, clean_string=line, char_indexes=char_indexes,
            code_indexes=code_indexes)
[ "def", "_filler", "(", "self", ",", "char", ",", "amount", ")", ":", "if", "not", "isinstance", "(", "char", ",", "ANSIString", ")", ":", "line", "=", "char", "*", "amount", "return", "ANSIString", "(", "char", "*", "amount", ",", "code_indexes", "=", "[", "]", ",", "char_indexes", "=", "list", "(", "range", "(", "0", ",", "len", "(", "line", ")", ")", ")", ",", "clean_string", "=", "char", ")", "try", ":", "start", "=", "char", ".", "_code_indexes", "[", "0", "]", "except", "IndexError", ":", "start", "=", "None", "end", "=", "char", ".", "_char_indexes", "[", "0", "]", "prefix", "=", "char", ".", "_raw_string", "[", "start", ":", "end", "]", "postfix", "=", "char", ".", "_raw_string", "[", "end", "+", "1", ":", "]", "line", "=", "char", ".", "_clean_string", "*", "amount", "code_indexes", "=", "[", "i", "for", "i", "in", "range", "(", "0", ",", "len", "(", "prefix", ")", ")", "]", "length", "=", "len", "(", "prefix", ")", "+", "len", "(", "line", ")", "code_indexes", ".", "extend", "(", "[", "i", "for", "i", "in", "range", "(", "length", ",", "length", "+", "len", "(", "postfix", ")", ")", "]", ")", "char_indexes", "=", "self", ".", "_shifter", "(", "range", "(", "0", ",", "len", "(", "line", ")", ")", ",", "len", "(", "prefix", ")", ")", "raw_string", "=", "prefix", "+", "line", "+", "postfix", "return", "ANSIString", "(", "raw_string", ",", "clean_string", "=", "line", ",", "char_indexes", "=", "char_indexes", ",", "code_indexes", "=", "code_indexes", ")" ]
[ 1265, 4 ]
[ 1291, 38 ]
python
en
['en', 'error', 'th']
False
ANSIString.center
(self, width, fillchar, _difference)
Center some text with some spaces padding both sides. Args: width (int): The target width of the output string. fillchar (str): A single character string to pad the output string with. Returns: result (ANSIString): A string padded on both ends with fillchar.
Center some text with some spaces padding both sides.
def center(self, width, fillchar, _difference):
        """
        Center some text with some spaces padding both sides.

        Args:
            width (int): The target width of the output string.
            fillchar (str): A single character string to pad the output string
                with.

        Returns:
            result (ANSIString): A string padded on both ends with fillchar.

        """
        remainder = _difference % 2
        _difference /= 2
        spacing = self._filler(fillchar, _difference)
        result = spacing + self + spacing + self._filler(fillchar, remainder)
        return result
[ "def", "center", "(", "self", ",", "width", ",", "fillchar", ",", "_difference", ")", ":", "remainder", "=", "_difference", "%", "2", "_difference", "/=", "2", "spacing", "=", "self", ".", "_filler", "(", "fillchar", ",", "_difference", ")", "result", "=", "spacing", "+", "self", "+", "spacing", "+", "self", ".", "_filler", "(", "fillchar", ",", "remainder", ")", "return", "result" ]
[ 1296, 4 ]
[ 1312, 21 ]
python
en
['en', 'error', 'th']
False
ANSIString.ljust
(self, width, fillchar, _difference)
Left justify some text. Args: width (int): The target width of the output string. fillchar (str): A single character string to pad the output string with. Returns: result (ANSIString): A string padded on the right with fillchar.
Left justify some text.
def ljust(self, width, fillchar, _difference):
        """
        Left justify some text.

        Args:
            width (int): The target width of the output string.
            fillchar (str): A single character string to pad the output string
                with.

        Returns:
            result (ANSIString): A string padded on the right with fillchar.

        """
        return self + self._filler(fillchar, _difference)
[ "def", "ljust", "(", "self", ",", "width", ",", "fillchar", ",", "_difference", ")", ":", "return", "self", "+", "self", ".", "_filler", "(", "fillchar", ",", "_difference", ")" ]
[ 1315, 4 ]
[ 1327, 57 ]
python
en
['en', 'error', 'th']
False
ANSIString.rjust
(self, width, fillchar, _difference)
Right justify some text. Args: width (int): The target width of the output string. fillchar (str): A single character string to pad the output string with. Returns: result (ANSIString): A string padded on the left with fillchar.
Right justify some text.
def rjust(self, width, fillchar, _difference):
        """
        Right justify some text.

        Args:
            width (int): The target width of the output string.
            fillchar (str): A single character string to pad the output string
                with.

        Returns:
            result (ANSIString): A string padded on the left with fillchar.

        """
        return self._filler(fillchar, _difference) + self
[ "def", "rjust", "(", "self", ",", "width", ",", "fillchar", ",", "_difference", ")", ":", "return", "self", ".", "_filler", "(", "fillchar", ",", "_difference", ")", "+", "self" ]
[ 1330, 4 ]
[ 1342, 57 ]
python
en
['en', 'error', 'th']
False
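center(), ljust() and rjust() all take a precomputed _difference (the target width minus the visible length); that value appears to be supplied by a wrapper elsewhere in the class, which is not part of these records. A hedged sketch of calling the two justify helpers directly:

# Hypothetical direct calls; _difference = width - len(visible text) is
# assumed to be computed by the caller (normally a wrapper, not shown here).
s = ANSIString("\x1b[33mhi\x1b[0m")   # visible length 2
s.ljust(10, ANSIString(" "), 8)       # the colored "hi" followed by 8 spaces
s.rjust(10, ANSIString(" "), 8)       # 8 spaces, then the colored "hi"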
IndyErrorHandler.__init__
(self, message: str = None, error_cls: Type[LedgerError] = LedgerError)
Init the context manager.
Init the context manager.
def __init__(self, message: str = None, error_cls: Type[LedgerError] = LedgerError):
        """Init the context manager."""
        self.error_cls = error_cls
        self.message = message
[ "def", "__init__", "(", "self", ",", "message", ":", "str", "=", "None", ",", "error_cls", ":", "Type", "[", "LedgerError", "]", "=", "LedgerError", ")", ":", "self", ".", "error_cls", "=", "error_cls", "self", ".", "message", "=", "message" ]
[ 46, 4 ]
[ 49, 30 ]
python
en
['en', 'en', 'en']
True
IndyErrorHandler.__enter__
(self)
Enter the context manager.
Enter the context manager.
def __enter__(self):
        """Enter the context manager."""
        return self
[ "def", "__enter__", "(", "self", ")", ":", "return", "self" ]
[ 51, 4 ]
[ 53, 19 ]
python
en
['en', 'gl', 'en']
True
IndyErrorHandler.__exit__
(self, err_type, err_value, err_traceback)
Exit the context manager.
Exit the context manager.
def __exit__(self, err_type, err_value, err_traceback):
        """Exit the context manager."""
        if err_type is IndyError:
            raise self.wrap_error(
                err_value, self.message, self.error_cls
            ) from err_value
[ "def", "__exit__", "(", "self", ",", "err_type", ",", "err_value", ",", "err_traceback", ")", ":", "if", "err_type", "is", "IndyError", ":", "raise", "self", ".", "wrap_error", "(", "err_value", ",", "self", ".", "message", ",", "self", ".", "error_cls", ")", "from", "err_value" ]
[ 55, 4 ]
[ 60, 28 ]
python
en
['en', 'en', 'en']
True
IndyErrorHandler.wrap_error
( cls, err_value: IndyError, message: str = None, error_cls: Type[LedgerError] = LedgerError, )
Create an instance of LedgerError from an IndyError.
Create an instance of LedgerError from an IndyError.
def wrap_error(
        cls,
        err_value: IndyError,
        message: str = None,
        error_cls: Type[LedgerError] = LedgerError,
    ) -> LedgerError:
        """Create an instance of LedgerError from an IndyError."""
        err_msg = message or "Exception while performing ledger operation"
        indy_message = hasattr(err_value, "message") and err_value.message
        if indy_message:
            err_msg += f": {indy_message}"
        # TODO: may wish to attach backtrace when available
        return error_cls(err_msg)
[ "def", "wrap_error", "(", "cls", ",", "err_value", ":", "IndyError", ",", "message", ":", "str", "=", "None", ",", "error_cls", ":", "Type", "[", "LedgerError", "]", "=", "LedgerError", ",", ")", "->", "LedgerError", ":", "err_msg", "=", "message", "or", "\"Exception while performing ledger operation\"", "indy_message", "=", "hasattr", "(", "err_value", ",", "\"message\"", ")", "and", "err_value", ".", "message", "if", "indy_message", ":", "err_msg", "+=", "f\": {indy_message}\"", "# TODO: may wish to attach backtrace when available", "return", "error_cls", "(", "err_msg", ")" ]
[ 63, 4 ]
[ 75, 33 ]
python
en
['en', 'lb', 'en']
True
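Taken together, the four records above describe a small error-translation context manager. A usage sketch in the style the ledger methods below use (indy and the error classes are assumed to be imported as in the rest of this module, and the block must live inside an async function):

# Hypothetical usage; any IndyError raised in the block is re-raised as the
# given LedgerError subclass, with the indy message appended to the supplied
# context message.
with IndyErrorHandler("Exception when opening pool ledger", LedgerConfigError):
    pool_handle = await indy.pool.open_pool_ledger(pool_name, "{}")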
IndyLedger.__init__
( self, pool_name: str, wallet: BaseWallet, *, keepalive: int = 0, cache: BaseCache = None, cache_duration: int = 600, )
Initialize an IndyLedger instance. Args: pool_name: The Indy pool ledger configuration name wallet: IndyWallet instance keepalive: How many seconds to keep the ledger open cache: The cache instance to use cache_duration: The TTL for ledger cache entries
Initialize an IndyLedger instance.
def __init__(
        self,
        pool_name: str,
        wallet: BaseWallet,
        *,
        keepalive: int = 0,
        cache: BaseCache = None,
        cache_duration: int = 600,
    ):
        """
        Initialize an IndyLedger instance.

        Args:
            pool_name: The Indy pool ledger configuration name
            wallet: IndyWallet instance
            keepalive: How many seconds to keep the ledger open
            cache: The cache instance to use
            cache_duration: The TTL for ledger cache entries

        """
        self.logger = logging.getLogger(__name__)

        self.opened = False
        self.ref_count = 0
        self.ref_lock = asyncio.Lock()
        self.keepalive = keepalive
        self.close_task: asyncio.Future = None
        self.cache = cache
        self.cache_duration = cache_duration
        self.wallet = wallet
        self.pool_handle = None
        self.pool_name = pool_name
        self.taa_acceptance = None
        self.taa_cache = None

        if wallet.WALLET_TYPE != "indy":
            raise LedgerConfigError("Wallet type is not 'indy'")
[ "def", "__init__", "(", "self", ",", "pool_name", ":", "str", ",", "wallet", ":", "BaseWallet", ",", "*", ",", "keepalive", ":", "int", "=", "0", ",", "cache", ":", "BaseCache", "=", "None", ",", "cache_duration", ":", "int", "=", "600", ",", ")", ":", "self", ".", "logger", "=", "logging", ".", "getLogger", "(", "__name__", ")", "self", ".", "opened", "=", "False", "self", ".", "ref_count", "=", "0", "self", ".", "ref_lock", "=", "asyncio", ".", "Lock", "(", ")", "self", ".", "keepalive", "=", "keepalive", "self", ".", "close_task", ":", "asyncio", ".", "Future", "=", "None", "self", ".", "cache", "=", "cache", "self", ".", "cache_duration", "=", "cache_duration", "self", ".", "wallet", "=", "wallet", "self", ".", "pool_handle", "=", "None", "self", ".", "pool_name", "=", "pool_name", "self", ".", "taa_acceptance", "=", "None", "self", ".", "taa_cache", "=", "None", "if", "wallet", ".", "WALLET_TYPE", "!=", "\"indy\"", ":", "raise", "LedgerConfigError", "(", "\"Wallet type is not 'indy'\"", ")" ]
[ 83, 4 ]
[ 118, 64 ]
python
en
['en', 'error', 'th']
False
IndyLedger.create_pool_config
( self, genesis_transactions: str, recreate: bool = False )
Create the pool ledger configuration.
Create the pool ledger configuration.
async def create_pool_config(
        self, genesis_transactions: str, recreate: bool = False
    ):
        """Create the pool ledger configuration."""

        # indy-sdk requires a file but it's only used once to bootstrap
        # the connection so we take a string instead of create a tmp file
        txn_path = GENESIS_TRANSACTION_PATH
        with open(txn_path, "w") as genesis_file:
            genesis_file.write(genesis_transactions)
        pool_config = json.dumps({"genesis_txn": txn_path})

        if await self.check_pool_config():
            if recreate:
                self.logger.debug("Removing existing ledger config")
                await indy.pool.delete_pool_ledger_config(self.pool_name)
            else:
                raise LedgerConfigError(
                    "Ledger pool configuration already exists: %s", self.pool_name
                )

        self.logger.debug("Creating pool ledger config")
        with IndyErrorHandler(
            "Exception when creating pool ledger config", LedgerConfigError
        ):
            await indy.pool.create_pool_ledger_config(self.pool_name, pool_config)
[ "async", "def", "create_pool_config", "(", "self", ",", "genesis_transactions", ":", "str", ",", "recreate", ":", "bool", "=", "False", ")", ":", "# indy-sdk requires a file but it's only used once to bootstrap", "# the connection so we take a string instead of create a tmp file", "txn_path", "=", "GENESIS_TRANSACTION_PATH", "with", "open", "(", "txn_path", ",", "\"w\"", ")", "as", "genesis_file", ":", "genesis_file", ".", "write", "(", "genesis_transactions", ")", "pool_config", "=", "json", ".", "dumps", "(", "{", "\"genesis_txn\"", ":", "txn_path", "}", ")", "if", "await", "self", ".", "check_pool_config", "(", ")", ":", "if", "recreate", ":", "self", ".", "logger", ".", "debug", "(", "\"Removing existing ledger config\"", ")", "await", "indy", ".", "pool", ".", "delete_pool_ledger_config", "(", "self", ".", "pool_name", ")", "else", ":", "raise", "LedgerConfigError", "(", "\"Ledger pool configuration already exists: %s\"", ",", "self", ".", "pool_name", ")", "self", ".", "logger", ".", "debug", "(", "\"Creating pool ledger config\"", ")", "with", "IndyErrorHandler", "(", "\"Exception when creating pool ledger config\"", ",", "LedgerConfigError", ")", ":", "await", "indy", ".", "pool", ".", "create_pool_ledger_config", "(", "self", ".", "pool_name", ",", "pool_config", ")" ]
[ 120, 4 ]
[ 145, 82 ]
python
en
['en', 'sm', 'en']
True
IndyLedger.check_pool_config
(self)
Check if a pool config has been created.
Check if a pool config has been created.
async def check_pool_config(self) -> bool:
        """Check if a pool config has been created."""
        pool_names = {cfg["pool"] for cfg in await indy.pool.list_pools()}
        return self.pool_name in pool_names
[ "async", "def", "check_pool_config", "(", "self", ")", "->", "bool", ":", "pool_names", "=", "{", "cfg", "[", "\"pool\"", "]", "for", "cfg", "in", "await", "indy", ".", "pool", ".", "list_pools", "(", ")", "}", "return", "self", ".", "pool_name", "in", "pool_names" ]
[ 147, 4 ]
[ 150, 43 ]
python
en
['en', 'en', 'en']
True
IndyLedger.open
(self)
Open the pool ledger, creating it if necessary.
Open the pool ledger, creating it if necessary.
async def open(self):
        """Open the pool ledger, creating it if necessary."""
        # We only support proto ver 2
        with IndyErrorHandler(
            "Exception when setting ledger protocol version", LedgerConfigError
        ):
            await indy.pool.set_protocol_version(2)

        with IndyErrorHandler("Exception when opening pool ledger", LedgerConfigError):
            self.pool_handle = await indy.pool.open_pool_ledger(self.pool_name, "{}")
        self.opened = True
[ "async", "def", "open", "(", "self", ")", ":", "# We only support proto ver 2", "with", "IndyErrorHandler", "(", "\"Exception when setting ledger protocol version\"", ",", "LedgerConfigError", ")", ":", "await", "indy", ".", "pool", ".", "set_protocol_version", "(", "2", ")", "with", "IndyErrorHandler", "(", "\"Exception when opening pool ledger\"", ",", "LedgerConfigError", ")", ":", "self", ".", "pool_handle", "=", "await", "indy", ".", "pool", ".", "open_pool_ledger", "(", "self", ".", "pool_name", ",", "\"{}\"", ")", "self", ".", "opened", "=", "True" ]
[ 152, 4 ]
[ 162, 26 ]
python
en
['en', 'en', 'en']
True
IndyLedger.close
(self)
Close the pool ledger.
Close the pool ledger.
async def close(self):
        """Close the pool ledger."""
        if self.opened:
            with IndyErrorHandler("Exception when closing pool ledger"):
                await indy.pool.close_pool_ledger(self.pool_handle)
            self.pool_handle = None
            self.opened = False
[ "async", "def", "close", "(", "self", ")", ":", "if", "self", ".", "opened", ":", "with", "IndyErrorHandler", "(", "\"Exception when closing pool ledger\"", ")", ":", "await", "indy", ".", "pool", ".", "close_pool_ledger", "(", "self", ".", "pool_handle", ")", "self", ".", "pool_handle", "=", "None", "self", ".", "opened", "=", "False" ]
[ 164, 4 ]
[ 170, 31 ]
python
en
['en', 'sq', 'en']
True
IndyLedger._context_open
(self)
Open the wallet if necessary and increase the number of active references.
Open the wallet if necessary and increase the number of active references.
async def _context_open(self):
        """Open the wallet if necessary and increase the number of active references."""
        async with self.ref_lock:
            if self.close_task:
                self.close_task.cancel()
            if not self.opened:
                self.logger.debug("Opening the pool ledger")
                await self.open()
            self.ref_count += 1
[ "async", "def", "_context_open", "(", "self", ")", ":", "async", "with", "self", ".", "ref_lock", ":", "if", "self", ".", "close_task", ":", "self", ".", "close_task", ".", "cancel", "(", ")", "if", "not", "self", ".", "opened", ":", "self", ".", "logger", ".", "debug", "(", "\"Opening the pool ledger\"", ")", "await", "self", ".", "open", "(", ")", "self", ".", "ref_count", "+=", "1" ]
[ 172, 4 ]
[ 180, 31 ]
python
en
['en', 'en', 'en']
True
IndyLedger._context_close
(self)
Release the wallet reference and schedule closing of the pool ledger.
Release the wallet reference and schedule closing of the pool ledger.
async def _context_close(self):
        """Release the wallet reference and schedule closing of the pool ledger."""

        async def closer(timeout: int):
            """Close the pool ledger after a timeout."""
            await asyncio.sleep(timeout)
            async with self.ref_lock:
                if not self.ref_count:
                    self.logger.debug("Closing pool ledger after timeout")
                    await self.close()

        async with self.ref_lock:
            self.ref_count -= 1
            if not self.ref_count:
                if self.keepalive:
                    self.close_task = asyncio.ensure_future(closer(self.keepalive))
                else:
                    await self.close()
[ "async", "def", "_context_close", "(", "self", ")", ":", "async", "def", "closer", "(", "timeout", ":", "int", ")", ":", "\"\"\"Close the pool ledger after a timeout.\"\"\"", "await", "asyncio", ".", "sleep", "(", "timeout", ")", "async", "with", "self", ".", "ref_lock", ":", "if", "not", "self", ".", "ref_count", ":", "self", ".", "logger", ".", "debug", "(", "\"Closing pool ledger after timeout\"", ")", "await", "self", ".", "close", "(", ")", "async", "with", "self", ".", "ref_lock", ":", "self", ".", "ref_count", "-=", "1", "if", "not", "self", ".", "ref_count", ":", "if", "self", ".", "keepalive", ":", "self", ".", "close_task", "=", "asyncio", ".", "ensure_future", "(", "closer", "(", "self", ".", "keepalive", ")", ")", "else", ":", "await", "self", ".", "close", "(", ")" ]
[ 182, 4 ]
[ 199, 38 ]
python
en
['en', 'en', 'en']
True
IndyLedger.__aenter__
(self)
Context manager entry. Returns: The current instance
Context manager entry.
async def __aenter__(self) -> "IndyLedger":
        """
        Context manager entry.

        Returns:
            The current instance

        """
        await self._context_open()
        return self
[ "async", "def", "__aenter__", "(", "self", ")", "->", "\"IndyLedger\"", ":", "await", "self", ".", "_context_open", "(", ")", "return", "self" ]
[ 201, 4 ]
[ 210, 19 ]
python
en
['en', 'error', 'th']
False
IndyLedger.__aexit__
(self, exc_type, exc, tb)
Context manager exit.
Context manager exit.
async def __aexit__(self, exc_type, exc, tb):
        """Context manager exit."""
        await self._context_close()
[ "async", "def", "__aexit__", "(", "self", ",", "exc_type", ",", "exc", ",", "tb", ")", ":", "await", "self", ".", "_context_close", "(", ")" ]
[ 212, 4 ]
[ 214, 35 ]
python
en
['da', 'en', 'en']
True
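The open/close, reference-count and __aenter__/__aexit__ records above combine into the usual usage pattern; a hedged sketch (wallet, cache and schema_id are placeholders, not part of these records):

# Hypothetical usage; `wallet` must be an indy-type BaseWallet and `cache`
# an optional BaseCache instance, both constructed elsewhere.
ledger = IndyLedger("default-pool", wallet, keepalive=5, cache=cache)
async with ledger:
    schema = await ledger.get_schema(schema_id)
# __aenter__ opens (or reuses) the pool handle and bumps the reference count;
# __aexit__ drops the count and, with keepalive set, schedules a delayed close.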
IndyLedger._submit
( self, request_json: str, sign: bool = None, taa_accept: bool = False, public_did: str = "", )
Sign and submit request to ledger. Args: request_json: The json string to submit sign: whether or not to sign the request taa_accept: whether to apply TAA acceptance to the (signed, write) request public_did: override the public DID used to sign the request
Sign and submit request to ledger.
async def _submit( self, request_json: str, sign: bool = None, taa_accept: bool = False, public_did: str = "", ) -> str: """ Sign and submit request to ledger. Args: request_json: The json string to submit sign: whether or not to sign the request taa_accept: whether to apply TAA acceptance to the (signed, write) request public_did: override the public DID used to sign the request """ if not self.pool_handle: raise ClosedPoolError( "Cannot sign and submit request to closed pool {}".format( self.pool_name ) ) if (sign is None and public_did == "") or (sign and not public_did): did_info = await self.wallet.get_public_did() if did_info: public_did = did_info.did if public_did and sign is None: sign = True if sign: if not public_did: raise BadLedgerRequestError("Cannot sign request without a public DID") if taa_accept: acceptance = await self.get_latest_txn_author_acceptance() if acceptance: request_json = await ( indy.ledger.append_txn_author_agreement_acceptance_to_request( request_json, acceptance["text"], acceptance["version"], acceptance["digest"], acceptance["mechanism"], acceptance["time"], ) ) submit_op = indy.ledger.sign_and_submit_request( self.pool_handle, self.wallet.handle, public_did, request_json ) else: submit_op = indy.ledger.submit_request(self.pool_handle, request_json) with IndyErrorHandler( "Exception raised by ledger transaction", LedgerTransactionError ): request_result_json = await submit_op request_result = json.loads(request_result_json) operation = request_result.get("op", "") if operation in ("REQNACK", "REJECT"): raise LedgerTransactionError( f"Ledger rejected transaction request: {request_result['reason']}" ) elif operation == "REPLY": return request_result_json else: raise LedgerTransactionError( f"Unexpected operation code from ledger: {operation}" )
[ "async", "def", "_submit", "(", "self", ",", "request_json", ":", "str", ",", "sign", ":", "bool", "=", "None", ",", "taa_accept", ":", "bool", "=", "False", ",", "public_did", ":", "str", "=", "\"\"", ",", ")", "->", "str", ":", "if", "not", "self", ".", "pool_handle", ":", "raise", "ClosedPoolError", "(", "\"Cannot sign and submit request to closed pool {}\"", ".", "format", "(", "self", ".", "pool_name", ")", ")", "if", "(", "sign", "is", "None", "and", "public_did", "==", "\"\"", ")", "or", "(", "sign", "and", "not", "public_did", ")", ":", "did_info", "=", "await", "self", ".", "wallet", ".", "get_public_did", "(", ")", "if", "did_info", ":", "public_did", "=", "did_info", ".", "did", "if", "public_did", "and", "sign", "is", "None", ":", "sign", "=", "True", "if", "sign", ":", "if", "not", "public_did", ":", "raise", "BadLedgerRequestError", "(", "\"Cannot sign request without a public DID\"", ")", "if", "taa_accept", ":", "acceptance", "=", "await", "self", ".", "get_latest_txn_author_acceptance", "(", ")", "if", "acceptance", ":", "request_json", "=", "await", "(", "indy", ".", "ledger", ".", "append_txn_author_agreement_acceptance_to_request", "(", "request_json", ",", "acceptance", "[", "\"text\"", "]", ",", "acceptance", "[", "\"version\"", "]", ",", "acceptance", "[", "\"digest\"", "]", ",", "acceptance", "[", "\"mechanism\"", "]", ",", "acceptance", "[", "\"time\"", "]", ",", ")", ")", "submit_op", "=", "indy", ".", "ledger", ".", "sign_and_submit_request", "(", "self", ".", "pool_handle", ",", "self", ".", "wallet", ".", "handle", ",", "public_did", ",", "request_json", ")", "else", ":", "submit_op", "=", "indy", ".", "ledger", ".", "submit_request", "(", "self", ".", "pool_handle", ",", "request_json", ")", "with", "IndyErrorHandler", "(", "\"Exception raised by ledger transaction\"", ",", "LedgerTransactionError", ")", ":", "request_result_json", "=", "await", "submit_op", "request_result", "=", "json", ".", "loads", "(", "request_result_json", ")", "operation", "=", "request_result", ".", "get", "(", "\"op\"", ",", "\"\"", ")", "if", "operation", "in", "(", "\"REQNACK\"", ",", "\"REJECT\"", ")", ":", "raise", "LedgerTransactionError", "(", "f\"Ledger rejected transaction request: {request_result['reason']}\"", ")", "elif", "operation", "==", "\"REPLY\"", ":", "return", "request_result_json", "else", ":", "raise", "LedgerTransactionError", "(", "f\"Unexpected operation code from ledger: {operation}\"", ")" ]
[ 216, 4 ]
[ 290, 13 ]
python
en
['en', 'error', 'th']
False
IndyLedger.send_schema
( self, schema_name: str, schema_version: str, attribute_names: Sequence[str] )
Send schema to ledger. Args: schema_name: The schema name schema_version: The schema version attribute_names: A list of schema attributes
Send schema to ledger.
async def send_schema( self, schema_name: str, schema_version: str, attribute_names: Sequence[str] ): """ Send schema to ledger. Args: schema_name: The schema name schema_version: The schema version attribute_names: A list of schema attributes """ public_info = await self.wallet.get_public_did() if not public_info: raise BadLedgerRequestError("Cannot publish schema without a public DID") schema_id = await self.check_existing_schema( public_info.did, schema_name, schema_version, attribute_names ) if schema_id: self.logger.warning("Schema already exists on ledger. Returning ID.") else: with IndyErrorHandler("Exception when creating schema definition"): schema_id, schema_json = await indy.anoncreds.issuer_create_schema( public_info.did, schema_name, schema_version, json.dumps(attribute_names), ) with IndyErrorHandler("Exception when building schema request"): request_json = await indy.ledger.build_schema_request( public_info.did, schema_json ) try: await self._submit(request_json, public_did=public_info.did) except LedgerTransactionError as e: # Identify possible duplicate schema errors on indy-node < 1.9 and > 1.9 if ( "can have one and only one SCHEMA with name" in e.message or "UnauthorizedClientRequest" in e.message ): # handle potential race condition if multiple agents are publishing # the same schema simultaneously schema_id = await self.check_existing_schema( public_info.did, schema_name, schema_version, attribute_names ) if schema_id: self.logger.warning( "Schema already exists on ledger. Returning ID. Error: %s", e, ) else: raise schema_id_parts = schema_id.split(":") schema_tags = { "schema_id": schema_id, "schema_issuer_did": public_info.did, "schema_name": schema_id_parts[-2], "schema_version": schema_id_parts[-1], "epoch": str(int(time())), } record = StorageRecord(SCHEMA_SENT_RECORD_TYPE, schema_id, schema_tags,) storage = self.get_indy_storage() await storage.add_record(record) return schema_id
[ "async", "def", "send_schema", "(", "self", ",", "schema_name", ":", "str", ",", "schema_version", ":", "str", ",", "attribute_names", ":", "Sequence", "[", "str", "]", ")", ":", "public_info", "=", "await", "self", ".", "wallet", ".", "get_public_did", "(", ")", "if", "not", "public_info", ":", "raise", "BadLedgerRequestError", "(", "\"Cannot publish schema without a public DID\"", ")", "schema_id", "=", "await", "self", ".", "check_existing_schema", "(", "public_info", ".", "did", ",", "schema_name", ",", "schema_version", ",", "attribute_names", ")", "if", "schema_id", ":", "self", ".", "logger", ".", "warning", "(", "\"Schema already exists on ledger. Returning ID.\"", ")", "else", ":", "with", "IndyErrorHandler", "(", "\"Exception when creating schema definition\"", ")", ":", "schema_id", ",", "schema_json", "=", "await", "indy", ".", "anoncreds", ".", "issuer_create_schema", "(", "public_info", ".", "did", ",", "schema_name", ",", "schema_version", ",", "json", ".", "dumps", "(", "attribute_names", ")", ",", ")", "with", "IndyErrorHandler", "(", "\"Exception when building schema request\"", ")", ":", "request_json", "=", "await", "indy", ".", "ledger", ".", "build_schema_request", "(", "public_info", ".", "did", ",", "schema_json", ")", "try", ":", "await", "self", ".", "_submit", "(", "request_json", ",", "public_did", "=", "public_info", ".", "did", ")", "except", "LedgerTransactionError", "as", "e", ":", "# Identify possible duplicate schema errors on indy-node < 1.9 and > 1.9", "if", "(", "\"can have one and only one SCHEMA with name\"", "in", "e", ".", "message", "or", "\"UnauthorizedClientRequest\"", "in", "e", ".", "message", ")", ":", "# handle potential race condition if multiple agents are publishing", "# the same schema simultaneously", "schema_id", "=", "await", "self", ".", "check_existing_schema", "(", "public_info", ".", "did", ",", "schema_name", ",", "schema_version", ",", "attribute_names", ")", "if", "schema_id", ":", "self", ".", "logger", ".", "warning", "(", "\"Schema already exists on ledger. Returning ID. Error: %s\"", ",", "e", ",", ")", "else", ":", "raise", "schema_id_parts", "=", "schema_id", ".", "split", "(", "\":\"", ")", "schema_tags", "=", "{", "\"schema_id\"", ":", "schema_id", ",", "\"schema_issuer_did\"", ":", "public_info", ".", "did", ",", "\"schema_name\"", ":", "schema_id_parts", "[", "-", "2", "]", ",", "\"schema_version\"", ":", "schema_id_parts", "[", "-", "1", "]", ",", "\"epoch\"", ":", "str", "(", "int", "(", "time", "(", ")", ")", ")", ",", "}", "record", "=", "StorageRecord", "(", "SCHEMA_SENT_RECORD_TYPE", ",", "schema_id", ",", "schema_tags", ",", ")", "storage", "=", "self", ".", "get_indy_storage", "(", ")", "await", "storage", ".", "add_record", "(", "record", ")", "return", "schema_id" ]
[ 292, 4 ]
[ 361, 24 ]
python
en
['en', 'error', 'th']
False
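A usage sketch for send_schema (hypothetical names; a public DID must already exist in the wallet):

# Hypothetical call; the schema name, version and attribute names are examples.
async with ledger:
    schema_id = await ledger.send_schema("prefs", "1.0", ["score", "height"])
# If the same DID already published an identical schema, the existing ID is
# returned; mismatched attributes raise LedgerTransactionError instead.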
IndyLedger.check_existing_schema
( self, public_did: str, schema_name: str, schema_version: str, attribute_names: Sequence[str], )
Check if a schema has already been published.
Check if a schema has already been published.
async def check_existing_schema(
        self,
        public_did: str,
        schema_name: str,
        schema_version: str,
        attribute_names: Sequence[str],
    ) -> str:
        """Check if a schema has already been published."""
        fetch_schema_id = f"{public_did}:2:{schema_name}:{schema_version}"
        schema = await self.fetch_schema_by_id(fetch_schema_id)
        if schema:
            fetched_attrs = schema["attrNames"].copy()
            fetched_attrs.sort()
            cmp_attrs = list(attribute_names)
            cmp_attrs.sort()
            if fetched_attrs != cmp_attrs:
                raise LedgerTransactionError(
                    "Schema already exists on ledger, but attributes do not match: "
                    + f"{schema_name}:{schema_version} {fetched_attrs} != {cmp_attrs}"
                )
            return fetch_schema_id
[ "async", "def", "check_existing_schema", "(", "self", ",", "public_did", ":", "str", ",", "schema_name", ":", "str", ",", "schema_version", ":", "str", ",", "attribute_names", ":", "Sequence", "[", "str", "]", ",", ")", "->", "str", ":", "fetch_schema_id", "=", "f\"{public_did}:2:{schema_name}:{schema_version}\"", "schema", "=", "await", "self", ".", "fetch_schema_by_id", "(", "fetch_schema_id", ")", "if", "schema", ":", "fetched_attrs", "=", "schema", "[", "\"attrNames\"", "]", ".", "copy", "(", ")", "fetched_attrs", ".", "sort", "(", ")", "cmp_attrs", "=", "list", "(", "attribute_names", ")", "cmp_attrs", ".", "sort", "(", ")", "if", "fetched_attrs", "!=", "cmp_attrs", ":", "raise", "LedgerTransactionError", "(", "\"Schema already exists on ledger, but attributes do not match: \"", "+", "f\"{schema_name}:{schema_version} {fetched_attrs} != {cmp_attrs}\"", ")", "return", "fetch_schema_id" ]
[ 363, 4 ]
[ 383, 34 ]
python
en
['en', 'en', 'en']
True
IndyLedger.get_schema
(self, schema_id: str)
Get a schema from the cache if available, otherwise fetch from the ledger. Args: schema_id: The schema id (or stringified sequence number) to retrieve
Get a schema from the cache if available, otherwise fetch from the ledger.
async def get_schema(self, schema_id: str):
        """
        Get a schema from the cache if available, otherwise fetch from the ledger.

        Args:
            schema_id: The schema id (or stringified sequence number) to retrieve

        """
        if self.cache:
            result = await self.cache.get(f"schema::{schema_id}")
            if result:
                return result

        if schema_id.isdigit():
            return await self.fetch_schema_by_seq_no(int(schema_id))
        else:
            return await self.fetch_schema_by_id(schema_id)
[ "async", "def", "get_schema", "(", "self", ",", "schema_id", ":", "str", ")", ":", "if", "self", ".", "cache", ":", "result", "=", "await", "self", ".", "cache", ".", "get", "(", "f\"schema::{schema_id}\"", ")", "if", "result", ":", "return", "result", "if", "schema_id", ".", "isdigit", "(", ")", ":", "return", "await", "self", ".", "fetch_schema_by_seq_no", "(", "int", "(", "schema_id", ")", ")", "else", ":", "return", "await", "self", ".", "fetch_schema_by_id", "(", "schema_id", ")" ]
[ 385, 4 ]
[ 401, 59 ]
python
en
['en', 'error', 'th']
False
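A sketch of the caching behaviour described above (illustrative only; the sequence number is invented):

# Illustrative only: repeated lookups are served from self.cache when it is set.
async with ledger:
    first = await ledger.get_schema(schema_id)   # ledger round trip, then cached
    second = await ledger.get_schema(schema_id)  # cache hit, no ledger call
    by_seq = await ledger.get_schema("2471")     # all-digit id -> lookup by seq no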
IndyLedger.fetch_schema_by_id
(self, schema_id: str)
Get schema from ledger. Args: schema_id: The schema id (or stringified sequence number) to retrieve Returns: Indy schema dict
Get schema from ledger.
async def fetch_schema_by_id(self, schema_id: str):
        """
        Get schema from ledger.

        Args:
            schema_id: The schema id (or stringified sequence number) to retrieve

        Returns:
            Indy schema dict

        """
        public_info = await self.wallet.get_public_did()
        public_did = public_info.did if public_info else None
        with IndyErrorHandler("Exception when building schema request"):
            request_json = await indy.ledger.build_get_schema_request(
                public_did, schema_id
            )
        response_json = await self._submit(request_json, public_did=public_did)
        response = json.loads(response_json)
        if not response["result"]["seqNo"]:
            # schema not found
            return None

        with IndyErrorHandler("Exception when parsing schema response"):
            _, parsed_schema_json = await indy.ledger.parse_get_schema_response(
                response_json
            )

        parsed_response = json.loads(parsed_schema_json)
        if parsed_response and self.cache:
            await self.cache.set(
                [f"schema::{schema_id}", f"schema::{response['result']['seqNo']}"],
                parsed_response,
                self.cache_duration,
            )

        return parsed_response
[ "async", "def", "fetch_schema_by_id", "(", "self", ",", "schema_id", ":", "str", ")", ":", "public_info", "=", "await", "self", ".", "wallet", ".", "get_public_did", "(", ")", "public_did", "=", "public_info", ".", "did", "if", "public_info", "else", "None", "with", "IndyErrorHandler", "(", "\"Exception when building schema request\"", ")", ":", "request_json", "=", "await", "indy", ".", "ledger", ".", "build_get_schema_request", "(", "public_did", ",", "schema_id", ")", "response_json", "=", "await", "self", ".", "_submit", "(", "request_json", ",", "public_did", "=", "public_did", ")", "response", "=", "json", ".", "loads", "(", "response_json", ")", "if", "not", "response", "[", "\"result\"", "]", "[", "\"seqNo\"", "]", ":", "# schema not found", "return", "None", "with", "IndyErrorHandler", "(", "\"Exception when parsing schema response\"", ")", ":", "_", ",", "parsed_schema_json", "=", "await", "indy", ".", "ledger", ".", "parse_get_schema_response", "(", "response_json", ")", "parsed_response", "=", "json", ".", "loads", "(", "parsed_schema_json", ")", "if", "parsed_response", "and", "self", ".", "cache", ":", "await", "self", ".", "cache", ".", "set", "(", "[", "f\"schema::{schema_id}\"", ",", "f\"schema::{response['result']['seqNo']}\"", "]", ",", "parsed_response", ",", "self", ".", "cache_duration", ",", ")", "return", "parsed_response" ]
[ 403, 4 ]
[ 442, 30 ]
python
en
['en', 'error', 'th']
False
IndyLedger.fetch_schema_by_seq_no
(self, seq_no: int)
Fetch a schema by its sequence number. Args: seq_no: schema ledger sequence number Returns: Indy schema dict
Fetch a schema by its sequence number.
async def fetch_schema_by_seq_no(self, seq_no: int):
        """
        Fetch a schema by its sequence number.

        Args:
            seq_no: schema ledger sequence number

        Returns:
            Indy schema dict

        """
        # get txn by sequence number, retrieve schema identifier components
        request_json = await indy.ledger.build_get_txn_request(
            None, None, seq_no=seq_no
        )
        response = json.loads(await self._submit(request_json))

        # transaction data format assumes node protocol >= 1.4 (circa 2018-07)
        data_txn = (response["result"].get("data", {}) or {}).get("txn", {})
        if data_txn.get("type", None) == "101":  # marks indy-sdk schema txn type
            (origin_did, name, version) = (
                data_txn["metadata"]["from"],
                data_txn["data"]["data"]["name"],
                data_txn["data"]["data"]["version"],
            )
            schema_id = f"{origin_did}:2:{name}:{version}"
            return await self.get_schema(schema_id)

        raise LedgerTransactionError(
            f"Could not get schema from ledger for seq no {seq_no}"
        )
[ "async", "def", "fetch_schema_by_seq_no", "(", "self", ",", "seq_no", ":", "int", ")", ":", "# get txn by sequence number, retrieve schema identifier components", "request_json", "=", "await", "indy", ".", "ledger", ".", "build_get_txn_request", "(", "None", ",", "None", ",", "seq_no", "=", "seq_no", ")", "response", "=", "json", ".", "loads", "(", "await", "self", ".", "_submit", "(", "request_json", ")", ")", "# transaction data format assumes node protocol >= 1.4 (circa 2018-07)", "data_txn", "=", "(", "response", "[", "\"result\"", "]", ".", "get", "(", "\"data\"", ",", "{", "}", ")", "or", "{", "}", ")", ".", "get", "(", "\"txn\"", ",", "{", "}", ")", "if", "data_txn", ".", "get", "(", "\"type\"", ",", "None", ")", "==", "\"101\"", ":", "# marks indy-sdk schema txn type", "(", "origin_did", ",", "name", ",", "version", ")", "=", "(", "data_txn", "[", "\"metadata\"", "]", "[", "\"from\"", "]", ",", "data_txn", "[", "\"data\"", "]", "[", "\"data\"", "]", "[", "\"name\"", "]", ",", "data_txn", "[", "\"data\"", "]", "[", "\"data\"", "]", "[", "\"version\"", "]", ",", ")", "schema_id", "=", "f\"{origin_did}:2:{name}:{version}\"", "return", "await", "self", ".", "get_schema", "(", "schema_id", ")", "raise", "LedgerTransactionError", "(", "f\"Could not get schema from ledger for seq no {seq_no}\"", ")" ]
[ 444, 4 ]
[ 474, 9 ]
python
en
['en', 'error', 'th']
False
IndyLedger.send_credential_definition
(self, schema_id: str, tag: str = None)
Send credential definition to ledger and store relevant key matter in wallet. Args: schema_id: The schema id of the schema to create cred def for tag: Option tag to distinguish multiple credential definitions
Send credential definition to ledger and store relevant key matter in wallet.
async def send_credential_definition(self, schema_id: str, tag: str = None): """ Send credential definition to ledger and store relevant key matter in wallet. Args: schema_id: The schema id of the schema to create cred def for tag: Option tag to distinguish multiple credential definitions """ public_info = await self.wallet.get_public_did() if not public_info: raise BadLedgerRequestError( "Cannot publish credential definition without a public DID" ) schema = await self.get_schema(schema_id) # TODO: add support for tag, sig type, and config try: ( credential_definition_id, credential_definition_json, ) = await indy.anoncreds.issuer_create_and_store_credential_def( self.wallet.handle, public_info.did, json.dumps(schema), tag or "default", "CL", json.dumps({"support_revocation": False}), ) # If the cred def already exists in the wallet, we need some way of obtaining # that cred def id (from schema id passed) since we can now assume we can use # it in future operations. except IndyError as error: if error.error_code == ErrorCode.AnoncredsCredDefAlreadyExistsError: try: credential_definition_id = re.search( r"\w*:3:CL:(([1-9][0-9]*)|(.{21,22}:2:.+:[0-9.]+)):\w*", error.message, ).group(0) # The regex search failed so let the error bubble up except AttributeError: raise LedgerError( "Previous credential definition exists, but ID could " "not be extracted" ) else: raise IndyErrorHandler.wrap_error(error) from error # check if the cred def already exists on the ledger cred_def = json.loads(credential_definition_json) exist_def = await self.fetch_credential_definition(credential_definition_id) if exist_def: if exist_def["value"] != cred_def["value"]: self.logger.warning( "Ledger definition of cred def %s will be replaced", credential_definition_id, ) exist_def = None if not exist_def: with IndyErrorHandler("Exception when building cred def request"): request_json = await indy.ledger.build_cred_def_request( public_info.did, credential_definition_json ) await self._submit(request_json, True, public_did=public_info.did) else: self.logger.warning( "Ledger definition of cred def %s already exists", credential_definition_id, ) schema_id_parts = schema_id.split(":") cred_def_tags = { "schema_id": schema_id, "schema_issuer_did": schema_id_parts[0], "schema_name": schema_id_parts[-2], "schema_version": schema_id_parts[-1], "issuer_did": public_info.did, "cred_def_id": credential_definition_id, "epoch": str(int(time())), } record = StorageRecord( CRED_DEF_SENT_RECORD_TYPE, credential_definition_id, cred_def_tags, ) storage = self.get_indy_storage() await storage.add_record(record) return credential_definition_id
[ "async", "def", "send_credential_definition", "(", "self", ",", "schema_id", ":", "str", ",", "tag", ":", "str", "=", "None", ")", ":", "public_info", "=", "await", "self", ".", "wallet", ".", "get_public_did", "(", ")", "if", "not", "public_info", ":", "raise", "BadLedgerRequestError", "(", "\"Cannot publish credential definition without a public DID\"", ")", "schema", "=", "await", "self", ".", "get_schema", "(", "schema_id", ")", "# TODO: add support for tag, sig type, and config", "try", ":", "(", "credential_definition_id", ",", "credential_definition_json", ",", ")", "=", "await", "indy", ".", "anoncreds", ".", "issuer_create_and_store_credential_def", "(", "self", ".", "wallet", ".", "handle", ",", "public_info", ".", "did", ",", "json", ".", "dumps", "(", "schema", ")", ",", "tag", "or", "\"default\"", ",", "\"CL\"", ",", "json", ".", "dumps", "(", "{", "\"support_revocation\"", ":", "False", "}", ")", ",", ")", "# If the cred def already exists in the wallet, we need some way of obtaining", "# that cred def id (from schema id passed) since we can now assume we can use", "# it in future operations.", "except", "IndyError", "as", "error", ":", "if", "error", ".", "error_code", "==", "ErrorCode", ".", "AnoncredsCredDefAlreadyExistsError", ":", "try", ":", "credential_definition_id", "=", "re", ".", "search", "(", "r\"\\w*:3:CL:(([1-9][0-9]*)|(.{21,22}:2:.+:[0-9.]+)):\\w*\"", ",", "error", ".", "message", ",", ")", ".", "group", "(", "0", ")", "# The regex search failed so let the error bubble up", "except", "AttributeError", ":", "raise", "LedgerError", "(", "\"Previous credential definition exists, but ID could \"", "\"not be extracted\"", ")", "else", ":", "raise", "IndyErrorHandler", ".", "wrap_error", "(", "error", ")", "from", "error", "# check if the cred def already exists on the ledger", "cred_def", "=", "json", ".", "loads", "(", "credential_definition_json", ")", "exist_def", "=", "await", "self", ".", "fetch_credential_definition", "(", "credential_definition_id", ")", "if", "exist_def", ":", "if", "exist_def", "[", "\"value\"", "]", "!=", "cred_def", "[", "\"value\"", "]", ":", "self", ".", "logger", ".", "warning", "(", "\"Ledger definition of cred def %s will be replaced\"", ",", "credential_definition_id", ",", ")", "exist_def", "=", "None", "if", "not", "exist_def", ":", "with", "IndyErrorHandler", "(", "\"Exception when building cred def request\"", ")", ":", "request_json", "=", "await", "indy", ".", "ledger", ".", "build_cred_def_request", "(", "public_info", ".", "did", ",", "credential_definition_json", ")", "await", "self", ".", "_submit", "(", "request_json", ",", "True", ",", "public_did", "=", "public_info", ".", "did", ")", "else", ":", "self", ".", "logger", ".", "warning", "(", "\"Ledger definition of cred def %s already exists\"", ",", "credential_definition_id", ",", ")", "schema_id_parts", "=", "schema_id", ".", "split", "(", "\":\"", ")", "cred_def_tags", "=", "{", "\"schema_id\"", ":", "schema_id", ",", "\"schema_issuer_did\"", ":", "schema_id_parts", "[", "0", "]", ",", "\"schema_name\"", ":", "schema_id_parts", "[", "-", "2", "]", ",", "\"schema_version\"", ":", "schema_id_parts", "[", "-", "1", "]", ",", "\"issuer_did\"", ":", "public_info", ".", "did", ",", "\"cred_def_id\"", ":", "credential_definition_id", ",", "\"epoch\"", ":", "str", "(", "int", "(", "time", "(", ")", ")", ")", ",", "}", "record", "=", "StorageRecord", "(", "CRED_DEF_SENT_RECORD_TYPE", ",", "credential_definition_id", ",", "cred_def_tags", ",", ")", "storage", "=", "self", 
".", "get_indy_storage", "(", ")", "await", "storage", ".", "add_record", "(", "record", ")", "return", "credential_definition_id" ]
[ 476, 4 ]
[ 565, 39 ]
python
en
['en', 'error', 'th']
False
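When the cred def already exists in the wallet, send_credential_definition recovers its identifier from the indy-sdk error message with a regular expression. A standalone sketch of just that extraction, run against a made-up error string (the DID, sequence number, and tag are illustrative):

import re

# Made-up indy-sdk error text that embeds an existing cred def id.
error_message = (
    "Credential definition already exists in wallet: "
    "WgWxqztrNooG92RXvxSTWv:3:CL:1234:default"
)

match = re.search(
    r"\w*:3:CL:(([1-9][0-9]*)|(.{21,22}:2:.+:[0-9.]+)):\w*", error_message
)
if match:
    print(match.group(0))  # WgWxqztrNooG92RXvxSTWv:3:CL:1234:default
else:
    print("previous cred def exists, but ID could not be extracted")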
IndyLedger.get_credential_definition
(self, credential_definition_id: str)
Get a credential definition from the cache if available, otherwise the ledger. Args: credential_definition_id: The cred def id of the cred def to fetch
Get a credential definition from the cache if available, otherwise the ledger.
async def get_credential_definition(self, credential_definition_id: str): """ Get a credential definition from the cache if available, otherwise the ledger. Args: credential_definition_id: The schema id of the schema to fetch cred def for """ if self.cache: result = await self.cache.get( f"credential_definition::{credential_definition_id}" ) if result: return result return await self.fetch_credential_definition(credential_definition_id)
[ "async", "def", "get_credential_definition", "(", "self", ",", "credential_definition_id", ":", "str", ")", ":", "if", "self", ".", "cache", ":", "result", "=", "await", "self", ".", "cache", ".", "get", "(", "f\"credential_definition::{credential_definition_id}\"", ")", "if", "result", ":", "return", "result", "return", "await", "self", ".", "fetch_credential_definition", "(", "credential_definition_id", ")" ]
[ 567, 4 ]
[ 582, 79 ]
python
en
['en', 'error', 'th']
False
IndyLedger.fetch_credential_definition
(self, credential_definition_id: str)
Get a credential definition from the ledger by id. Args: credential_definition_id: The cred def id of the cred def to fetch
Get a credential definition from the ledger by id.
async def fetch_credential_definition(self, credential_definition_id: str): """ Get a credential definition from the ledger by id. Args: credential_definition_id: The cred def id of the cred def to fetch """ public_info = await self.wallet.get_public_did() public_did = public_info.did if public_info else None with IndyErrorHandler("Exception when building cred def request"): request_json = await indy.ledger.build_get_cred_def_request( public_did, credential_definition_id ) response_json = await self._submit(request_json, public_did=public_did) with IndyErrorHandler("Exception when parsing cred def response"): try: ( _, parsed_credential_definition_json, ) = await indy.ledger.parse_get_cred_def_response(response_json) parsed_response = json.loads(parsed_credential_definition_json) except IndyError as error: if error.error_code == ErrorCode.LedgerNotFound: parsed_response = None if parsed_response and self.cache: await self.cache.set( f"credential_definition::{credential_definition_id}", parsed_response, self.cache_duration, ) return parsed_response
[ "async", "def", "fetch_credential_definition", "(", "self", ",", "credential_definition_id", ":", "str", ")", ":", "public_info", "=", "await", "self", ".", "wallet", ".", "get_public_did", "(", ")", "public_did", "=", "public_info", ".", "did", "if", "public_info", "else", "None", "with", "IndyErrorHandler", "(", "\"Exception when building cred def request\"", ")", ":", "request_json", "=", "await", "indy", ".", "ledger", ".", "build_get_cred_def_request", "(", "public_did", ",", "credential_definition_id", ")", "response_json", "=", "await", "self", ".", "_submit", "(", "request_json", ",", "public_did", "=", "public_did", ")", "with", "IndyErrorHandler", "(", "\"Exception when parsing cred def response\"", ")", ":", "try", ":", "(", "_", ",", "parsed_credential_definition_json", ",", ")", "=", "await", "indy", ".", "ledger", ".", "parse_get_cred_def_response", "(", "response_json", ")", "parsed_response", "=", "json", ".", "loads", "(", "parsed_credential_definition_json", ")", "except", "IndyError", "as", "error", ":", "if", "error", ".", "error_code", "==", "ErrorCode", ".", "LedgerNotFound", ":", "parsed_response", "=", "None", "if", "parsed_response", "and", "self", ".", "cache", ":", "await", "self", ".", "cache", ".", "set", "(", "f\"credential_definition::{credential_definition_id}\"", ",", "parsed_response", ",", "self", ".", "cache_duration", ",", ")", "return", "parsed_response" ]
[ 584, 4 ]
[ 621, 30 ]
python
en
['en', 'error', 'th']
False
IndyLedger.credential_definition_id2schema_id
(self, credential_definition_id)
From a credential definition, get the identifier for its schema. Args: credential_definition_id: The identifier of the credential definition from which to identify a schema
From a credential definition, get the identifier for its schema.
async def credential_definition_id2schema_id(self, credential_definition_id): """ From a credential definition, get the identifier for its schema. Args: credential_definition_id: The identifier of the credential definition from which to identify a schema """ # scrape schema id or sequence number from cred def id tokens = credential_definition_id.split(":") if len(tokens) == 8: # node protocol >= 1.4: cred def id has 5 or 8 tokens return ":".join(tokens[3:7]) # schema id spans 0-based positions 3-6 # get txn by sequence number, retrieve schema identifier components seq_no = tokens[3] return (await self.get_schema(seq_no))["id"]
[ "async", "def", "credential_definition_id2schema_id", "(", "self", ",", "credential_definition_id", ")", ":", "# scrape schema id or sequence number from cred def id", "tokens", "=", "credential_definition_id", ".", "split", "(", "\":\"", ")", "if", "len", "(", "tokens", ")", "==", "8", ":", "# node protocol >= 1.4: cred def id has 5 or 8 tokens", "return", "\":\"", ".", "join", "(", "tokens", "[", "3", ":", "7", "]", ")", "# schema id spans 0-based positions 3-6", "# get txn by sequence number, retrieve schema identifier components", "seq_no", "=", "tokens", "[", "3", "]", "return", "(", "await", "self", ".", "get_schema", "(", "seq_no", ")", ")", "[", "\"id\"", "]" ]
[ 623, 4 ]
[ 639, 52 ]
python
en
['en', 'error', 'th']
False
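credential_definition_id2schema_id distinguishes the two cred def id layouts by token count: an 8-token id embeds the full schema id at 0-based positions 3-6, while a 5-token id carries only a schema sequence number that still has to be resolved through get_schema on a live ledger. A standalone sketch of the token handling, with illustrative identifiers:

# 8-token form embeds the schema id directly.
cred_def_id = "GVsduAjk1kcpqm5cy8dLBp:3:CL:WgWxqztrNooG92RXvxSTWv:2:prefs:1.0:default"
tokens = cred_def_id.split(":")
if len(tokens) == 8:
    print(":".join(tokens[3:7]))  # WgWxqztrNooG92RXvxSTWv:2:prefs:1.0

# 5-token form carries a schema sequence number instead.
cred_def_id = "GVsduAjk1kcpqm5cy8dLBp:3:CL:1234:default"
tokens = cred_def_id.split(":")
if len(tokens) != 8:
    print(tokens[3])  # 1234 -- would be passed to get_schema() against a ledger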
IndyLedger.get_key_for_did
(self, did: str)
Fetch the verkey for a ledger DID. Args: did: The DID to look up on the ledger or in the cache
Fetch the verkey for a ledger DID.
async def get_key_for_did(self, did: str) -> str: """Fetch the verkey for a ledger DID. Args: did: The DID to look up on the ledger or in the cache """ nym = self.did_to_nym(did) public_info = await self.wallet.get_public_did() public_did = public_info.did if public_info else None with IndyErrorHandler("Exception when building nym request"): request_json = await indy.ledger.build_get_nym_request(public_did, nym) response_json = await self._submit(request_json, public_did=public_did) data_json = (json.loads(response_json))["result"]["data"] return json.loads(data_json)["verkey"]
[ "async", "def", "get_key_for_did", "(", "self", ",", "did", ":", "str", ")", "->", "str", ":", "nym", "=", "self", ".", "did_to_nym", "(", "did", ")", "public_info", "=", "await", "self", ".", "wallet", ".", "get_public_did", "(", ")", "public_did", "=", "public_info", ".", "did", "if", "public_info", "else", "None", "with", "IndyErrorHandler", "(", "\"Exception when building nym request\"", ")", ":", "request_json", "=", "await", "indy", ".", "ledger", ".", "build_get_nym_request", "(", "public_did", ",", "nym", ")", "response_json", "=", "await", "self", ".", "_submit", "(", "request_json", ",", "public_did", "=", "public_did", ")", "data_json", "=", "(", "json", ".", "loads", "(", "response_json", ")", ")", "[", "\"result\"", "]", "[", "\"data\"", "]", "return", "json", ".", "loads", "(", "data_json", ")", "[", "\"verkey\"", "]" ]
[ 641, 4 ]
[ 654, 46 ]
python
en
['en', 'en', 'en']
True
IndyLedger.get_endpoint_for_did
(self, did: str)
Fetch the endpoint for a ledger DID. Args: did: The DID to look up on the ledger or in the cache
Fetch the endpoint for a ledger DID.
async def get_endpoint_for_did(self, did: str) -> str: """Fetch the endpoint for a ledger DID. Args: did: The DID to look up on the ledger or in the cache """ nym = self.did_to_nym(did) public_info = await self.wallet.get_public_did() public_did = public_info.did if public_info else None with IndyErrorHandler("Exception when building attribute request"): request_json = await indy.ledger.build_get_attrib_request( public_did, nym, "endpoint", None, None ) response_json = await self._submit(request_json, public_did=public_did) endpoint_json = json.loads(response_json)["result"]["data"] if endpoint_json: address = json.loads(endpoint_json)["endpoint"].get("endpoint", None) else: address = None return address
[ "async", "def", "get_endpoint_for_did", "(", "self", ",", "did", ":", "str", ")", "->", "str", ":", "nym", "=", "self", ".", "did_to_nym", "(", "did", ")", "public_info", "=", "await", "self", ".", "wallet", ".", "get_public_did", "(", ")", "public_did", "=", "public_info", ".", "did", "if", "public_info", "else", "None", "with", "IndyErrorHandler", "(", "\"Exception when building attribute request\"", ")", ":", "request_json", "=", "await", "indy", ".", "ledger", ".", "build_get_attrib_request", "(", "public_did", ",", "nym", ",", "\"endpoint\"", ",", "None", ",", "None", ")", "response_json", "=", "await", "self", ".", "_submit", "(", "request_json", ",", "public_did", "=", "public_did", ")", "endpoint_json", "=", "json", ".", "loads", "(", "response_json", ")", "[", "\"result\"", "]", "[", "\"data\"", "]", "if", "endpoint_json", ":", "address", "=", "json", ".", "loads", "(", "endpoint_json", ")", "[", "\"endpoint\"", "]", ".", "get", "(", "\"endpoint\"", ",", "None", ")", "else", ":", "address", "=", "None", "return", "address" ]
[ 656, 4 ]
[ 676, 22 ]
python
en
['en', 'en', 'en']
True
IndyLedger.update_endpoint_for_did
(self, did: str, endpoint: str)
Check and update the endpoint on the ledger. Args: did: The ledger DID endpoint: The endpoint address
Check and update the endpoint on the ledger.
async def update_endpoint_for_did(self, did: str, endpoint: str) -> bool: """Check and update the endpoint on the ledger. Args: did: The ledger DID endpoint: The endpoint address transport_vk: The endpoint transport verkey """ exist_endpoint = await self.get_endpoint_for_did(did) if exist_endpoint != endpoint: nym = self.did_to_nym(did) attr_json = json.dumps({"endpoint": {"endpoint": endpoint}}) with IndyErrorHandler("Exception when building attribute request"): request_json = await indy.ledger.build_attrib_request( nym, nym, None, attr_json, None ) await self._submit(request_json, True, True) return True return False
[ "async", "def", "update_endpoint_for_did", "(", "self", ",", "did", ":", "str", ",", "endpoint", ":", "str", ")", "->", "bool", ":", "exist_endpoint", "=", "await", "self", ".", "get_endpoint_for_did", "(", "did", ")", "if", "exist_endpoint", "!=", "endpoint", ":", "nym", "=", "self", ".", "did_to_nym", "(", "did", ")", "attr_json", "=", "json", ".", "dumps", "(", "{", "\"endpoint\"", ":", "{", "\"endpoint\"", ":", "endpoint", "}", "}", ")", "with", "IndyErrorHandler", "(", "\"Exception when building attribute request\"", ")", ":", "request_json", "=", "await", "indy", ".", "ledger", ".", "build_attrib_request", "(", "nym", ",", "nym", ",", "None", ",", "attr_json", ",", "None", ")", "await", "self", ".", "_submit", "(", "request_json", ",", "True", ",", "True", ")", "return", "True", "return", "False" ]
[ 678, 4 ]
[ 696, 20 ]
python
en
['en', 'en', 'en']
True
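update_endpoint_for_did writes only when the ledger value differs, and the ATTRIB payload nests the address under a double "endpoint" key. A standalone sketch of the comparison and payload construction; the addresses are illustrative, and building/submitting the actual request requires a live ledger, so it is omitted:

import json

exist_endpoint = "http://old.example.com:8020"   # what get_endpoint_for_did returned
endpoint = "http://new.example.com:8020"         # desired value

if exist_endpoint != endpoint:
    attr_json = json.dumps({"endpoint": {"endpoint": endpoint}})
    print(attr_json)  # {"endpoint": {"endpoint": "http://new.example.com:8020"}}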
IndyLedger.register_nym
( self, did: str, verkey: str, alias: str = None, role: str = None )
Register a nym on the ledger. Args: did: DID to register on the ledger. verkey: The verification key of the keypair. alias: Human-friendly alias to assign to the DID. role: For permissioned ledgers, what role should the new DID have.
Register a nym on the ledger.
async def register_nym( self, did: str, verkey: str, alias: str = None, role: str = None ): """ Register a nym on the ledger. Args: did: DID to register on the ledger. verkey: The verification key of the keypair. alias: Human-friendly alias to assign to the DID. role: For permissioned ledgers, what role should the new DID have. """ public_info = await self.wallet.get_public_did() public_did = public_info.did if public_info else None r = await indy.ledger.build_nym_request(public_did, did, verkey, alias, role) await self._submit(r, True, True, public_did=public_did)
[ "async", "def", "register_nym", "(", "self", ",", "did", ":", "str", ",", "verkey", ":", "str", ",", "alias", ":", "str", "=", "None", ",", "role", ":", "str", "=", "None", ")", ":", "public_info", "=", "await", "self", ".", "wallet", ".", "get_public_did", "(", ")", "public_did", "=", "public_info", ".", "did", "if", "public_info", "else", "None", "r", "=", "await", "indy", ".", "ledger", ".", "build_nym_request", "(", "public_did", ",", "did", ",", "verkey", ",", "alias", ",", "role", ")", "await", "self", ".", "_submit", "(", "r", ",", "True", ",", "True", ",", "public_did", "=", "public_did", ")" ]
[ 698, 4 ]
[ 713, 64 ]
python
en
['en', 'error', 'th']
False
IndyLedger.nym_to_did
(self, nym: str)
Format a nym with the ledger's DID prefix.
Format a nym with the ledger's DID prefix.
def nym_to_did(self, nym: str) -> str: """Format a nym with the ledger's DID prefix.""" if nym: # remove any existing prefix nym = self.did_to_nym(nym) return f"did:sov:{nym}"
[ "def", "nym_to_did", "(", "self", ",", "nym", ":", "str", ")", "->", "str", ":", "if", "nym", ":", "# remove any existing prefix", "nym", "=", "self", ".", "did_to_nym", "(", "nym", ")", "return", "f\"did:sov:{nym}\"" ]
[ 715, 4 ]
[ 720, 35 ]
python
en
['en', 'en', 'en']
True
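nym_to_did strips any existing prefix before prepending did:sov:, which makes it idempotent. A standalone sketch of that round trip; the inline did_to_nym here mirrors what the record's comment says the real helper does and is an assumption, since that method is not shown in this record:

def did_to_nym(did: str) -> str:
    # Assumed behaviour: drop a "did:sov:" prefix if present.
    return did[len("did:sov:"):] if did and did.startswith("did:sov:") else did


def nym_to_did(nym: str) -> str:
    if nym:
        # remove any existing prefix
        nym = did_to_nym(nym)
    return f"did:sov:{nym}"


print(nym_to_did("WgWxqztrNooG92RXvxSTWv"))          # did:sov:WgWxqztrNooG92RXvxSTWv
print(nym_to_did("did:sov:WgWxqztrNooG92RXvxSTWv"))  # did:sov:WgWxqztrNooG92RXvxSTWv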
IndyLedger.get_txn_author_agreement
(self, reload: bool = False)
Get the current transaction author agreement, fetching it if necessary.
Get the current transaction author agreement, fetching it if necessary.
async def get_txn_author_agreement(self, reload: bool = False): """Get the current transaction author agreement, fetching it if necessary.""" if not self.taa_cache or reload: self.taa_cache = await self.fetch_txn_author_agreement() return self.taa_cache
[ "async", "def", "get_txn_author_agreement", "(", "self", ",", "reload", ":", "bool", "=", "False", ")", ":", "if", "not", "self", ".", "taa_cache", "or", "reload", ":", "self", ".", "taa_cache", "=", "await", "self", ".", "fetch_txn_author_agreement", "(", ")", "return", "self", ".", "taa_cache" ]
[ 722, 4 ]
[ 726, 29 ]
python
en
['en', 'en', 'en']
True
IndyLedger.fetch_txn_author_agreement
(self)
Fetch the current AML and TAA from the ledger.
Fetch the current AML and TAA from the ledger.
async def fetch_txn_author_agreement(self): """Fetch the current AML and TAA from the ledger.""" public_info = await self.wallet.get_public_did() public_did = public_info.did if public_info else None get_aml_req = await indy.ledger.build_get_acceptance_mechanisms_request( public_did, None, None ) response_json = await self._submit(get_aml_req, public_did=public_did) aml_found = (json.loads(response_json))["result"]["data"] get_taa_req = await indy.ledger.build_get_txn_author_agreement_request( public_did, None ) response_json = await self._submit(get_taa_req, public_did=public_did) taa_found = (json.loads(response_json))["result"]["data"] taa_required = taa_found and taa_found["text"] if taa_found: taa_plaintext = taa_found["version"] + taa_found["text"] taa_found["digest"] = sha256(taa_plaintext.encode("utf-8")).digest().hex() return { "aml_record": aml_found, "taa_record": taa_found, "taa_required": taa_required, }
[ "async", "def", "fetch_txn_author_agreement", "(", "self", ")", ":", "public_info", "=", "await", "self", ".", "wallet", ".", "get_public_did", "(", ")", "public_did", "=", "public_info", ".", "did", "if", "public_info", "else", "None", "get_aml_req", "=", "await", "indy", ".", "ledger", ".", "build_get_acceptance_mechanisms_request", "(", "public_did", ",", "None", ",", "None", ")", "response_json", "=", "await", "self", ".", "_submit", "(", "get_aml_req", ",", "public_did", "=", "public_did", ")", "aml_found", "=", "(", "json", ".", "loads", "(", "response_json", ")", ")", "[", "\"result\"", "]", "[", "\"data\"", "]", "get_taa_req", "=", "await", "indy", ".", "ledger", ".", "build_get_txn_author_agreement_request", "(", "public_did", ",", "None", ")", "response_json", "=", "await", "self", ".", "_submit", "(", "get_taa_req", ",", "public_did", "=", "public_did", ")", "taa_found", "=", "(", "json", ".", "loads", "(", "response_json", ")", ")", "[", "\"result\"", "]", "[", "\"data\"", "]", "taa_required", "=", "taa_found", "and", "taa_found", "[", "\"text\"", "]", "if", "taa_found", ":", "taa_plaintext", "=", "taa_found", "[", "\"version\"", "]", "+", "taa_found", "[", "\"text\"", "]", "taa_found", "[", "\"digest\"", "]", "=", "sha256", "(", "taa_plaintext", ".", "encode", "(", "\"utf-8\"", ")", ")", ".", "digest", "(", ")", ".", "hex", "(", ")", "return", "{", "\"aml_record\"", ":", "aml_found", ",", "\"taa_record\"", ":", "taa_found", ",", "\"taa_required\"", ":", "taa_required", ",", "}" ]
[ 728, 4 ]
[ 753, 9 ]
python
en
['en', 'en', 'en']
True
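fetch_txn_author_agreement derives the TAA digest as the SHA-256 of the version string concatenated with the agreement text. The digest step as a standalone sketch, with illustrative TAA content:

from hashlib import sha256

# Illustrative TAA record as it might appear in the ledger reply.
taa_found = {"version": "1.0", "text": "Example transaction author agreement text"}

taa_plaintext = taa_found["version"] + taa_found["text"]
taa_found["digest"] = sha256(taa_plaintext.encode("utf-8")).digest().hex()
print(taa_found["digest"])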
IndyLedger.get_indy_storage
(self)
Get an IndyStorage instance for the current wallet.
Get an IndyStorage instance for the current wallet.
def get_indy_storage(self) -> IndyStorage: """Get an IndyStorage instance for the current wallet.""" return IndyStorage(self.wallet)
[ "def", "get_indy_storage", "(", "self", ")", "->", "IndyStorage", ":", "return", "IndyStorage", "(", "self", ".", "wallet", ")" ]
[ 755, 4 ]
[ 757, 39 ]
python
en
['en', 'en', 'en']
True
IndyLedger.taa_rough_timestamp
(self)
Get a timestamp accurate to the day. Anything more accurate is a privacy concern.
Get a timestamp accurate to the day.
def taa_rough_timestamp(self) -> int: """Get a timestamp accurate to the day. Anything more accurate is a privacy concern. """ return int(datetime.combine(date.today(), datetime.min.time()).timestamp())
[ "def", "taa_rough_timestamp", "(", "self", ")", "->", "int", ":", "return", "int", "(", "datetime", ".", "combine", "(", "date", ".", "today", "(", ")", ",", "datetime", ".", "min", ".", "time", "(", ")", ")", ".", "timestamp", "(", ")", ")" ]
[ 759, 4 ]
[ 764, 83 ]
python
en
['en', 'en', 'en']
True
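taa_rough_timestamp deliberately reports midnight of the current day so that the stored acceptance time reveals nothing finer than the date. The same computation as a standalone sketch:

from datetime import date, datetime

# Seconds since the epoch for today at 00:00 local time.
rough = int(datetime.combine(date.today(), datetime.min.time()).timestamp())
print(rough)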
IndyLedger.accept_txn_author_agreement
( self, taa_record: dict, mechanism: str, accept_time: int = None, store: bool = False, )
Save a new record recording the acceptance of the TAA.
Save a new record recording the acceptance of the TAA.
async def accept_txn_author_agreement( self, taa_record: dict, mechanism: str, accept_time: int = None, store: bool = False, ): """Save a new record recording the acceptance of the TAA.""" if not accept_time: accept_time = self.taa_rough_timestamp() acceptance = { "text": taa_record["text"], "version": taa_record["version"], "digest": taa_record["digest"], "mechanism": mechanism, "time": accept_time, } record = StorageRecord( TAA_ACCEPTED_RECORD_TYPE, json.dumps(acceptance), {"pool_name": self.pool_name}, ) storage = self.get_indy_storage() await storage.add_record(record) cache_key = TAA_ACCEPTED_RECORD_TYPE + "::" + self.pool_name await self.cache.set(cache_key, acceptance, self.cache_duration)
[ "async", "def", "accept_txn_author_agreement", "(", "self", ",", "taa_record", ":", "dict", ",", "mechanism", ":", "str", ",", "accept_time", ":", "int", "=", "None", ",", "store", ":", "bool", "=", "False", ",", ")", ":", "if", "not", "accept_time", ":", "accept_time", "=", "self", ".", "taa_rough_timestamp", "(", ")", "acceptance", "=", "{", "\"text\"", ":", "taa_record", "[", "\"text\"", "]", ",", "\"version\"", ":", "taa_record", "[", "\"version\"", "]", ",", "\"digest\"", ":", "taa_record", "[", "\"digest\"", "]", ",", "\"mechanism\"", ":", "mechanism", ",", "\"time\"", ":", "accept_time", ",", "}", "record", "=", "StorageRecord", "(", "TAA_ACCEPTED_RECORD_TYPE", ",", "json", ".", "dumps", "(", "acceptance", ")", ",", "{", "\"pool_name\"", ":", "self", ".", "pool_name", "}", ",", ")", "storage", "=", "self", ".", "get_indy_storage", "(", ")", "await", "storage", ".", "add_record", "(", "record", ")", "cache_key", "=", "TAA_ACCEPTED_RECORD_TYPE", "+", "\"::\"", "+", "self", ".", "pool_name", "await", "self", ".", "cache", ".", "set", "(", "cache_key", ",", "acceptance", ",", "self", ".", "cache_duration", ")" ]
[ 766, 4 ]
[ 791, 72 ]
python
en
['en', 'en', 'en']
True
IndyLedger.get_latest_txn_author_acceptance
(self)
Look up the latest TAA acceptance.
Look up the latest TAA acceptance.
async def get_latest_txn_author_acceptance(self): """Look up the latest TAA acceptance.""" cache_key = TAA_ACCEPTED_RECORD_TYPE + "::" + self.pool_name acceptance = await self.cache.get(cache_key) if acceptance is None: storage = self.get_indy_storage() tag_filter = {"pool_name": self.pool_name} found = await storage.search_records( TAA_ACCEPTED_RECORD_TYPE, tag_filter ).fetch_all() if found: records = list(json.loads(record.value) for record in found) records.sort(key=lambda v: v["time"], reverse=True) acceptance = records[0] else: acceptance = {} await self.cache.set(cache_key, acceptance, self.cache_duration) return acceptance
[ "async", "def", "get_latest_txn_author_acceptance", "(", "self", ")", ":", "cache_key", "=", "TAA_ACCEPTED_RECORD_TYPE", "+", "\"::\"", "+", "self", ".", "pool_name", "acceptance", "=", "await", "self", ".", "cache", ".", "get", "(", "cache_key", ")", "if", "acceptance", "is", "None", ":", "storage", "=", "self", ".", "get_indy_storage", "(", ")", "tag_filter", "=", "{", "\"pool_name\"", ":", "self", ".", "pool_name", "}", "found", "=", "await", "storage", ".", "search_records", "(", "TAA_ACCEPTED_RECORD_TYPE", ",", "tag_filter", ")", ".", "fetch_all", "(", ")", "if", "found", ":", "records", "=", "list", "(", "json", ".", "loads", "(", "record", ".", "value", ")", "for", "record", "in", "found", ")", "records", ".", "sort", "(", "key", "=", "lambda", "v", ":", "v", "[", "\"time\"", "]", ",", "reverse", "=", "True", ")", "acceptance", "=", "records", "[", "0", "]", "else", ":", "acceptance", "=", "{", "}", "await", "self", ".", "cache", ".", "set", "(", "cache_key", ",", "acceptance", ",", "self", ".", "cache_duration", ")", "return", "acceptance" ]
[ 793, 4 ]
[ 810, 25 ]
python
en
['en', 'en', 'en']
True
Marker.color
(self)
Sets the marker color of all increasing values. The 'color' property is a color and may be specified as: - A hex string (e.g. '#ff0000') - An rgb/rgba string (e.g. 'rgb(255,0,0)') - An hsl/hsla string (e.g. 'hsl(0,100%,50%)') - An hsv/hsva string (e.g. 'hsv(0,100%,100%)') - A named CSS color: aliceblue, antiquewhite, aqua, aquamarine, azure, beige, bisque, black, blanchedalmond, blue, blueviolet, brown, burlywood, cadetblue, chartreuse, chocolate, coral, cornflowerblue, cornsilk, crimson, cyan, darkblue, darkcyan, darkgoldenrod, darkgray, darkgrey, darkgreen, darkkhaki, darkmagenta, darkolivegreen, darkorange, darkorchid, darkred, darksalmon, darkseagreen, darkslateblue, darkslategray, darkslategrey, darkturquoise, darkviolet, deeppink, deepskyblue, dimgray, dimgrey, dodgerblue, firebrick, floralwhite, forestgreen, fuchsia, gainsboro, ghostwhite, gold, goldenrod, gray, grey, green, greenyellow, honeydew, hotpink, indianred, indigo, ivory, khaki, lavender, lavenderblush, lawngreen, lemonchiffon, lightblue, lightcoral, lightcyan, lightgoldenrodyellow, lightgray, lightgrey, lightgreen, lightpink, lightsalmon, lightseagreen, lightskyblue, lightslategray, lightslategrey, lightsteelblue, lightyellow, lime, limegreen, linen, magenta, maroon, mediumaquamarine, mediumblue, mediumorchid, mediumpurple, mediumseagreen, mediumslateblue, mediumspringgreen, mediumturquoise, mediumvioletred, midnightblue, mintcream, mistyrose, moccasin, navajowhite, navy, oldlace, olive, olivedrab, orange, orangered, orchid, palegoldenrod, palegreen, paleturquoise, palevioletred, papayawhip, peachpuff, peru, pink, plum, powderblue, purple, red, rosybrown, royalblue, rebeccapurple, saddlebrown, salmon, sandybrown, seagreen, seashell, sienna, silver, skyblue, slateblue, slategray, slategrey, snow, springgreen, steelblue, tan, teal, thistle, tomato, turquoise, violet, wheat, white, whitesmoke, yellow, yellowgreen Returns ------- str
Sets the marker color of all increasing values. The 'color' property is a color and may be specified as: - A hex string (e.g. '#ff0000') - An rgb/rgba string (e.g. 'rgb(255,0,0)') - An hsl/hsla string (e.g. 'hsl(0,100%,50%)') - An hsv/hsva string (e.g. 'hsv(0,100%,100%)') - A named CSS color: aliceblue, antiquewhite, aqua, aquamarine, azure, beige, bisque, black, blanchedalmond, blue, blueviolet, brown, burlywood, cadetblue, chartreuse, chocolate, coral, cornflowerblue, cornsilk, crimson, cyan, darkblue, darkcyan, darkgoldenrod, darkgray, darkgrey, darkgreen, darkkhaki, darkmagenta, darkolivegreen, darkorange, darkorchid, darkred, darksalmon, darkseagreen, darkslateblue, darkslategray, darkslategrey, darkturquoise, darkviolet, deeppink, deepskyblue, dimgray, dimgrey, dodgerblue, firebrick, floralwhite, forestgreen, fuchsia, gainsboro, ghostwhite, gold, goldenrod, gray, grey, green, greenyellow, honeydew, hotpink, indianred, indigo, ivory, khaki, lavender, lavenderblush, lawngreen, lemonchiffon, lightblue, lightcoral, lightcyan, lightgoldenrodyellow, lightgray, lightgrey, lightgreen, lightpink, lightsalmon, lightseagreen, lightskyblue, lightslategray, lightslategrey, lightsteelblue, lightyellow, lime, limegreen, linen, magenta, maroon, mediumaquamarine, mediumblue, mediumorchid, mediumpurple, mediumseagreen, mediumslateblue, mediumspringgreen, mediumturquoise, mediumvioletred, midnightblue, mintcream, mistyrose, moccasin, navajowhite, navy, oldlace, olive, olivedrab, orange, orangered, orchid, palegoldenrod, palegreen, paleturquoise, palevioletred, papayawhip, peachpuff, peru, pink, plum, powderblue, purple, red, rosybrown, royalblue, rebeccapurple, saddlebrown, salmon, sandybrown, seagreen, seashell, sienna, silver, skyblue, slateblue, slategray, slategrey, snow, springgreen, steelblue, tan, teal, thistle, tomato, turquoise, violet, wheat, white, whitesmoke, yellow, yellowgreen
def color(self): """ Sets the marker color of all increasing values. The 'color' property is a color and may be specified as: - A hex string (e.g. '#ff0000') - An rgb/rgba string (e.g. 'rgb(255,0,0)') - An hsl/hsla string (e.g. 'hsl(0,100%,50%)') - An hsv/hsva string (e.g. 'hsv(0,100%,100%)') - A named CSS color: aliceblue, antiquewhite, aqua, aquamarine, azure, beige, bisque, black, blanchedalmond, blue, blueviolet, brown, burlywood, cadetblue, chartreuse, chocolate, coral, cornflowerblue, cornsilk, crimson, cyan, darkblue, darkcyan, darkgoldenrod, darkgray, darkgrey, darkgreen, darkkhaki, darkmagenta, darkolivegreen, darkorange, darkorchid, darkred, darksalmon, darkseagreen, darkslateblue, darkslategray, darkslategrey, darkturquoise, darkviolet, deeppink, deepskyblue, dimgray, dimgrey, dodgerblue, firebrick, floralwhite, forestgreen, fuchsia, gainsboro, ghostwhite, gold, goldenrod, gray, grey, green, greenyellow, honeydew, hotpink, indianred, indigo, ivory, khaki, lavender, lavenderblush, lawngreen, lemonchiffon, lightblue, lightcoral, lightcyan, lightgoldenrodyellow, lightgray, lightgrey, lightgreen, lightpink, lightsalmon, lightseagreen, lightskyblue, lightslategray, lightslategrey, lightsteelblue, lightyellow, lime, limegreen, linen, magenta, maroon, mediumaquamarine, mediumblue, mediumorchid, mediumpurple, mediumseagreen, mediumslateblue, mediumspringgreen, mediumturquoise, mediumvioletred, midnightblue, mintcream, mistyrose, moccasin, navajowhite, navy, oldlace, olive, olivedrab, orange, orangered, orchid, palegoldenrod, palegreen, paleturquoise, palevioletred, papayawhip, peachpuff, peru, pink, plum, powderblue, purple, red, rosybrown, royalblue, rebeccapurple, saddlebrown, salmon, sandybrown, seagreen, seashell, sienna, silver, skyblue, slateblue, slategray, slategrey, snow, springgreen, steelblue, tan, teal, thistle, tomato, turquoise, violet, wheat, white, whitesmoke, yellow, yellowgreen Returns ------- str """ return self["color"]
[ "def", "color", "(", "self", ")", ":", "return", "self", "[", "\"color\"", "]" ]
[ 15, 4 ]
[ 65, 28 ]
python
en
['en', 'error', 'th']
False
Marker.line
(self)
The 'line' property is an instance of Line that may be specified as: - An instance of :class:`plotly.graph_objs.waterfall.increasing.marker.Line` - A dict of string/value properties that will be passed to the Line constructor Supported dict properties: color Sets the line color of all increasing values. width Sets the line width of all increasing values. Returns ------- plotly.graph_objs.waterfall.increasing.marker.Line
The 'line' property is an instance of Line that may be specified as: - An instance of :class:`plotly.graph_objs.waterfall.increasing.marker.Line` - A dict of string/value properties that will be passed to the Line constructor Supported dict properties: color Sets the line color of all increasing values. width Sets the line width of all increasing values.
def line(self): """ The 'line' property is an instance of Line that may be specified as: - An instance of :class:`plotly.graph_objs.waterfall.increasing.marker.Line` - A dict of string/value properties that will be passed to the Line constructor Supported dict properties: color Sets the line color of all increasing values. width Sets the line width of all increasing values. Returns ------- plotly.graph_objs.waterfall.increasing.marker.Line """ return self["line"]
[ "def", "line", "(", "self", ")", ":", "return", "self", "[", "\"line\"", "]" ]
[ 74, 4 ]
[ 93, 27 ]
python
en
['en', 'error', 'th']
False
Marker.__init__
(self, arg=None, color=None, line=None, **kwargs)
Construct a new Marker object Parameters ---------- arg dict of properties compatible with this constructor or an instance of :class:`plotly.graph_objs.waterfall.increasing.Marker` color Sets the marker color of all increasing values. line :class:`plotly.graph_objects.waterfall.increasing.marker.Line` instance or dict with compatible properties Returns ------- Marker
Construct a new Marker object Parameters ---------- arg dict of properties compatible with this constructor or an instance of :class:`plotly.graph_objs.waterfall.increasing.Marker` color Sets the marker color of all increasing values. line :class:`plotly.graph_objects.waterfall.increasing.marker.Line` instance or dict with compatible properties
def __init__(self, arg=None, color=None, line=None, **kwargs): """ Construct a new Marker object Parameters ---------- arg dict of properties compatible with this constructor or an instance of :class:`plotly.graph_objs.waterfall.increasing.Marker` color Sets the marker color of all increasing values. line :class:`plotly.graph_objects.waterfall.increasing.marke r.Line` instance or dict with compatible properties Returns ------- Marker """ super(Marker, self).__init__("marker") if "_parent" in kwargs: self._parent = kwargs["_parent"] return # Validate arg # ------------ if arg is None: arg = {} elif isinstance(arg, self.__class__): arg = arg.to_plotly_json() elif isinstance(arg, dict): arg = _copy.copy(arg) else: raise ValueError( """\ The first argument to the plotly.graph_objs.waterfall.increasing.Marker constructor must be a dict or an instance of :class:`plotly.graph_objs.waterfall.increasing.Marker`""" ) # Handle skip_invalid # ------------------- self._skip_invalid = kwargs.pop("skip_invalid", False) self._validate = kwargs.pop("_validate", True) # Populate data dict with properties # ---------------------------------- _v = arg.pop("color", None) _v = color if color is not None else _v if _v is not None: self["color"] = _v _v = arg.pop("line", None) _v = line if line is not None else _v if _v is not None: self["line"] = _v # Process unknown kwargs # ---------------------- self._process_kwargs(**dict(arg, **kwargs)) # Reset skip_invalid # ------------------ self._skip_invalid = False
[ "def", "__init__", "(", "self", ",", "arg", "=", "None", ",", "color", "=", "None", ",", "line", "=", "None", ",", "*", "*", "kwargs", ")", ":", "super", "(", "Marker", ",", "self", ")", ".", "__init__", "(", "\"marker\"", ")", "if", "\"_parent\"", "in", "kwargs", ":", "self", ".", "_parent", "=", "kwargs", "[", "\"_parent\"", "]", "return", "# Validate arg", "# ------------", "if", "arg", "is", "None", ":", "arg", "=", "{", "}", "elif", "isinstance", "(", "arg", ",", "self", ".", "__class__", ")", ":", "arg", "=", "arg", ".", "to_plotly_json", "(", ")", "elif", "isinstance", "(", "arg", ",", "dict", ")", ":", "arg", "=", "_copy", ".", "copy", "(", "arg", ")", "else", ":", "raise", "ValueError", "(", "\"\"\"\\\nThe first argument to the plotly.graph_objs.waterfall.increasing.Marker \nconstructor must be a dict or \nan instance of :class:`plotly.graph_objs.waterfall.increasing.Marker`\"\"\"", ")", "# Handle skip_invalid", "# -------------------", "self", ".", "_skip_invalid", "=", "kwargs", ".", "pop", "(", "\"skip_invalid\"", ",", "False", ")", "self", ".", "_validate", "=", "kwargs", ".", "pop", "(", "\"_validate\"", ",", "True", ")", "# Populate data dict with properties", "# ----------------------------------", "_v", "=", "arg", ".", "pop", "(", "\"color\"", ",", "None", ")", "_v", "=", "color", "if", "color", "is", "not", "None", "else", "_v", "if", "_v", "is", "not", "None", ":", "self", "[", "\"color\"", "]", "=", "_v", "_v", "=", "arg", ".", "pop", "(", "\"line\"", ",", "None", ")", "_v", "=", "line", "if", "line", "is", "not", "None", "else", "_v", "if", "_v", "is", "not", "None", ":", "self", "[", "\"line\"", "]", "=", "_v", "# Process unknown kwargs", "# ----------------------", "self", ".", "_process_kwargs", "(", "*", "*", "dict", "(", "arg", ",", "*", "*", "kwargs", ")", ")", "# Reset skip_invalid", "# ------------------", "self", ".", "_skip_invalid", "=", "False" ]
[ 111, 4 ]
[ 175, 34 ]
python
en
['en', 'error', 'th']
False
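A brief usage sketch for the Marker record above, assuming plotly is installed; the trace values and colors are illustrative. The object can be constructed directly from the class documented here, and an equivalent plain dict is accepted when building a Waterfall trace:

import plotly.graph_objects as go

# Direct construction of the object documented in this record.
marker = go.waterfall.increasing.Marker(
    color="green", line={"color": "darkgreen", "width": 2}
)

# Equivalent dict form passed through a Waterfall trace.
fig = go.Figure(
    go.Waterfall(
        y=[10, -4, 6],
        increasing={"marker": {"color": "green", "line": {"color": "darkgreen", "width": 2}}},
    )
)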
Font.color
(self)
The 'color' property is a color and may be specified as: - A hex string (e.g. '#ff0000') - An rgb/rgba string (e.g. 'rgb(255,0,0)') - An hsl/hsla string (e.g. 'hsl(0,100%,50%)') - An hsv/hsva string (e.g. 'hsv(0,100%,100%)') - A named CSS color: aliceblue, antiquewhite, aqua, aquamarine, azure, beige, bisque, black, blanchedalmond, blue, blueviolet, brown, burlywood, cadetblue, chartreuse, chocolate, coral, cornflowerblue, cornsilk, crimson, cyan, darkblue, darkcyan, darkgoldenrod, darkgray, darkgrey, darkgreen, darkkhaki, darkmagenta, darkolivegreen, darkorange, darkorchid, darkred, darksalmon, darkseagreen, darkslateblue, darkslategray, darkslategrey, darkturquoise, darkviolet, deeppink, deepskyblue, dimgray, dimgrey, dodgerblue, firebrick, floralwhite, forestgreen, fuchsia, gainsboro, ghostwhite, gold, goldenrod, gray, grey, green, greenyellow, honeydew, hotpink, indianred, indigo, ivory, khaki, lavender, lavenderblush, lawngreen, lemonchiffon, lightblue, lightcoral, lightcyan, lightgoldenrodyellow, lightgray, lightgrey, lightgreen, lightpink, lightsalmon, lightseagreen, lightskyblue, lightslategray, lightslategrey, lightsteelblue, lightyellow, lime, limegreen, linen, magenta, maroon, mediumaquamarine, mediumblue, mediumorchid, mediumpurple, mediumseagreen, mediumslateblue, mediumspringgreen, mediumturquoise, mediumvioletred, midnightblue, mintcream, mistyrose, moccasin, navajowhite, navy, oldlace, olive, olivedrab, orange, orangered, orchid, palegoldenrod, palegreen, paleturquoise, palevioletred, papayawhip, peachpuff, peru, pink, plum, powderblue, purple, red, rosybrown, royalblue, rebeccapurple, saddlebrown, salmon, sandybrown, seagreen, seashell, sienna, silver, skyblue, slateblue, slategray, slategrey, snow, springgreen, steelblue, tan, teal, thistle, tomato, turquoise, violet, wheat, white, whitesmoke, yellow, yellowgreen - A list or array of any of the above Returns ------- str|numpy.ndarray
The 'color' property is a color and may be specified as: - A hex string (e.g. '#ff0000') - An rgb/rgba string (e.g. 'rgb(255,0,0)') - An hsl/hsla string (e.g. 'hsl(0,100%,50%)') - An hsv/hsva string (e.g. 'hsv(0,100%,100%)') - A named CSS color: aliceblue, antiquewhite, aqua, aquamarine, azure, beige, bisque, black, blanchedalmond, blue, blueviolet, brown, burlywood, cadetblue, chartreuse, chocolate, coral, cornflowerblue, cornsilk, crimson, cyan, darkblue, darkcyan, darkgoldenrod, darkgray, darkgrey, darkgreen, darkkhaki, darkmagenta, darkolivegreen, darkorange, darkorchid, darkred, darksalmon, darkseagreen, darkslateblue, darkslategray, darkslategrey, darkturquoise, darkviolet, deeppink, deepskyblue, dimgray, dimgrey, dodgerblue, firebrick, floralwhite, forestgreen, fuchsia, gainsboro, ghostwhite, gold, goldenrod, gray, grey, green, greenyellow, honeydew, hotpink, indianred, indigo, ivory, khaki, lavender, lavenderblush, lawngreen, lemonchiffon, lightblue, lightcoral, lightcyan, lightgoldenrodyellow, lightgray, lightgrey, lightgreen, lightpink, lightsalmon, lightseagreen, lightskyblue, lightslategray, lightslategrey, lightsteelblue, lightyellow, lime, limegreen, linen, magenta, maroon, mediumaquamarine, mediumblue, mediumorchid, mediumpurple, mediumseagreen, mediumslateblue, mediumspringgreen, mediumturquoise, mediumvioletred, midnightblue, mintcream, mistyrose, moccasin, navajowhite, navy, oldlace, olive, olivedrab, orange, orangered, orchid, palegoldenrod, palegreen, paleturquoise, palevioletred, papayawhip, peachpuff, peru, pink, plum, powderblue, purple, red, rosybrown, royalblue, rebeccapurple, saddlebrown, salmon, sandybrown, seagreen, seashell, sienna, silver, skyblue, slateblue, slategray, slategrey, snow, springgreen, steelblue, tan, teal, thistle, tomato, turquoise, violet, wheat, white, whitesmoke, yellow, yellowgreen - A list or array of any of the above
def color(self): """ The 'color' property is a color and may be specified as: - A hex string (e.g. '#ff0000') - An rgb/rgba string (e.g. 'rgb(255,0,0)') - An hsl/hsla string (e.g. 'hsl(0,100%,50%)') - An hsv/hsva string (e.g. 'hsv(0,100%,100%)') - A named CSS color: aliceblue, antiquewhite, aqua, aquamarine, azure, beige, bisque, black, blanchedalmond, blue, blueviolet, brown, burlywood, cadetblue, chartreuse, chocolate, coral, cornflowerblue, cornsilk, crimson, cyan, darkblue, darkcyan, darkgoldenrod, darkgray, darkgrey, darkgreen, darkkhaki, darkmagenta, darkolivegreen, darkorange, darkorchid, darkred, darksalmon, darkseagreen, darkslateblue, darkslategray, darkslategrey, darkturquoise, darkviolet, deeppink, deepskyblue, dimgray, dimgrey, dodgerblue, firebrick, floralwhite, forestgreen, fuchsia, gainsboro, ghostwhite, gold, goldenrod, gray, grey, green, greenyellow, honeydew, hotpink, indianred, indigo, ivory, khaki, lavender, lavenderblush, lawngreen, lemonchiffon, lightblue, lightcoral, lightcyan, lightgoldenrodyellow, lightgray, lightgrey, lightgreen, lightpink, lightsalmon, lightseagreen, lightskyblue, lightslategray, lightslategrey, lightsteelblue, lightyellow, lime, limegreen, linen, magenta, maroon, mediumaquamarine, mediumblue, mediumorchid, mediumpurple, mediumseagreen, mediumslateblue, mediumspringgreen, mediumturquoise, mediumvioletred, midnightblue, mintcream, mistyrose, moccasin, navajowhite, navy, oldlace, olive, olivedrab, orange, orangered, orchid, palegoldenrod, palegreen, paleturquoise, palevioletred, papayawhip, peachpuff, peru, pink, plum, powderblue, purple, red, rosybrown, royalblue, rebeccapurple, saddlebrown, salmon, sandybrown, seagreen, seashell, sienna, silver, skyblue, slateblue, slategray, slategrey, snow, springgreen, steelblue, tan, teal, thistle, tomato, turquoise, violet, wheat, white, whitesmoke, yellow, yellowgreen - A list or array of any of the above Returns ------- str|numpy.ndarray """ return self["color"]
[ "def", "color", "(", "self", ")", ":", "return", "self", "[", "\"color\"", "]" ]
[ 15, 4 ]
[ 64, 28 ]
python
en
['en', 'error', 'th']
False
Font.colorsrc
(self)
Sets the source reference on Chart Studio Cloud for color . The 'colorsrc' property must be specified as a string or as a plotly.grid_objs.Column object Returns ------- str
Sets the source reference on Chart Studio Cloud for color . The 'colorsrc' property must be specified as a string or as a plotly.grid_objs.Column object
def colorsrc(self): """ Sets the source reference on Chart Studio Cloud for color . The 'colorsrc' property must be specified as a string or as a plotly.grid_objs.Column object Returns ------- str """ return self["colorsrc"]
[ "def", "colorsrc", "(", "self", ")", ":", "return", "self", "[", "\"colorsrc\"", "]" ]
[ 73, 4 ]
[ 84, 31 ]
python
en
['en', 'error', 'th']
False
Font.family
(self)
HTML font family - the typeface that will be applied by the web browser. The web browser will only be able to apply a font if it is available on the system which it operates. Provide multiple font families, separated by commas, to indicate the preference in which to apply fonts if they aren't available on the system. The Chart Studio Cloud (at https://chart- studio.plotly.com or on-premise) generates images on a server, where only a select number of fonts are installed and supported. These include "Arial", "Balto", "Courier New", "Droid Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas One", "Old Standard TT", "Open Sans", "Overpass", "PT Sans Narrow", "Raleway", "Times New Roman". The 'family' property is a string and must be specified as: - A non-empty string - A tuple, list, or one-dimensional numpy array of the above Returns ------- str|numpy.ndarray
HTML font family - the typeface that will be applied by the web browser. The web browser will only be able to apply a font if it is available on the system which it operates. Provide multiple font families, separated by commas, to indicate the preference in which to apply fonts if they aren't available on the system. The Chart Studio Cloud (at https://chart- studio.plotly.com or on-premise) generates images on a server, where only a select number of fonts are installed and supported. These include "Arial", "Balto", "Courier New", "Droid Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas One", "Old Standard TT", "Open Sans", "Overpass", "PT Sans Narrow", "Raleway", "Times New Roman". The 'family' property is a string and must be specified as: - A non-empty string - A tuple, list, or one-dimensional numpy array of the above
def family(self): """ HTML font family - the typeface that will be applied by the web browser. The web browser will only be able to apply a font if it is available on the system which it operates. Provide multiple font families, separated by commas, to indicate the preference in which to apply fonts if they aren't available on the system. The Chart Studio Cloud (at https://chart- studio.plotly.com or on-premise) generates images on a server, where only a select number of fonts are installed and supported. These include "Arial", "Balto", "Courier New", "Droid Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas One", "Old Standard TT", "Open Sans", "Overpass", "PT Sans Narrow", "Raleway", "Times New Roman". The 'family' property is a string and must be specified as: - A non-empty string - A tuple, list, or one-dimensional numpy array of the above Returns ------- str|numpy.ndarray """ return self["family"]
[ "def", "family", "(", "self", ")", ":", "return", "self", "[", "\"family\"", "]" ]
[ 93, 4 ]
[ 116, 29 ]
python
en
['en', 'error', 'th']
False
Font.familysrc
(self)
Sets the source reference on Chart Studio Cloud for family . The 'familysrc' property must be specified as a string or as a plotly.grid_objs.Column object Returns ------- str
Sets the source reference on Chart Studio Cloud for family . The 'familysrc' property must be specified as a string or as a plotly.grid_objs.Column object
def familysrc(self): """ Sets the source reference on Chart Studio Cloud for family . The 'familysrc' property must be specified as a string or as a plotly.grid_objs.Column object Returns ------- str """ return self["familysrc"]
[ "def", "familysrc", "(", "self", ")", ":", "return", "self", "[", "\"familysrc\"", "]" ]
[ 125, 4 ]
[ 136, 32 ]
python
en
['en', 'error', 'th']
False
Font.size
(self)
The 'size' property is a number and may be specified as: - An int or float in the interval [1, inf] - A tuple, list, or one-dimensional numpy array of the above Returns ------- int|float|numpy.ndarray
The 'size' property is a number and may be specified as: - An int or float in the interval [1, inf] - A tuple, list, or one-dimensional numpy array of the above
def size(self): """ The 'size' property is a number and may be specified as: - An int or float in the interval [1, inf] - A tuple, list, or one-dimensional numpy array of the above Returns ------- int|float|numpy.ndarray """ return self["size"]
[ "def", "size", "(", "self", ")", ":", "return", "self", "[", "\"size\"", "]" ]
[ 145, 4 ]
[ 155, 27 ]
python
en
['en', 'error', 'th']
False
Font.sizesrc
(self)
Sets the source reference on Chart Studio Cloud for size . The 'sizesrc' property must be specified as a string or as a plotly.grid_objs.Column object Returns ------- str
Sets the source reference on Chart Studio Cloud for size . The 'sizesrc' property must be specified as a string or as a plotly.grid_objs.Column object
def sizesrc(self): """ Sets the source reference on Chart Studio Cloud for size . The 'sizesrc' property must be specified as a string or as a plotly.grid_objs.Column object Returns ------- str """ return self["sizesrc"]
[ "def", "sizesrc", "(", "self", ")", ":", "return", "self", "[", "\"sizesrc\"", "]" ]
[ 164, 4 ]
[ 175, 30 ]
python
en
['en', 'error', 'th']
False
Font.__init__
( self, arg=None, color=None, colorsrc=None, family=None, familysrc=None, size=None, sizesrc=None, **kwargs )
Construct a new Font object Sets the font used in hover labels. Parameters ---------- arg dict of properties compatible with this constructor or an instance of :class:`plotly.graph_objs.scatterpolargl .hoverlabel.Font` color colorsrc Sets the source reference on Chart Studio Cloud for color . family HTML font family - the typeface that will be applied by the web browser. The web browser will only be able to apply a font if it is available on the system which it operates. Provide multiple font families, separated by commas, to indicate the preference in which to apply fonts if they aren't available on the system. The Chart Studio Cloud (at https://chart-studio.plotly.com or on- premise) generates images on a server, where only a select number of fonts are installed and supported. These include "Arial", "Balto", "Courier New", "Droid Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas One", "Old Standard TT", "Open Sans", "Overpass", "PT Sans Narrow", "Raleway", "Times New Roman". familysrc Sets the source reference on Chart Studio Cloud for family . size sizesrc Sets the source reference on Chart Studio Cloud for size . Returns ------- Font
Construct a new Font object Sets the font used in hover labels.
def __init__( self, arg=None, color=None, colorsrc=None, family=None, familysrc=None, size=None, sizesrc=None, **kwargs ): """ Construct a new Font object Sets the font used in hover labels. Parameters ---------- arg dict of properties compatible with this constructor or an instance of :class:`plotly.graph_objs.scatterpolargl .hoverlabel.Font` color colorsrc Sets the source reference on Chart Studio Cloud for color . family HTML font family - the typeface that will be applied by the web browser. The web browser will only be able to apply a font if it is available on the system which it operates. Provide multiple font families, separated by commas, to indicate the preference in which to apply fonts if they aren't available on the system. The Chart Studio Cloud (at https://chart-studio.plotly.com or on- premise) generates images on a server, where only a select number of fonts are installed and supported. These include "Arial", "Balto", "Courier New", "Droid Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas One", "Old Standard TT", "Open Sans", "Overpass", "PT Sans Narrow", "Raleway", "Times New Roman". familysrc Sets the source reference on Chart Studio Cloud for family . size sizesrc Sets the source reference on Chart Studio Cloud for size . Returns ------- Font """ super(Font, self).__init__("font") if "_parent" in kwargs: self._parent = kwargs["_parent"] return # Validate arg # ------------ if arg is None: arg = {} elif isinstance(arg, self.__class__): arg = arg.to_plotly_json() elif isinstance(arg, dict): arg = _copy.copy(arg) else: raise ValueError( """\ The first argument to the plotly.graph_objs.scatterpolargl.hoverlabel.Font constructor must be a dict or an instance of :class:`plotly.graph_objs.scatterpolargl.hoverlabel.Font`""" ) # Handle skip_invalid # ------------------- self._skip_invalid = kwargs.pop("skip_invalid", False) self._validate = kwargs.pop("_validate", True) # Populate data dict with properties # ---------------------------------- _v = arg.pop("color", None) _v = color if color is not None else _v if _v is not None: self["color"] = _v _v = arg.pop("colorsrc", None) _v = colorsrc if colorsrc is not None else _v if _v is not None: self["colorsrc"] = _v _v = arg.pop("family", None) _v = family if family is not None else _v if _v is not None: self["family"] = _v _v = arg.pop("familysrc", None) _v = familysrc if familysrc is not None else _v if _v is not None: self["familysrc"] = _v _v = arg.pop("size", None) _v = size if size is not None else _v if _v is not None: self["size"] = _v _v = arg.pop("sizesrc", None) _v = sizesrc if sizesrc is not None else _v if _v is not None: self["sizesrc"] = _v # Process unknown kwargs # ---------------------- self._process_kwargs(**dict(arg, **kwargs)) # Reset skip_invalid # ------------------ self._skip_invalid = False
[ "def", "__init__", "(", "self", ",", "arg", "=", "None", ",", "color", "=", "None", ",", "colorsrc", "=", "None", ",", "family", "=", "None", ",", "familysrc", "=", "None", ",", "size", "=", "None", ",", "sizesrc", "=", "None", ",", "*", "*", "kwargs", ")", ":", "super", "(", "Font", ",", "self", ")", ".", "__init__", "(", "\"font\"", ")", "if", "\"_parent\"", "in", "kwargs", ":", "self", ".", "_parent", "=", "kwargs", "[", "\"_parent\"", "]", "return", "# Validate arg", "# ------------", "if", "arg", "is", "None", ":", "arg", "=", "{", "}", "elif", "isinstance", "(", "arg", ",", "self", ".", "__class__", ")", ":", "arg", "=", "arg", ".", "to_plotly_json", "(", ")", "elif", "isinstance", "(", "arg", ",", "dict", ")", ":", "arg", "=", "_copy", ".", "copy", "(", "arg", ")", "else", ":", "raise", "ValueError", "(", "\"\"\"\\\nThe first argument to the plotly.graph_objs.scatterpolargl.hoverlabel.Font \nconstructor must be a dict or \nan instance of :class:`plotly.graph_objs.scatterpolargl.hoverlabel.Font`\"\"\"", ")", "# Handle skip_invalid", "# -------------------", "self", ".", "_skip_invalid", "=", "kwargs", ".", "pop", "(", "\"skip_invalid\"", ",", "False", ")", "self", ".", "_validate", "=", "kwargs", ".", "pop", "(", "\"_validate\"", ",", "True", ")", "# Populate data dict with properties", "# ----------------------------------", "_v", "=", "arg", ".", "pop", "(", "\"color\"", ",", "None", ")", "_v", "=", "color", "if", "color", "is", "not", "None", "else", "_v", "if", "_v", "is", "not", "None", ":", "self", "[", "\"color\"", "]", "=", "_v", "_v", "=", "arg", ".", "pop", "(", "\"colorsrc\"", ",", "None", ")", "_v", "=", "colorsrc", "if", "colorsrc", "is", "not", "None", "else", "_v", "if", "_v", "is", "not", "None", ":", "self", "[", "\"colorsrc\"", "]", "=", "_v", "_v", "=", "arg", ".", "pop", "(", "\"family\"", ",", "None", ")", "_v", "=", "family", "if", "family", "is", "not", "None", "else", "_v", "if", "_v", "is", "not", "None", ":", "self", "[", "\"family\"", "]", "=", "_v", "_v", "=", "arg", ".", "pop", "(", "\"familysrc\"", ",", "None", ")", "_v", "=", "familysrc", "if", "familysrc", "is", "not", "None", "else", "_v", "if", "_v", "is", "not", "None", ":", "self", "[", "\"familysrc\"", "]", "=", "_v", "_v", "=", "arg", ".", "pop", "(", "\"size\"", ",", "None", ")", "_v", "=", "size", "if", "size", "is", "not", "None", "else", "_v", "if", "_v", "is", "not", "None", ":", "self", "[", "\"size\"", "]", "=", "_v", "_v", "=", "arg", ".", "pop", "(", "\"sizesrc\"", ",", "None", ")", "_v", "=", "sizesrc", "if", "sizesrc", "is", "not", "None", "else", "_v", "if", "_v", "is", "not", "None", ":", "self", "[", "\"sizesrc\"", "]", "=", "_v", "# Process unknown kwargs", "# ----------------------", "self", ".", "_process_kwargs", "(", "*", "*", "dict", "(", "arg", ",", "*", "*", "kwargs", ")", ")", "# Reset skip_invalid", "# ------------------", "self", ".", "_skip_invalid", "=", "False" ]
[ 215, 4 ]
[ 329, 34 ]
python
en
['en', 'error', 'th']
False
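A brief usage sketch for the hoverlabel Font record above, assuming plotly is installed; the trace data, font family, size, and color are illustrative, and the dict form shown is interchangeable with constructing the Font class directly:

import plotly.graph_objects as go

fig = go.Figure(
    go.Scatterpolargl(
        r=[1, 2, 3],
        theta=[0, 45, 90],
        hoverlabel={"font": {"family": "Arial", "size": 12, "color": "white"}},
    )
)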
ChatServiceAgent.data
(self)
ChatServiceAgent data property.
ChatServiceAgent data property.
def data(self): """ ChatServiceAgent data property. """ return self._data
[ "def", "data", "(", "self", ")", ":", "return", "self", ".", "_data" ]
[ 33, 4 ]
[ 37, 25 ]
python
en
['en', 'error', 'th']
False
ChatServiceAgent.data
(self, value)
Setter for ChatServiceAgent.data. The data within a ChatServiceAgent is persistent, in the sense that keys _cannot_ be removed from the data. This is important to ensure persistence of agent state across various parts of the ChatService pipeline. To ensure this property, we call `agent._data.update(value)` when explicitly setting the `data` property of an agent. This protects against cases where, e.g., the `__init__` function sets a property for the agent, and then later someone manually sets `agent.data = new_data`.
Setter for ChatServiceAgent.data.
def data(self, value):
    """
    Setter for ChatServiceAgent.data.

    The data within a ChatServiceAgent is persistent, in the sense that keys
    _cannot_ be removed from the data. This is important to ensure persistence
    of agent state across various parts of the ChatService pipeline. To ensure
    this property, we call `agent._data.update(value)` when explicitly setting
    the `data` property of an agent. This protects against cases where, e.g.,
    the `__init__` function sets a property for the agent, and then later
    someone manually sets `agent.data = new_data`.
    """
    self._data.update(value)
[ "def", "data", "(", "self", ",", "value", ")", ":", "self", ".", "_data", ".", "update", "(", "value", ")" ]
[ 40, 4 ]
[ 53, 32 ]
python
en
['en', 'error', 'th']
False
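To make the merge-only behaviour described in the entry above concrete, here is a minimal, runnable sketch. `DemoAgent` is a hypothetical stand-in, not the real ChatServiceAgent; it reproduces only the `_data`/`data` property pair shown in that entry, and the key names are illustrative.

class DemoAgent:
    def __init__(self):
        self._data = {}

    @property
    def data(self):
        return self._data

    @data.setter
    def data(self, value):
        # Keys are only ever added or overwritten, never removed.
        self._data.update(value)


agent = DemoAgent()
agent.data = {'onboarded': True}
agent.data = {'task': 'qa'}   # does not discard 'onboarded'
print(agent.data)             # {'onboarded': True, 'task': 'qa'}

The point of the merge is that assigning to `agent.data` can never silently drop state written earlier in the pipeline.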
ChatServiceAgent.observe
(self, act)
Send an agent a message through the manager.
Send an agent a message through the manager.
def observe(self, act):
    """
    Send an agent a message through the manager.
    """
    pass
[ "def", "observe", "(", "self", ",", "act", ")", ":", "pass" ]
[ 56, 4 ]
[ 60, 12 ]
python
en
['en', 'error', 'th']
False
ChatServiceAgent._send_payload
(self, receiver_id, data, quick_replies=None, persona_id=None)
Send a payload through the message manager. :param receiver_id: int identifier for agent to send message to :param data: object data to send :param quick_replies: list of quick replies :param persona_id: identifier of persona :return: a dictionary of a json response from the manager observing a payload
Send a payload through the message manager.
def _send_payload(self, receiver_id, data, quick_replies=None, persona_id=None):
    """
    Send a payload through the message manager.

    :param receiver_id: int identifier for agent to send message to
    :param data: object data to send
    :param quick_replies: list of quick replies
    :param persona_id: identifier of persona

    :return: a dictionary of a json response from the manager observing a payload
    """
    return self.manager.observe_payload(
        receiver_id, data, quick_replies, persona_id
    )
[ "def", "_send_payload", "(", "self", ",", "receiver_id", ",", "data", ",", "quick_replies", "=", "None", ",", "persona_id", "=", "None", ")", ":", "return", "self", ".", "manager", ".", "observe_payload", "(", "receiver_id", ",", "data", ",", "quick_replies", ",", "persona_id", ")" ]
[ 62, 4 ]
[ 79, 9 ]
python
en
['en', 'error', 'th']
False
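The method above simply forwards its arguments to `manager.observe_payload`. The sketch below shows that call pattern; `FakeManager`, the receiver id, and the payload contents are hypothetical stand-ins, and only the `observe_payload(receiver_id, data, quick_replies, persona_id)` call visible in the method body is assumed.

# Hypothetical manager stub; its return value is invented for the example.
class FakeManager:
    def observe_payload(self, receiver_id, data, quick_replies=None, persona_id=None):
        return {'receiver': receiver_id, 'payload': data, 'quick_replies': quick_replies}


class DemoAgent:
    def __init__(self, manager):
        self.manager = manager

    def _send_payload(self, receiver_id, data, quick_replies=None, persona_id=None):
        return self.manager.observe_payload(
            receiver_id, data, quick_replies, persona_id
        )


agent = DemoAgent(FakeManager())
response = agent._send_payload(42, {'type': 'image', 'url': 'http://example.com/cat.png'})
print(response['receiver'])  # 42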
ChatServiceAgent.put_data
(self, message)
Put data into the message queue if it hasn't already been seen.
Put data into the message queue if it hasn't already been seen.
def put_data(self, message):
    """
    Put data into the message queue if it hasn't already been seen.
    """
    pass
[ "def", "put_data", "(", "self", ",", "message", ")", ":", "pass" ]
[ 82, 4 ]
[ 86, 12 ]
python
en
['en', 'error', 'th']
False
ChatServiceAgent._queue_action
(self, action, act_id, act_data=None)
Add an action to the queue with given id and info if it hasn't already been seen. :param action: action to be added to message queue :param act_id: an identifier to check if the action has been seen or to mark the action as seen :param act_data: any data about the given action you may want to record when marking it as seen
Add an action to the queue with given id and info if it hasn't already been seen.
def _queue_action(self, action, act_id, act_data=None):
    """
    Add an action to the queue with given id and info if it hasn't already
    been seen.

    :param action: action to be added to message queue
    :param act_id: an identifier to check if the action has been seen or to
        mark the action as seen
    :param act_data: any data about the given action you may want to record
        when marking it as seen
    """
    if act_id not in self.acted_packets:
        self.acted_packets[act_id] = act_data
        self.msg_queue.put(action)
[ "def", "_queue_action", "(", "self", ",", "action", ",", "act_id", ",", "act_data", "=", "None", ")", ":", "if", "act_id", "not", "in", "self", ".", "acted_packets", ":", "self", ".", "acted_packets", "[", "act_id", "]", "=", "act_data", "self", ".", "msg_queue", ".", "put", "(", "action", ")" ]
[ 88, 4 ]
[ 104, 38 ]
python
en
['en', 'error', 'th']
False
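A small runnable sketch of the deduplication behaviour described above. It assumes `msg_queue` is a standard `queue.Queue` and `acted_packets` is a plain dict, which is consistent with how the method body uses them; `DemoAgent` and the packet id are hypothetical.

from queue import Queue


class DemoAgent:
    def __init__(self):
        self.msg_queue = Queue()
        self.acted_packets = {}

    def _queue_action(self, action, act_id, act_data=None):
        # Only enqueue actions whose id has not been recorded yet.
        if act_id not in self.acted_packets:
            self.acted_packets[act_id] = act_data
            self.msg_queue.put(action)


agent = DemoAgent()
agent._queue_action({'text': 'hello'}, act_id='pkt-1')
agent._queue_action({'text': 'hello'}, act_id='pkt-1')  # duplicate id, ignored
print(agent.msg_queue.qsize())  # 1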
ChatServiceAgent.set_stored_data
(self)
Gets agent state data from the manager.
Gets agent state data from the manager.
def set_stored_data(self):
    """
    Gets agent state data from the manager.
    """
    agent_state = self.manager.get_agent_state(self.id)
    if agent_state is not None and hasattr(agent_state, 'stored_data'):
        self.stored_data = agent_state.stored_data
[ "def", "set_stored_data", "(", "self", ")", ":", "agent_state", "=", "self", ".", "manager", ".", "get_agent_state", "(", "self", ".", "id", ")", "if", "agent_state", "is", "not", "None", "and", "hasattr", "(", "agent_state", ",", "'stored_data'", ")", ":", "self", ".", "stored_data", "=", "agent_state", ".", "stored_data" ]
[ 106, 4 ]
[ 112, 54 ]
python
en
['en', 'error', 'th']
False
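A sketch of the state-recovery flow above. `FakeManager`, `AgentState`, the agent id, and the stored values are hypothetical; only the `get_agent_state(self.id)` call and the `stored_data` attribute check shown in the method body are assumed.

class AgentState:
    def __init__(self, stored_data):
        self.stored_data = stored_data


class FakeManager:
    def get_agent_state(self, agent_id):
        # Pretend one agent has persisted state and everyone else does not.
        return AgentState({'completed_tasks': 3}) if agent_id == 'user-7' else None


class DemoAgent:
    def __init__(self, manager, agent_id):
        self.manager = manager
        self.id = agent_id
        self.stored_data = None

    def set_stored_data(self):
        agent_state = self.manager.get_agent_state(self.id)
        if agent_state is not None and hasattr(agent_state, 'stored_data'):
            self.stored_data = agent_state.stored_data


agent = DemoAgent(FakeManager(), 'user-7')
agent.set_stored_data()
print(agent.stored_data)  # {'completed_tasks': 3}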
ChatServiceAgent.get_new_act_message
(self)
Get a new act message if one exists; return None otherwise.
Get a new act message if one exists; return None otherwise.
def get_new_act_message(self):
    """
    Get a new act message if one exists; return None otherwise.
    """
    if not self.msg_queue.empty():
        return self.msg_queue.get()
    return None
[ "def", "get_new_act_message", "(", "self", ")", ":", "if", "not", "self", ".", "msg_queue", ".", "empty", "(", ")", ":", "return", "self", ".", "msg_queue", ".", "get", "(", ")", "return", "None" ]
[ 114, 4 ]
[ 120, 19 ]
python
en
['en', 'error', 'th']
False
ChatServiceAgent.act
(self)
Pulls a message from the message queue. If none exists, returns None.
Pulls a message from the message queue.
def act(self):
    """
    Pulls a message from the message queue. If none exists, returns None.
    """
    msg = self.get_new_act_message()
    return msg
[ "def", "act", "(", "self", ")", ":", "msg", "=", "self", ".", "get_new_act_message", "(", ")", "return", "msg" ]
[ 122, 4 ]
[ 129, 18 ]
python
en
['en', 'error', 'th']
False
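Taken together, the last two entries show that `act()` is a non-blocking poll of `msg_queue`: it returns the next queued message, or None when the queue is empty. A minimal sketch of that flow, with a hypothetical `DemoAgent` that reproduces only the queue-related pieces and an illustrative message:

from queue import Queue


class DemoAgent:
    def __init__(self):
        self.msg_queue = Queue()

    def get_new_act_message(self):
        if not self.msg_queue.empty():
            return self.msg_queue.get()
        return None

    def act(self):
        return self.get_new_act_message()


agent = DemoAgent()
print(agent.act())                        # None -- queue is empty
agent.msg_queue.put({'text': 'hi there'})
print(agent.act())                        # {'text': 'hi there'}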